VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@49221

Last change on this file since 49221 was 48935, checked in by vboxsync, 11 years ago

Runtime: Whitespace and svn:keyword cleanups by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 29.1 KB
/* $Id: memobj-r0drv-nt.cpp 48935 2013-10-07 21:19:37Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * Locking is supposed to have a limit just below 256MB, but in practice the
 * limit appears to be much lower. The values here have been determined
 * experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
#endif
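/* A worked example of the limit above (derived from rtR0MemObjNtLock() below):
 * locking a 100MB user buffer on AMD64 is split into ceil(100/24) = 5 MDLs,
 * four covering 24MB each and a final one covering the remaining 4MB. */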


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
#ifndef IPRT_TARGET_NT4
    /** Set if MmAllocatePagesForMdl() was used to allocate the memory. */
    bool fAllocatedPagesForMdl;
#endif
    /** Pointer returned by MmSecureVirtualMemory(). */
    PVOID pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
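/* Note: instances of the structure above are allocated with
 * rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]), ...) when more than
 * one MDL is needed (see rtR0MemObjNtLock() below), so apMdls effectively acts
 * as a variable sized trailing array even though it is declared with one entry. */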


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* rtR0MemObjNativeEnterPhys? */
            if (!pMemNt->Core.u.Phys.fAllocated)
            {
#ifndef IPRT_TARGET_NT4
                Assert(!pMemNt->fAllocatedPagesForMdl);
#endif
                /* Nothing to do here. */
                break;
            }
            /* fall thru */

        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(   pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to allocate the memory and create an MDL for it so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
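/* Note on rtR0MemObjNativeAllocPage(): since the MDL describes non-paged pool
 * and is completed with MmBuildMdlForNonPagedPool(), its PFN array is valid
 * immediately, which is what allows rtR0MemObjNativeGetPagePhysAddr() at the
 * bottom of this file to resolve physical addresses without further locking. */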


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_LOW_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}
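/* Note on rtR0MemObjNativeAllocLow(): it thus tries three strategies in order -
 * a plain page allocation that happens to land below 4GB, MmAllocatePagesForMdl()
 * with an explicit sub-4GB range (not available on NT4), and finally physically
 * contiguous memory as the last resort. */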


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in the allocation.
 * @param   uAlignment      The alignment of the physical memory to allocate.
 *                          Supported values are PAGE_SIZE, _2M, _4M and _1G.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
                                       size_t uAlignment)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
#ifdef IPRT_TARGET_NT4
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
    PhysAddrLowest.QuadPart = 0;
    PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
    void *pv = MmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
#else
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
#endif
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (   cb < _128K
        && uAlignment == PAGE_SIZE)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
}
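/* Note on rtR0MemObjNativeAllocPhys(): the PFN loop above merely checks whether
 * MmAllocatePagesForMdl() happened to return physically consecutive pages; if it
 * did not, the pages are freed again and the contiguous allocator is used instead. */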


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);

    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
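/* Note on rtR0MemObjNativeEnterPhys(): no MDL is created and nothing is mapped
 * here; the object only records the physical range and cache policy. A kernel
 * mapping is established later, on demand, by rtR0MemObjNtMap() via MmMapIoSpace()
 * and torn down again by the RTR0MEMOBJTYPE_MAPPING case in rtR0MemObjNativeFree(). */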


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem       Where to store the memory object pointer.
 * @param   pv          First page.
 * @param   cb          Number of bytes.
 * @param   fAccess     The desired access, a combination of RTMEM_PROT_READ
 *                      and RTMEM_PROT_WRITE.
 * @param   R0Process   The process \a pv and \a cb refer to.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl,
                                R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                fAccess == RTMEM_PROT_READ
                                ? IoReadAccess
                                : fAccess == RTMEM_PROT_WRITE
                                ? IoWriteAccess
                                : IoModifyAccess);

            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        if (R0Process != NIL_RTR0PROCESS)
        {
            /* Make sure the user process can't change the allocation. */
            pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb,
                                                        fAccess & RTMEM_PROT_WRITE
                                                        ? PAGE_READWRITE
                                                        : PAGE_READONLY);
            if (!pMemNt->pvSecureMem)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }

    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}
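/* Note on the failure path of rtR0MemObjNtLock(): only the MDLs that were
 * successfully locked (indexes below the failing iMdl) are unlocked and freed,
 * the secure-memory handle is released if one was taken, and the partially
 * constructed object is then destroyed with rtR0MemObjDelete(). */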


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_SUPPORTED;
}

/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS, map into system (kernel) memory;
 *                      otherwise it must be the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt to map locked regions with more than one mdl. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

#ifdef IPRT_TARGET_NT4
        /* NT SP0 can't map to a specific address. */
        if (pvFixed != (void *)-1)
            return VERR_NOT_SUPPORTED;
#endif

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (   pMemNtToMap->Core.uRel.Parent.cMappings
            && R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
#ifdef IPRT_TARGET_NT4
            void *pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
                                        R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
#else
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
#endif
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));

            /* nothing */
            rc = VERR_MAP_FAILED;
        }

    }
    else
    {
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
                                pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }
    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}
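
For orientation, here is a minimal caller-side sketch, not part of the file above. It assumes only the public ring-0 API declared in iprt/memobj.h (RTR0MemObjAllocPage, RTR0MemObjAddress, RTR0MemObjGetPagePhysAddr, RTR0MemObjFree), whose platform-independent layer dispatches into the rtR0MemObjNative* workers of this file on NT; the function name and sizes are illustrative.

#include <iprt/memobj.h>
#include <iprt/err.h>
#include <iprt/param.h>

/* Allocate two pages of ring-0 memory, look up a physical address, then free it.
 * On NT this exercises rtR0MemObjNativeAllocPage(), rtR0MemObjNativeGetPagePhysAddr()
 * and rtR0MemObjNativeFree() above. */
static int exampleUseRing0MemObj(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 2 * PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void    *pv     = RTR0MemObjAddress(hMemObj);                    /* ring-0 virtual address */
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);
        /* ... use pv and HCPhys ... */
        NOREF(pv); NOREF(HCPhys);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}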