VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@78071

Last change on this file since 78071 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 35.5 KB
1/* $Id: memobj-r0drv-nt.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/err.h>
37#include <iprt/log.h>
38#include <iprt/param.h>
39#include <iprt/string.h>
40#include <iprt/process.h>
41#include "internal/memobj.h"
42#include "internal-r0drv-nt.h"
43
44
45/*********************************************************************************************************************************
46* Defined Constants And Macros *
47*********************************************************************************************************************************/
48/** Maximum number of bytes we try to lock down in one go.
49 * Locking is supposed to have a limit just below 256MB, but in practice it appears
50 * to be much lower. The values here were determined experimentally.
51 */
52#ifdef RT_ARCH_X86
53# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
54#endif
55#ifdef RT_ARCH_AMD64
56# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
57#endif
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63/**
64 * The NT version of the memory object structure.
65 */
66typedef struct RTR0MEMOBJNT
67{
68 /** The core structure. */
69 RTR0MEMOBJINTERNAL Core;
70 /** Whether MmAllocatePagesForMdl() was used to allocate the pages. */
71 bool fAllocatedPagesForMdl;
72 /** Pointer returned by MmSecureVirtualMemory(). */
73 PVOID pvSecureMem;
74 /** The number of PMDLs (memory descriptor lists) in the array. */
75 uint32_t cMdls;
76 /** Array of MDL pointers. (variable size) */
77 PMDL apMdls[1];
78} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
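/* Illustrative sketch of how the variable-size apMdls[] array is allocated
 * (mirrors rtR0MemObjNtLock() further down; rtR0MemObjNew() is the generic
 * IPRT constructor for memory object structures):
 *
 *   PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
 *                                                       RTR0MEMOBJTYPE_LOCK, pv, cb);
 *
 * i.e. the structure is over-allocated so that apMdls[] can hold cMdls entries. */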
79
80
81
82DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
83{
84 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
85
86 /*
87 * Deal with it on a per type basis (just as a variation).
88 */
89 switch (pMemNt->Core.enmType)
90 {
91 case RTR0MEMOBJTYPE_LOW:
92 if (pMemNt->fAllocatedPagesForMdl)
93 {
94 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
95 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
96 pMemNt->Core.pv = NULL;
97 if (pMemNt->pvSecureMem)
98 {
99 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
100 pMemNt->pvSecureMem = NULL;
101 }
102
103 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
104 ExFreePool(pMemNt->apMdls[0]);
105 pMemNt->apMdls[0] = NULL;
106 pMemNt->cMdls = 0;
107 break;
108 }
109 AssertFailed();
110 break;
111
112 case RTR0MEMOBJTYPE_PAGE:
113 Assert(pMemNt->Core.pv);
114 if (g_pfnrtExFreePoolWithTag)
115 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
116 else
117 ExFreePool(pMemNt->Core.pv);
118 pMemNt->Core.pv = NULL;
119
120 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
121 IoFreeMdl(pMemNt->apMdls[0]);
122 pMemNt->apMdls[0] = NULL;
123 pMemNt->cMdls = 0;
124 break;
125
126 case RTR0MEMOBJTYPE_CONT:
127 Assert(pMemNt->Core.pv);
128 MmFreeContiguousMemory(pMemNt->Core.pv);
129 pMemNt->Core.pv = NULL;
130
131 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
132 IoFreeMdl(pMemNt->apMdls[0]);
133 pMemNt->apMdls[0] = NULL;
134 pMemNt->cMdls = 0;
135 break;
136
137 case RTR0MEMOBJTYPE_PHYS:
138 /* rtR0MemObjNativeEnterPhys? */
139 if (!pMemNt->Core.u.Phys.fAllocated)
140 {
141 Assert(!pMemNt->fAllocatedPagesForMdl);
142 /* Nothing to do here. */
143 break;
144 }
145 RT_FALL_THRU();
146
147 case RTR0MEMOBJTYPE_PHYS_NC:
148 if (pMemNt->fAllocatedPagesForMdl)
149 {
150 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
151 ExFreePool(pMemNt->apMdls[0]);
152 pMemNt->apMdls[0] = NULL;
153 pMemNt->cMdls = 0;
154 break;
155 }
156 AssertFailed();
157 break;
158
159 case RTR0MEMOBJTYPE_LOCK:
160 if (pMemNt->pvSecureMem)
161 {
162 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
163 pMemNt->pvSecureMem = NULL;
164 }
165 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
166 {
167 MmUnlockPages(pMemNt->apMdls[i]);
168 IoFreeMdl(pMemNt->apMdls[i]);
169 pMemNt->apMdls[i] = NULL;
170 }
171 break;
172
173 case RTR0MEMOBJTYPE_RES_VIRT:
174/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
175 {
176 }
177 else
178 {
179 }*/
180 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
181 return VERR_INTERNAL_ERROR;
182 break;
183
184 case RTR0MEMOBJTYPE_MAPPING:
185 {
186 Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
187 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
188 Assert(pMemNtParent);
189 if (pMemNtParent->cMdls)
190 {
191 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
192 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
193 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
194 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
195 }
196 else
197 {
198 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
199 && !pMemNtParent->Core.u.Phys.fAllocated);
200 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
201 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
202 }
203 pMemNt->Core.pv = NULL;
204 break;
205 }
206
207 default:
208 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
209 return VERR_INTERNAL_ERROR;
210 }
211
212 return VINF_SUCCESS;
213}
214
215
216DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
217{
218 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
219 RT_NOREF1(fExecutable);
220
221 /*
222 * Try to allocate the memory and create an MDL for it so
223 * we can query the physical addresses and do mappings later
224 * without running into out-of-memory conditions and similar problems.
225 */
226 int rc = VERR_NO_PAGE_MEMORY;
227 void *pv;
228 if (g_pfnrtExAllocatePoolWithTag)
229 pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
230 else
231 pv = ExAllocatePool(NonPagedPool, cb);
232 if (pv)
233 {
234 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
235 if (pMdl)
236 {
237 MmBuildMdlForNonPagedPool(pMdl);
238#ifdef RT_ARCH_AMD64
239 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
240#endif
241
242 /*
243 * Create the IPRT memory object.
244 */
245 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
246 if (pMemNt)
247 {
248 pMemNt->cMdls = 1;
249 pMemNt->apMdls[0] = pMdl;
250 *ppMem = &pMemNt->Core;
251 return VINF_SUCCESS;
252 }
253
254 rc = VERR_NO_MEMORY;
255 IoFreeMdl(pMdl);
256 }
257 ExFreePool(pv);
258 }
259 return rc;
260}
261
262
263DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
264{
265 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
266
267 /*
269 * Try to see if we get lucky first...
269 * (We could probably just assume we're lucky on NT4.)
270 */
271 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
272 if (RT_SUCCESS(rc))
273 {
274 size_t iPage = cb >> PAGE_SHIFT;
275 while (iPage-- > 0)
276 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
277 {
278 rc = VERR_NO_LOW_MEMORY;
279 break;
280 }
281 if (RT_SUCCESS(rc))
282 return rc;
283
284 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
285 RTR0MemObjFree(*ppMem, false);
286 *ppMem = NULL;
287 }
288
289 /*
290 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
291 */
292 if ( g_pfnrtMmAllocatePagesForMdl
293 && g_pfnrtMmFreePagesFromMdl
294 && g_pfnrtMmMapLockedPagesSpecifyCache)
295 {
296 PHYSICAL_ADDRESS Zero;
297 Zero.QuadPart = 0;
298 PHYSICAL_ADDRESS HighAddr;
299 HighAddr.QuadPart = _4G - 1;
300 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
301 if (pMdl)
302 {
303 if (MmGetMdlByteCount(pMdl) >= cb)
304 {
305 __try
306 {
307 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
308 FALSE /* no bug check on failure */, NormalPagePriority);
309 if (pv)
310 {
311 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
312 if (pMemNt)
313 {
314 pMemNt->fAllocatedPagesForMdl = true;
315 pMemNt->cMdls = 1;
316 pMemNt->apMdls[0] = pMdl;
317 *ppMem = &pMemNt->Core;
318 return VINF_SUCCESS;
319 }
320 MmUnmapLockedPages(pv, pMdl);
321 }
322 }
323 __except(EXCEPTION_EXECUTE_HANDLER)
324 {
325# ifdef LOG_ENABLED
326 NTSTATUS rcNt = GetExceptionCode();
327 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
328# endif
329 /* nothing */
330 }
331 }
332 g_pfnrtMmFreePagesFromMdl(pMdl);
333 ExFreePool(pMdl);
334 }
335 }
336
337 /*
338 * Fall back on contiguous memory...
339 */
340 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
341}
342
343
344/**
345 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
346 * and rtR0MemObjNativeAllocPhysNC() that takes a maximum physical address and an
347 * alignment in addition to the parameters of rtR0MemObjNativeAllocCont().
348 *
349 * @returns IPRT status code.
350 * @param ppMem Where to store the pointer to the ring-0 memory object.
351 * @param cb The size.
352 * @param fExecutable Whether the mapping should be executable or not.
353 * @param PhysHighest The highest physical address for the pages in the allocation.
354 * @param uAlignment The alignment of the physical memory to allocate.
355 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
356 */
357static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
358 size_t uAlignment)
359{
360 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
361 RT_NOREF1(fExecutable);
362
363 /*
364 * Allocate the memory and create an MDL for it.
365 */
366 PHYSICAL_ADDRESS PhysAddrHighest;
367 PhysAddrHighest.QuadPart = PhysHighest;
368 void *pv;
369 if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
370 {
371 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
372 PhysAddrLowest.QuadPart = 0;
373 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
374 pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
375 }
376 else if (uAlignment == PAGE_SIZE)
377 pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
378 else
379 return VERR_NOT_SUPPORTED;
380 if (!pv)
381 return VERR_NO_MEMORY;
382
383 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
384 if (pMdl)
385 {
386 MmBuildMdlForNonPagedPool(pMdl);
387#ifdef RT_ARCH_AMD64
388 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
389#endif
390
391 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
392 if (pMemNt)
393 {
394 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
395 pMemNt->cMdls = 1;
396 pMemNt->apMdls[0] = pMdl;
397 *ppMem = &pMemNt->Core;
398 return VINF_SUCCESS;
399 }
400
401 IoFreeMdl(pMdl);
402 }
403 MmFreeContiguousMemory(pv);
404 return VERR_NO_MEMORY;
405}
406
407
408DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
409{
410 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
411}
412
413
414DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
415{
416 /*
417 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
418 *
419 * This is preferable to using MmAllocateContiguousMemory because there are
420 * a few situations where the memory shouldn't be mapped, like for instance
421 * VT-x control memory. Since these are rather small allocations (one or
422 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
423 * request.
424 *
425 * If the allocation is big, the chances are *probably* not very good. The
426 * current limit is kind of random...
427 */
428 if ( cb < _128K
429 && uAlignment == PAGE_SIZE
430 && g_pfnrtMmAllocatePagesForMdl
431 && g_pfnrtMmFreePagesFromMdl)
432 {
433 PHYSICAL_ADDRESS Zero;
434 Zero.QuadPart = 0;
435 PHYSICAL_ADDRESS HighAddr;
436 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
437 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
438 if (pMdl)
439 {
440 if (MmGetMdlByteCount(pMdl) >= cb)
441 {
442 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
443 PFN_NUMBER Pfn = paPfns[0] + 1;
444 const size_t cPages = cb >> PAGE_SHIFT;
445 size_t iPage;
446 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
447 if (paPfns[iPage] != Pfn)
448 break;
449 if (iPage >= cPages)
450 {
451 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
452 if (pMemNt)
453 {
454 pMemNt->Core.u.Phys.fAllocated = true;
455 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
456 pMemNt->fAllocatedPagesForMdl = true;
457 pMemNt->cMdls = 1;
458 pMemNt->apMdls[0] = pMdl;
459 *ppMem = &pMemNt->Core;
460 return VINF_SUCCESS;
461 }
462 }
463 }
464 g_pfnrtMmFreePagesFromMdl(pMdl);
465 ExFreePool(pMdl);
466 }
467 }
468
469 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
470}
471
472
473DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
474{
475 if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
476 {
477 PHYSICAL_ADDRESS Zero;
478 Zero.QuadPart = 0;
479 PHYSICAL_ADDRESS HighAddr;
480 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
481 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
482 if (pMdl)
483 {
484 if (MmGetMdlByteCount(pMdl) >= cb)
485 {
486 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
487 if (pMemNt)
488 {
489 pMemNt->fAllocatedPagesForMdl = true;
490 pMemNt->cMdls = 1;
491 pMemNt->apMdls[0] = pMdl;
492 *ppMem = &pMemNt->Core;
493 return VINF_SUCCESS;
494 }
495 }
496 g_pfnrtMmFreePagesFromMdl(pMdl);
497 ExFreePool(pMdl);
498 }
499 return VERR_NO_MEMORY;
500 }
501 return VERR_NOT_SUPPORTED;
502}
503
504
505DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
506{
507 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
508
509 /*
510 * Validate the address range and create a descriptor for it.
511 */
512 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
513 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
514 return VERR_ADDRESS_TOO_BIG;
515
516 /*
517 * Create the IPRT memory object.
518 */
519 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
520 if (pMemNt)
521 {
522 pMemNt->Core.u.Phys.PhysBase = Phys;
523 pMemNt->Core.u.Phys.fAllocated = false;
524 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
525 *ppMem = &pMemNt->Core;
526 return VINF_SUCCESS;
527 }
528 return VERR_NO_MEMORY;
529}
530
531
532/**
533 * Internal worker for locking down pages.
534 *
535 * @return IPRT status code.
536 *
537 * @param ppMem Where to store the memory object pointer.
538 * @param pv First page.
539 * @param cb Number of bytes.
540 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
541 * and RTMEM_PROT_WRITE.
542 * @param R0Process The process \a pv and \a cb refer to.
543 */
544static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
545{
546 /*
547 * Calc the number of MDLs we need and allocate the memory object structure.
548 */
549 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
550 if (cb % MAX_LOCK_MEM_SIZE)
551 cMdls++;
552 if (cMdls >= UINT32_MAX)
553 return VERR_OUT_OF_RANGE;
554 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
555 RTR0MEMOBJTYPE_LOCK, pv, cb);
556 if (!pMemNt)
557 return VERR_NO_MEMORY;
558
559 /*
560 * Loop locking down the sub parts of the memory.
561 */
562 int rc = VINF_SUCCESS;
563 size_t cbTotal = 0;
564 uint8_t *pb = (uint8_t *)pv;
565 uint32_t iMdl;
566 for (iMdl = 0; iMdl < cMdls; iMdl++)
567 {
568 /*
569 * Calc the Mdl size and allocate it.
570 */
571 size_t cbCur = cb - cbTotal;
572 if (cbCur > MAX_LOCK_MEM_SIZE)
573 cbCur = MAX_LOCK_MEM_SIZE;
574 AssertMsg(cbCur, ("cbCur: 0!\n"));
575 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
576 if (!pMdl)
577 {
578 rc = VERR_NO_MEMORY;
579 break;
580 }
581
582 /*
583 * Lock the pages.
584 */
585 __try
586 {
587 MmProbeAndLockPages(pMdl,
588 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
589 fAccess == RTMEM_PROT_READ
590 ? IoReadAccess
591 : fAccess == RTMEM_PROT_WRITE
592 ? IoWriteAccess
593 : IoModifyAccess);
594
595 pMemNt->apMdls[iMdl] = pMdl;
596 pMemNt->cMdls++;
597 }
598 __except(EXCEPTION_EXECUTE_HANDLER)
599 {
600 IoFreeMdl(pMdl);
601 rc = VERR_LOCK_FAILED;
602 break;
603 }
604
605 if ( R0Process != NIL_RTR0PROCESS
606 && g_pfnrtMmSecureVirtualMemory
607 && g_pfnrtMmUnsecureVirtualMemory)
608 {
609 /* Make sure the user process can't change the allocation. */
610 pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
611 fAccess & RTMEM_PROT_WRITE
612 ? PAGE_READWRITE
613 : PAGE_READONLY);
614 if (!pMemNt->pvSecureMem)
615 {
616 rc = VERR_NO_MEMORY;
617 break;
618 }
619 }
620
621 /* next */
622 cbTotal += cbCur;
623 pb += cbCur;
624 }
625 if (RT_SUCCESS(rc))
626 {
627 Assert(pMemNt->cMdls == cMdls);
628 pMemNt->Core.u.Lock.R0Process = R0Process;
629 *ppMem = &pMemNt->Core;
630 return rc;
631 }
632
633 /*
634 * We failed, perform cleanups.
635 */
636 while (iMdl-- > 0)
637 {
638 MmUnlockPages(pMemNt->apMdls[iMdl]);
639 IoFreeMdl(pMemNt->apMdls[iMdl]);
640 pMemNt->apMdls[iMdl] = NULL;
641 }
642 if (pMemNt->pvSecureMem)
643 {
644 if (g_pfnrtMmUnsecureVirtualMemory)
645 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
646 pMemNt->pvSecureMem = NULL;
647 }
648
649 rtR0MemObjDelete(&pMemNt->Core);
650 return rc;
651}
652
653
654DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
655 RTR0PROCESS R0Process)
656{
657 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
658 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
659 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
660}
661
662
663DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
664{
665 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
666}
667
668
669DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
670{
671 /*
672 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
673 */
674 RT_NOREF4(ppMem, pvFixed, cb, uAlignment);
675 return VERR_NOT_SUPPORTED;
676}
677
678
679DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
680 RTR0PROCESS R0Process)
681{
682 /*
683 * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
684 */
685 RT_NOREF5(ppMem, R3PtrFixed, cb, uAlignment, R0Process);
686 return VERR_NOT_SUPPORTED;
687}
688
689
690/**
691 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
692 *
693 * @returns IPRT status code.
694 * @param ppMem Where to store the memory object for the mapping.
695 * @param pMemToMap The memory object to map.
696 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
697 * @param uAlignment The alignment requirement for the mapping.
698 * @param fProt The desired page protection for the mapping.
699 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
700 * If not nil, it's the current process.
701 */
702static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
703 unsigned fProt, RTR0PROCESS R0Process)
704{
705 int rc = VERR_MAP_FAILED;
706
707 /*
708 * Check that the specified alignment is supported.
709 */
710 if (uAlignment > PAGE_SIZE)
711 return VERR_NOT_SUPPORTED;
712
713 /*
714 * There are two basic cases here, either we've got an MDL and can
715 * map it using MmMapLockedPages, or we've got a contiguous physical
716 * range (MMIO most likely) and can use MmMapIoSpace.
717 */
718 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
719 if (pMemNtToMap->cMdls)
720 {
721 /* Don't attempt to map locked regions with more than one MDL. */
722 if (pMemNtToMap->cMdls != 1)
723 return VERR_NOT_SUPPORTED;
724
725 /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
726 if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
727 return VERR_NOT_SUPPORTED;
728
729 /* we can't map anything to the first page, sorry. */
730 if (pvFixed == 0)
731 return VERR_NOT_SUPPORTED;
732
733 /* Only one system mapping for now - we haven't had time to figure out the MDL restrictions yet. */
734 if ( pMemNtToMap->Core.uRel.Parent.cMappings
735 && R0Process == NIL_RTR0PROCESS)
736 {
737 if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
738 return VERR_NOT_SUPPORTED;
739 uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
740 while (iMapping-- > 0)
741 {
742 PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
743 if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
744 || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
745 return VERR_NOT_SUPPORTED;
746 }
747 }
748
749 __try
750 {
751 /** @todo uAlignment */
752 /** @todo How to set the protection on the pages? */
753 void *pv;
754 if (g_pfnrtMmMapLockedPagesSpecifyCache)
755 pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
756 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
757 MmCached,
758 pvFixed != (void *)-1 ? pvFixed : NULL,
759 FALSE /* no bug check on failure */,
760 NormalPagePriority);
761 else
762 pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
763 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
764 if (pv)
765 {
766 NOREF(fProt);
767
768 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
769 pMemNtToMap->Core.cb);
770 if (pMemNt)
771 {
772 pMemNt->Core.u.Mapping.R0Process = R0Process;
773 *ppMem = &pMemNt->Core;
774 return VINF_SUCCESS;
775 }
776
777 rc = VERR_NO_MEMORY;
778 MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
779 }
780 }
781 __except(EXCEPTION_EXECUTE_HANDLER)
782 {
783#ifdef LOG_ENABLED
784 NTSTATUS rcNt = GetExceptionCode();
785 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
786#endif
787
788 /* nothing */
789 rc = VERR_MAP_FAILED;
790 }
791
792 }
793 else
794 {
795 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
796 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
797
798 /* cannot map phys mem to user space (yet). */
799 if (R0Process != NIL_RTR0PROCESS)
800 return VERR_NOT_SUPPORTED;
801
802 /** @todo uAlignment */
803 /** @todo How to set the protection on the pages? */
804 PHYSICAL_ADDRESS Phys;
805 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
806 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
807 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
808 if (pv)
809 {
810 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
811 pMemNtToMap->Core.cb);
812 if (pMemNt)
813 {
814 pMemNt->Core.u.Mapping.R0Process = R0Process;
815 *ppMem = &pMemNt->Core;
816 return VINF_SUCCESS;
817 }
818
819 rc = VERR_NO_MEMORY;
820 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
821 }
822 }
823
824 NOREF(uAlignment); NOREF(fProt);
825 return rc;
826}
827
828
829DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
830 unsigned fProt, size_t offSub, size_t cbSub)
831{
832 AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
833 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
834}
835
836
837DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed,
838 size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
839{
840 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
841 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
842}
843
844
845DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
846{
847#if 0
848 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
849#endif
850
851 /*
852 * There seem to be some issues with the MmProtectMdlSystemAddress API, so this
853 * code is disabled until it has been tested with the driver verifier.
854 */
855#if 0
856 /*
857 * The API we've got requires a kernel mapping.
858 */
859 if ( pMemNt->cMdls
860 && g_pfnrtMmProtectMdlSystemAddress
861 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
862 && pMemNt->Core.pv != NULL
863 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
864 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
865 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
866 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
867 && pMemNt->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
868 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
869 && pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS) ) )
870 {
871 /* Convert the protection. */
872 LOCK_OPERATION enmLockOp;
873 ULONG fAccess;
874 switch (fProt)
875 {
876 case RTMEM_PROT_NONE:
877 fAccess = PAGE_NOACCESS;
878 enmLockOp = IoReadAccess;
879 break;
880 case RTMEM_PROT_READ:
881 fAccess = PAGE_READONLY;
882 enmLockOp = IoReadAccess;
883 break;
884 case RTMEM_PROT_WRITE:
885 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
886 fAccess = PAGE_READWRITE;
887 enmLockOp = IoModifyAccess;
888 break;
889 case RTMEM_PROT_EXEC:
890 fAccess = PAGE_EXECUTE;
891 enmLockOp = IoReadAccess;
892 break;
893 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
894 fAccess = PAGE_EXECUTE_READ;
895 enmLockOp = IoReadAccess;
896 break;
897 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
898 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
899 fAccess = PAGE_EXECUTE_READWRITE;
900 enmLockOp = IoModifyAccess;
901 break;
902 default:
903 AssertFailedReturn(VERR_INVALID_FLAGS);
904 }
905
906 NTSTATUS rcNt = STATUS_SUCCESS;
907# if 0 /** @todo test this against the verifier. */
908 if (offSub == 0 && pMemNt->Core.cb == cbSub)
909 {
910 uint32_t iMdl = pMemNt->cMdls;
911 while (iMdl-- > 0)
912 {
913 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[iMdl], fAccess);
914 if (!NT_SUCCESS(rcNt))
915 break;
916 }
917 }
918 else
919# endif
920 {
921 /*
922 * We ASSUME the following here:
923 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
924 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
925 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
926 * exact same ranges prior to freeing them.
927 *
928 * So, we lock the pages temporarily, call the API and unlock them.
929 */
930 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
931 while (cbSub > 0 && NT_SUCCESS(rcNt))
932 {
933 size_t cbCur = cbSub;
934 if (cbCur > MAX_LOCK_MEM_SIZE)
935 cbCur = MAX_LOCK_MEM_SIZE;
936 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
937 if (pMdl)
938 {
939 __try
940 {
941 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
942 }
943 __except(EXCEPTION_EXECUTE_HANDLER)
944 {
945 rcNt = GetExceptionCode();
946 }
947 if (NT_SUCCESS(rcNt))
948 {
949 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
950 MmUnlockPages(pMdl);
951 }
952 IoFreeMdl(pMdl);
953 }
954 else
955 rcNt = STATUS_NO_MEMORY;
956 pbCur += cbCur;
957 cbSub -= cbCur;
958 }
959 }
960
961 if (NT_SUCCESS(rcNt))
962 return VINF_SUCCESS;
963 return RTErrConvertFromNtStatus(rcNt);
964 }
965#else
966 RT_NOREF4(pMem, offSub, cbSub, fProt);
967#endif
968
969 return VERR_NOT_SUPPORTED;
970}
971
972
973DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
974{
975 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
976
977 if (pMemNt->cMdls)
978 {
979 if (pMemNt->cMdls == 1)
980 {
981 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
982 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
983 }
984
985 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
986 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
987 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
988 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
989 }
990
991 switch (pMemNt->Core.enmType)
992 {
993 case RTR0MEMOBJTYPE_MAPPING:
994 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
995
996 case RTR0MEMOBJTYPE_PHYS:
997 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
998
999 case RTR0MEMOBJTYPE_PAGE:
1000 case RTR0MEMOBJTYPE_PHYS_NC:
1001 case RTR0MEMOBJTYPE_LOW:
1002 case RTR0MEMOBJTYPE_CONT:
1003 case RTR0MEMOBJTYPE_LOCK:
1004 default:
1005 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
1006 case RTR0MEMOBJTYPE_RES_VIRT:
1007 return NIL_RTHCPHYS;
1008 }
1009}
1010
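Below is a minimal, hedged sketch of how ring-0 code typically exercises these native workers through the public IPRT RTR0MemObj API. RTR0MemObjAllocPage, RTR0MemObjAddress, RTR0MemObjGetPagePhysAddr and RTR0MemObjFree are the documented IPRT entry points; the helper name and the logging are illustrative assumptions, not part of this file:

#include <iprt/memobj.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>

/* Allocate one page-backed buffer, look up the physical address of its first
 * page, then free it.  On NT each RTR0MemObj* call lands in the corresponding
 * rtR0MemObjNative* worker in the file above. */
static int exampleAllocAndQuery(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);           /* kernel mapping */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);
        Log(("exampleAllocAndQuery: pv=%p Phys=%RHp\n", pv, Phys));
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}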