VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@91483

Last change on this file was made in revision 91483, checked in by vboxsync, 3 years ago

IPRT/memobj: Passing pszTag around...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 46.0 KB
1/* $Id: memobj-r0drv-nt.cpp 91483 2021-09-30 00:19:19Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/memobj.h>
34#include <iprt/alloc.h>
35#include <iprt/assert.h>
36#include <iprt/err.h>
37#include <iprt/log.h>
38#include <iprt/param.h>
39#include <iprt/string.h>
40#include <iprt/process.h>
41#include "internal/memobj.h"
42#include "internal-r0drv-nt.h"
43
44
45/*********************************************************************************************************************************
46* Defined Constants And Macros *
47*********************************************************************************************************************************/
48/** Maximum number of bytes we try to lock down in one go.
49 * The limit is supposed to be just below 256MB, but in practice it appears
50 * to be much lower. The values here have been determined experimentally.
51 */
52#ifdef RT_ARCH_X86
53# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
54#endif
55#ifdef RT_ARCH_AMD64
56# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
57#endif
58
59/* Newer WDK constants: */
60#ifndef MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS
61# define MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS 0x20
62#endif
63#ifndef MM_ALLOCATE_FAST_LARGE_PAGES
64# define MM_ALLOCATE_FAST_LARGE_PAGES 0x40
65#endif
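/* A minimal sketch (not part of the build) of how MAX_LOCK_MEM_SIZE translates into MDL counts;
   it mirrors the chunking calculation used by rtR0MemObjNtLock() further down in this file.  The
   helper name is purely illustrative. */
#if 0
static uint32_t rtR0MemObjNtExampleMdlsNeeded(size_t cb)
{
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;      /* one MDL per full chunk... */
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;                                /* ...plus one for the remainder. */
    /* e.g. locking 100MB on AMD64 (24MB chunks) needs 5 MDLs: 4 full chunks + 1 partial one. */
    return (uint32_t)cMdls;
}
#endif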
66
67
68/*********************************************************************************************************************************
69* Structures and Typedefs *
70*********************************************************************************************************************************/
71/**
72 * The NT version of the memory object structure.
73 */
74typedef struct RTR0MEMOBJNT
75{
76 /** The core structure. */
77 RTR0MEMOBJINTERNAL Core;
78    /** Set if MmAllocatePagesForMdl() was used for the allocation. */
79 bool fAllocatedPagesForMdl;
80    /** Set if this is a sub-section of the parent. */
81 bool fSubMapping;
82 /** Pointer returned by MmSecureVirtualMemory */
83 PVOID pvSecureMem;
84 /** The number of PMDLs (memory descriptor lists) in the array. */
85 uint32_t cMdls;
86 /** Array of MDL pointers. (variable size) */
87 PMDL apMdls[1];
88} RTR0MEMOBJNT;
89/** Pointer to the NT version of the memory object structure. */
90typedef RTR0MEMOBJNT *PRTR0MEMOBJNT;
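/* Note on the layout above: apMdls[] is a variable size array, so objects that need more than one
   MDL must be allocated with room for the whole array.  A minimal sketch of that size calculation;
   rtR0MemObjNtLock() further down passes exactly this size to rtR0MemObjNew().  Illustrative only. */
#if 0
static size_t rtR0MemObjNtExampleObjectSize(uint32_t cMdls)
{
    /* Size of an RTR0MEMOBJNT with room for cMdls MDL pointers (apMdls is the variable part). */
    return RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]);
}
#endif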
91
92
93
94DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
95{
96 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
97
98 /*
99 * Deal with it on a per type basis (just as a variation).
100 */
101 switch (pMemNt->Core.enmType)
102 {
103 case RTR0MEMOBJTYPE_LOW:
104 if (pMemNt->fAllocatedPagesForMdl)
105 {
106 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
107 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
108 pMemNt->Core.pv = NULL;
109 if (pMemNt->pvSecureMem)
110 {
111 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
112 pMemNt->pvSecureMem = NULL;
113 }
114
115 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
116 ExFreePool(pMemNt->apMdls[0]);
117 pMemNt->apMdls[0] = NULL;
118 pMemNt->cMdls = 0;
119 break;
120 }
121 AssertFailed();
122 break;
123
124 case RTR0MEMOBJTYPE_PAGE:
125 Assert(pMemNt->Core.pv);
126 if (pMemNt->fAllocatedPagesForMdl)
127 {
128 Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
129 Assert(pMemNt->pvSecureMem == NULL);
130 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
131 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
132 ExFreePool(pMemNt->apMdls[0]);
133 }
134 else
135 {
136 if (g_pfnrtExFreePoolWithTag)
137 g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
138 else
139 ExFreePool(pMemNt->Core.pv);
140
141 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
142 IoFreeMdl(pMemNt->apMdls[0]);
143 }
144 pMemNt->Core.pv = NULL;
145 pMemNt->apMdls[0] = NULL;
146 pMemNt->cMdls = 0;
147 break;
148
149 case RTR0MEMOBJTYPE_CONT:
150 Assert(pMemNt->Core.pv);
151 MmFreeContiguousMemory(pMemNt->Core.pv);
152 pMemNt->Core.pv = NULL;
153
154 Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
155 IoFreeMdl(pMemNt->apMdls[0]);
156 pMemNt->apMdls[0] = NULL;
157 pMemNt->cMdls = 0;
158 break;
159
160 case RTR0MEMOBJTYPE_PHYS:
161 /* rtR0MemObjNativeEnterPhys? */
162 if (!pMemNt->Core.u.Phys.fAllocated)
163 {
164 Assert(!pMemNt->fAllocatedPagesForMdl);
165 /* Nothing to do here. */
166 break;
167 }
168 RT_FALL_THRU();
169
170 case RTR0MEMOBJTYPE_PHYS_NC:
171 if (pMemNt->fAllocatedPagesForMdl)
172 {
173 g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
174 ExFreePool(pMemNt->apMdls[0]);
175 pMemNt->apMdls[0] = NULL;
176 pMemNt->cMdls = 0;
177 break;
178 }
179 AssertFailed();
180 break;
181
182 case RTR0MEMOBJTYPE_LOCK:
183 if (pMemNt->pvSecureMem)
184 {
185 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
186 pMemNt->pvSecureMem = NULL;
187 }
188 for (uint32_t i = 0; i < pMemNt->cMdls; i++)
189 {
190 MmUnlockPages(pMemNt->apMdls[i]);
191 IoFreeMdl(pMemNt->apMdls[i]);
192 pMemNt->apMdls[i] = NULL;
193 }
194 break;
195
196 case RTR0MEMOBJTYPE_RES_VIRT:
197/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
198 {
199 }
200 else
201 {
202 }*/
203 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
204 return VERR_INTERNAL_ERROR;
205 break;
206
207 case RTR0MEMOBJTYPE_MAPPING:
208 {
209 PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
210 Assert(pMemNtParent);
211 Assert(pMemNt->Core.pv);
212 Assert((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));
213 if (pMemNtParent->cMdls)
214 {
215 Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
216 Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
217 || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
218 if (!pMemNt->cMdls)
219 MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
220 else
221 {
222 MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
223 IoFreeMdl(pMemNt->apMdls[0]);
224 pMemNt->apMdls[0] = NULL;
225 }
226 }
227 else
228 {
229 Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
230 && !pMemNtParent->Core.u.Phys.fAllocated);
231 Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
232 Assert(!pMemNt->fSubMapping);
233 MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
234 }
235 pMemNt->Core.pv = NULL;
236 break;
237 }
238
239 default:
240 AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
241 return VERR_INTERNAL_ERROR;
242 }
243
244 return VINF_SUCCESS;
245}
246
247
248DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
249{
250 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
251 RT_NOREF1(fExecutable);
252
253 /*
254 * Use MmAllocatePagesForMdl if the allocation is a little bit big.
255 */
256 int rc = VERR_NO_PAGE_MEMORY;
257 if ( cb > _1M
258 && g_pfnrtMmAllocatePagesForMdl
259 && g_pfnrtMmFreePagesFromMdl
260 && g_pfnrtMmMapLockedPagesSpecifyCache)
261 {
262 PHYSICAL_ADDRESS Zero;
263 Zero.QuadPart = 0;
264 PHYSICAL_ADDRESS HighAddr;
265 HighAddr.QuadPart = MAXLONGLONG;
266 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
267 if (pMdl)
268 {
269 if (MmGetMdlByteCount(pMdl) >= cb)
270 {
271 __try
272 {
273 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
274 FALSE /* no bug check on failure */, NormalPagePriority);
275 if (pv)
276 {
277#ifdef RT_ARCH_AMD64
278 if (fExecutable)
279 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
280#endif
281
282 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
283 if (pMemNt)
284 {
285 pMemNt->fAllocatedPagesForMdl = true;
286 pMemNt->cMdls = 1;
287 pMemNt->apMdls[0] = pMdl;
288 *ppMem = &pMemNt->Core;
289 return VINF_SUCCESS;
290 }
291 MmUnmapLockedPages(pv, pMdl);
292 }
293 }
294 __except(EXCEPTION_EXECUTE_HANDLER)
295 {
296# ifdef LOG_ENABLED
297 NTSTATUS rcNt = GetExceptionCode();
298 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
299# endif
300 /* nothing */
301 }
302 }
303 g_pfnrtMmFreePagesFromMdl(pMdl);
304 ExFreePool(pMdl);
305 }
306 }
307
308 /*
309     * Try to allocate the memory and create an MDL for it so
310 * we can query the physical addresses and do mappings later
311 * without running into out-of-memory conditions and similar problems.
312 */
313 void *pv;
314 if (g_pfnrtExAllocatePoolWithTag)
315 pv = g_pfnrtExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
316 else
317 pv = ExAllocatePool(NonPagedPool, cb);
318 if (pv)
319 {
320 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
321 if (pMdl)
322 {
323 MmBuildMdlForNonPagedPool(pMdl);
324#ifdef RT_ARCH_AMD64
325 if (fExecutable)
326 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
327#endif
328
329 /*
330 * Create the IPRT memory object.
331 */
332 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
333 if (pMemNt)
334 {
335 pMemNt->cMdls = 1;
336 pMemNt->apMdls[0] = pMdl;
337 *ppMem = &pMemNt->Core;
338 return VINF_SUCCESS;
339 }
340
341 rc = VERR_NO_MEMORY;
342 IoFreeMdl(pMdl);
343 }
344 ExFreePool(pv);
345 }
346 return rc;
347}
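/* A minimal sketch of how a ring-0 caller reaches this backend through the public IPRT API
   (iprt/memobj.h).  The *Tag function names and signatures are assumed from that header; the
   helper itself is purely illustrative and not part of this file. */
#if 0
static int rtR0MemObjNtExampleAllocPage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPageTag(&hMemObj, _64K, false /*fExecutable*/, "example");
    if (RT_SUCCESS(rc))
    {
        void    *pv     = RTR0MemObjAddress(hMemObj);               /* kernel mapping created above  */
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);    /* per-page physical address     */
        RT_NOREF(pv, HCPhys);
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}
#endif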
348
349
350/**
351 * Helper for rtR0MemObjNativeAllocLarge that verifies the result.
352 */
353static bool rtR0MemObjNtVerifyLargePageAlloc(PMDL pMdl, size_t cb, size_t cbLargePage)
354{
355 if (MmGetMdlByteCount(pMdl) >= cb)
356 {
357 PPFN_NUMBER const paPfns = MmGetMdlPfnArray(pMdl);
358 size_t const cPagesPerLargePage = cbLargePage >> PAGE_SHIFT;
359 size_t const cLargePages = cb / cbLargePage;
360 size_t iPage = 0;
361 for (size_t iLargePage = 0; iLargePage < cLargePages; iLargePage++)
362 {
363 PFN_NUMBER Pfn = paPfns[iPage];
364            if (!(Pfn & ((cbLargePage >> PAGE_SHIFT) - 1U)))
365 {
366 for (size_t iSubPage = 1; iSubPage < cPagesPerLargePage; iSubPage++)
367 {
368 iPage++;
369 Pfn++;
370 if (paPfns[iPage] == Pfn)
371 { /* likely */ }
372 else
373 {
374 Log(("rtR0MemObjNativeAllocLarge: Subpage %#zu in large page #%zu is not contiguous: %#x, expected %#x\n",
375 iSubPage, iLargePage, paPfns[iPage], Pfn));
376 return false;
377 }
378 }
379 }
380 else
381 {
382 Log(("rtR0MemObjNativeAllocLarge: Large page #%zu is misaligned: %#x, cbLargePage=%#zx\n",
383 iLargePage, Pfn, cbLargePage));
384 return false;
385 }
386 }
387 return true;
388 }
389 Log(("rtR0MemObjNativeAllocLarge: Got back too few pages: %#zx, requested %#zx\n", MmGetMdlByteCount(pMdl), cb));
390 return false;
391}
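/* A minimal sketch of the alignment test used above, worked for a 2MB large page with 4KB base
   pages: cbLargePage >> PAGE_SHIFT is 512, so the first PFN of each large page must be a multiple
   of 512 and the remaining 511 PFNs must follow consecutively.  Illustrative only. */
#if 0
static bool rtR0MemObjNtExampleIsFirstPfnAligned(PFN_NUMBER Pfn, size_t cbLargePage)
{
    size_t const cPagesPerLargePage = cbLargePage >> PAGE_SHIFT;    /* _2M >> 12 == 512 */
    return (Pfn & (cPagesPerLargePage - 1)) == 0;
}
#endif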
392
393
394DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
395 const char *pszTag)
396{
397 /*
398 * Need the MmAllocatePagesForMdlEx function so we can specify flags.
399 */
400 if ( g_uRtNtVersion >= RTNT_MAKE_VERSION(6,1) /* Windows 7+ */
401 && g_pfnrtMmAllocatePagesForMdlEx
402 && g_pfnrtMmFreePagesFromMdl
403 && g_pfnrtMmMapLockedPagesSpecifyCache)
404 {
405        ULONG fNtFlags = MM_ALLOCATE_FULLY_REQUIRED             /* W7+: Make it fail if we don't get all we ask for. */
406                       | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS; /* W7+: The SkipBytes chunks must be physically contiguous. */
407 if ((fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST) && g_uRtNtVersion >= RTNT_MAKE_VERSION(6, 2))
408 fNtFlags |= MM_ALLOCATE_FAST_LARGE_PAGES; /* W8+: Don't try too hard, just fail if not enough handy. */
409
410 PHYSICAL_ADDRESS Zero;
411 Zero.QuadPart = 0;
412
413 PHYSICAL_ADDRESS HighAddr;
414 HighAddr.QuadPart = MAXLONGLONG;
415
416 PHYSICAL_ADDRESS Skip;
417 Skip.QuadPart = cbLargePage;
418
419 int rc;
420 PMDL const pMdl = g_pfnrtMmAllocatePagesForMdlEx(Zero, HighAddr, Skip, cb, MmCached, fNtFlags);
421 if (pMdl)
422 {
423 /* Verify the result. */
424 if (rtR0MemObjNtVerifyLargePageAlloc(pMdl, cb, cbLargePage))
425 {
426 /*
427                 * Map the allocation into kernel space. Unless the memory is already mapped
428                 * somewhere (which it actually seems to be), it's unlikely that we'll get a
429                 * large page aligned mapping back here...
430 */
431 __try
432 {
433 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
434 FALSE /* no bug check on failure */, NormalPagePriority);
435 if (pv)
436 {
437 /*
438 * Create the memory object.
439 */
440 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb, pszTag);
441 if (pMemNt)
442 {
443 pMemNt->fAllocatedPagesForMdl = true;
444 pMemNt->cMdls = 1;
445 pMemNt->apMdls[0] = pMdl;
446 *ppMem = &pMemNt->Core;
447 return VINF_SUCCESS;
448 }
449
450 MmUnmapLockedPages(pv, pMdl);
451 }
452 }
453 __except(EXCEPTION_EXECUTE_HANDLER)
454 {
455#ifdef LOG_ENABLED
456 NTSTATUS rcNt = GetExceptionCode();
457 Log(("rtR0MemObjNativeAllocLarge: Exception Code %#x\n", rcNt));
458#endif
459 /* nothing */
460 }
461 }
462
463 g_pfnrtMmFreePagesFromMdl(pMdl);
464 ExFreePool(pMdl);
465 rc = VERR_NO_MEMORY;
466 }
467 else
468 rc = fFlags & RTMEMOBJ_ALLOC_LARGE_F_FAST ? VERR_TRY_AGAIN : VERR_NO_MEMORY;
469 return rc;
470 }
471
472 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
473}
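/* A minimal sketch of a caller-side pattern for the RTMEMOBJ_ALLOC_LARGE_F_FAST path above: ask
   for readily available large pages first and fall back to a thorough attempt on VERR_TRY_AGAIN.
   RTR0MemObjAllocLargeTag() is the public entry point assumed from iprt/memobj.h; the helper and
   the zero flags value for the fallback are illustrative assumptions. */
#if 0
static int rtR0MemObjNtExampleAllocLarge(PRTR0MEMOBJ phMemObj, size_t cb)
{
    int rc = RTR0MemObjAllocLargeTag(phMemObj, cb, _2M, RTMEMOBJ_ALLOC_LARGE_F_FAST, "example");
    if (rc == VERR_TRY_AGAIN)
        rc = RTR0MemObjAllocLargeTag(phMemObj, cb, _2M, 0 /*fFlags*/, "example");
    return rc;
}
#endif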
474
475
476DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
477{
478 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
479
480 /*
481     * See if we get lucky first...
482 * (We could probably just assume we're lucky on NT4.)
483 */
484 int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable, pszTag);
485 if (RT_SUCCESS(rc))
486 {
487 size_t iPage = cb >> PAGE_SHIFT;
488 while (iPage-- > 0)
489 if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
490 {
491 rc = VERR_NO_LOW_MEMORY;
492 break;
493 }
494 if (RT_SUCCESS(rc))
495 return rc;
496
497 /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
498 RTR0MemObjFree(*ppMem, false);
499 *ppMem = NULL;
500 }
501
502 /*
503 * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
504 */
505 if ( g_pfnrtMmAllocatePagesForMdl
506 && g_pfnrtMmFreePagesFromMdl
507 && g_pfnrtMmMapLockedPagesSpecifyCache)
508 {
509 PHYSICAL_ADDRESS Zero;
510 Zero.QuadPart = 0;
511 PHYSICAL_ADDRESS HighAddr;
512 HighAddr.QuadPart = _4G - 1;
513 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
514 if (pMdl)
515 {
516 if (MmGetMdlByteCount(pMdl) >= cb)
517 {
518 __try
519 {
520 void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
521 FALSE /* no bug check on failure */, NormalPagePriority);
522 if (pv)
523 {
524 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb, pszTag);
525 if (pMemNt)
526 {
527 pMemNt->fAllocatedPagesForMdl = true;
528 pMemNt->cMdls = 1;
529 pMemNt->apMdls[0] = pMdl;
530 *ppMem = &pMemNt->Core;
531 return VINF_SUCCESS;
532 }
533 MmUnmapLockedPages(pv, pMdl);
534 }
535 }
536 __except(EXCEPTION_EXECUTE_HANDLER)
537 {
538# ifdef LOG_ENABLED
539 NTSTATUS rcNt = GetExceptionCode();
540 Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
541# endif
542 /* nothing */
543 }
544 }
545 g_pfnrtMmFreePagesFromMdl(pMdl);
546 ExFreePool(pMdl);
547 }
548 }
549
550 /*
551 * Fall back on contiguous memory...
552 */
553 return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable, pszTag);
554}
555
556
557/**
558 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
559 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
560 * to what rtR0MemObjNativeAllocCont() does.
561 *
562 * @returns IPRT status code.
563 * @param ppMem Where to store the pointer to the ring-0 memory object.
564 * @param cb The size.
565 * @param fExecutable Whether the mapping should be executable or not.
566 * @param PhysHighest    The highest physical address for the pages in the allocation.
567 * @param uAlignment The alignment of the physical memory to allocate.
568 * Supported values are PAGE_SIZE, _2M, _4M and _1G.
569 * @param pszTag Allocation tag used for statistics and such.
570 */
571static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
572 size_t uAlignment, const char *pszTag)
573{
574 AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
575 RT_NOREF1(fExecutable);
576
577 /*
578 * Allocate the memory and create an MDL for it.
579 */
580 PHYSICAL_ADDRESS PhysAddrHighest;
581 PhysAddrHighest.QuadPart = PhysHighest;
582 void *pv;
583 if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
584 {
585 PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
586 PhysAddrLowest.QuadPart = 0;
587 PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
588 pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
589 }
590 else if (uAlignment == PAGE_SIZE)
591 pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
592 else
593 return VERR_NOT_SUPPORTED;
594 if (!pv)
595 return VERR_NO_MEMORY;
596
597 PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
598 if (pMdl)
599 {
600 MmBuildMdlForNonPagedPool(pMdl);
601#ifdef RT_ARCH_AMD64
602 if (fExecutable)
603 MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
604#endif
605
606 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb, pszTag);
607 if (pMemNt)
608 {
609 pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
610 pMemNt->cMdls = 1;
611 pMemNt->apMdls[0] = pMdl;
612 *ppMem = &pMemNt->Core;
613 return VINF_SUCCESS;
614 }
615
616 IoFreeMdl(pMdl);
617 }
618 MmFreeContiguousMemory(pv);
619 return VERR_NO_MEMORY;
620}
621
622
623DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
624{
625 return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */, pszTag);
626}
627
628
629DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
630 const char *pszTag)
631{
632 /*
633 * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
634 *
635 * This is preferable to using MmAllocateContiguousMemory because there are
636 * a few situations where the memory shouldn't be mapped, like for instance
637 * VT-x control memory. Since these are rather small allocations (one or
638 * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
639 * request.
640 *
641 * If the allocation is big, the chances are *probably* not very good. The
642 * current limit is kind of random...
643 */
644 if ( cb < _128K
645 && uAlignment == PAGE_SIZE
646 && g_pfnrtMmAllocatePagesForMdl
647 && g_pfnrtMmFreePagesFromMdl)
648 {
649 PHYSICAL_ADDRESS Zero;
650 Zero.QuadPart = 0;
651 PHYSICAL_ADDRESS HighAddr;
652 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
653 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
654 if (pMdl)
655 {
656 if (MmGetMdlByteCount(pMdl) >= cb)
657 {
658 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
659 PFN_NUMBER Pfn = paPfns[0] + 1;
660 const size_t cPages = cb >> PAGE_SHIFT;
661 size_t iPage;
662 for (iPage = 1; iPage < cPages; iPage++, Pfn++)
663 if (paPfns[iPage] != Pfn)
664 break;
665 if (iPage >= cPages)
666 {
667 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
668 if (pMemNt)
669 {
670 pMemNt->Core.u.Phys.fAllocated = true;
671 pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
672 pMemNt->fAllocatedPagesForMdl = true;
673 pMemNt->cMdls = 1;
674 pMemNt->apMdls[0] = pMdl;
675 *ppMem = &pMemNt->Core;
676 return VINF_SUCCESS;
677 }
678 }
679 }
680 g_pfnrtMmFreePagesFromMdl(pMdl);
681 ExFreePool(pMdl);
682 }
683 }
684
685 return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment, pszTag);
686}
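/* A minimal sketch of the kind of small, page-aligned physical allocation the comment above has
   in mind, made through the public API (iprt/memobj.h); the *Tag name is assumed from that header
   and the helper is illustrative only. */
#if 0
static int rtR0MemObjNtExampleAllocPhys(PRTR0MEMOBJ phMemObj)
{
    /* One page, anywhere in physical memory; no kernel mapping is created for PHYS objects. */
    return RTR0MemObjAllocPhysTag(phMemObj, PAGE_SIZE, NIL_RTHCPHYS, "example");
}
#endif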
687
688
689DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
690{
691 if (g_pfnrtMmAllocatePagesForMdl && g_pfnrtMmFreePagesFromMdl)
692 {
693 /** @todo use the Ex version with the fail-if-not-all-requested-pages flag
694 * when possible. */
695 PHYSICAL_ADDRESS Zero;
696 Zero.QuadPart = 0;
697 PHYSICAL_ADDRESS HighAddr;
698 HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
699 PMDL pMdl = g_pfnrtMmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
700 if (pMdl)
701 {
702 if (MmGetMdlByteCount(pMdl) >= cb)
703 {
704 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
705 if (pMemNt)
706 {
707 pMemNt->fAllocatedPagesForMdl = true;
708 pMemNt->cMdls = 1;
709 pMemNt->apMdls[0] = pMdl;
710 *ppMem = &pMemNt->Core;
711 return VINF_SUCCESS;
712 }
713 }
714 g_pfnrtMmFreePagesFromMdl(pMdl);
715 ExFreePool(pMdl);
716 }
717 return VERR_NO_MEMORY;
718 }
719 return VERR_NOT_SUPPORTED;
720}
721
722
723DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
724 const char *pszTag)
725{
726 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE || uCachePolicy == RTMEM_CACHE_POLICY_MMIO, VERR_NOT_SUPPORTED);
727
728 /*
729 * Validate the address range and create a descriptor for it.
730 */
731 PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
732 if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
733 return VERR_ADDRESS_TOO_BIG;
734
735 /*
736 * Create the IPRT memory object.
737 */
738 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
739 if (pMemNt)
740 {
741 pMemNt->Core.u.Phys.PhysBase = Phys;
742 pMemNt->Core.u.Phys.fAllocated = false;
743 pMemNt->Core.u.Phys.uCachePolicy = uCachePolicy;
744 *ppMem = &pMemNt->Core;
745 return VINF_SUCCESS;
746 }
747 return VERR_NO_MEMORY;
748}
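/* A minimal sketch of entering an MMIO range and mapping it into the kernel, which is the main
   use of rtR0MemObjNativeEnterPhys() together with rtR0MemObjNtMap() below.  PhysMmio and cb are
   hypothetical device values and the *Tag entry points are assumed from iprt/memobj.h. */
#if 0
static int rtR0MemObjNtExampleEnterMmio(RTHCPHYS PhysMmio, size_t cb, PRTR0MEMOBJ phMemObj, PRTR0MEMOBJ phMapObj)
{
    int rc = RTR0MemObjEnterPhysTag(phMemObj, PhysMmio, cb, RTMEM_CACHE_POLICY_MMIO, "example");
    if (RT_SUCCESS(rc))
        rc = RTR0MemObjMapKernelTag(phMapObj, *phMemObj, (void *)-1 /*pvFixed*/, PAGE_SIZE,
                                    RTMEM_PROT_READ | RTMEM_PROT_WRITE, "example");
    return rc;
}
#endif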
749
750
751/**
752 * Internal worker for locking down pages.
753 *
754 * @return IPRT status code.
755 *
756 * @param ppMem Where to store the memory object pointer.
757 * @param pv First page.
758 * @param cb Number of bytes.
759 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
760 * and RTMEM_PROT_WRITE.
761 * @param R0Process The process \a pv and \a cb refers to.
762 * @param pszTag Allocation tag used for statistics and such.
763 */
764static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process,
765 const char *pszTag)
766{
767 /*
768 * Calc the number of MDLs we need and allocate the memory object structure.
769 */
770 size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
771 if (cb % MAX_LOCK_MEM_SIZE)
772 cMdls++;
773 if (cMdls >= UINT32_MAX)
774 return VERR_OUT_OF_RANGE;
775 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[cMdls]),
776 RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
777 if (!pMemNt)
778 return VERR_NO_MEMORY;
779
780 /*
781 * Loop locking down the sub parts of the memory.
782 */
783 int rc = VINF_SUCCESS;
784 size_t cbTotal = 0;
785 uint8_t *pb = (uint8_t *)pv;
786 uint32_t iMdl;
787 for (iMdl = 0; iMdl < cMdls; iMdl++)
788 {
789 /*
790 * Calc the Mdl size and allocate it.
791 */
792 size_t cbCur = cb - cbTotal;
793 if (cbCur > MAX_LOCK_MEM_SIZE)
794 cbCur = MAX_LOCK_MEM_SIZE;
795 AssertMsg(cbCur, ("cbCur: 0!\n"));
796 PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
797 if (!pMdl)
798 {
799 rc = VERR_NO_MEMORY;
800 break;
801 }
802
803 /*
804 * Lock the pages.
805 */
806 __try
807 {
808 MmProbeAndLockPages(pMdl,
809 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
810 fAccess == RTMEM_PROT_READ
811 ? IoReadAccess
812 : fAccess == RTMEM_PROT_WRITE
813 ? IoWriteAccess
814 : IoModifyAccess);
815
816 pMemNt->apMdls[iMdl] = pMdl;
817 pMemNt->cMdls++;
818 }
819 __except(EXCEPTION_EXECUTE_HANDLER)
820 {
821 IoFreeMdl(pMdl);
822 rc = VERR_LOCK_FAILED;
823 break;
824 }
825
826 if ( R0Process != NIL_RTR0PROCESS
827 && g_pfnrtMmSecureVirtualMemory
828 && g_pfnrtMmUnsecureVirtualMemory)
829 {
830 /* Make sure the user process can't change the allocation. */
831 pMemNt->pvSecureMem = g_pfnrtMmSecureVirtualMemory(pv, cb,
832 fAccess & RTMEM_PROT_WRITE
833 ? PAGE_READWRITE
834 : PAGE_READONLY);
835 if (!pMemNt->pvSecureMem)
836 {
837 rc = VERR_NO_MEMORY;
838 break;
839 }
840 }
841
842 /* next */
843 cbTotal += cbCur;
844 pb += cbCur;
845 }
846 if (RT_SUCCESS(rc))
847 {
848 Assert(pMemNt->cMdls == cMdls);
849 pMemNt->Core.u.Lock.R0Process = R0Process;
850 *ppMem = &pMemNt->Core;
851 return rc;
852 }
853
854 /*
855 * We failed, perform cleanups.
856 */
857 while (iMdl-- > 0)
858 {
859 MmUnlockPages(pMemNt->apMdls[iMdl]);
860 IoFreeMdl(pMemNt->apMdls[iMdl]);
861 pMemNt->apMdls[iMdl] = NULL;
862 }
863 if (pMemNt->pvSecureMem)
864 {
865 if (g_pfnrtMmUnsecureVirtualMemory)
866 g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
867 pMemNt->pvSecureMem = NULL;
868 }
869
870 rtR0MemObjDelete(&pMemNt->Core);
871 return rc;
872}
873
874
875DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
876 RTR0PROCESS R0Process, const char *pszTag)
877{
878 AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
879 /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
880 return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, pszTag);
881}
882
883
884DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
885{
886 return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, pszTag);
887}
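/* A minimal sketch of locking a user-mode buffer through the public API (iprt/memobj.h), which
   ends up in rtR0MemObjNtLock() above; R3Ptr and cb stand for hypothetical caller input and the
   *Tag name is assumed from the header. */
#if 0
static int rtR0MemObjNtExampleLockUser(RTR3PTR R3Ptr, size_t cb, PRTR0MEMOBJ phMemObj)
{
    /* Lock for read+write in the calling process; when MmSecureVirtualMemory is available the
       region is also secured so the process cannot change its protection while it is locked. */
    return RTR0MemObjLockUserTag(phMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                 RTR0ProcHandleSelf(), "example");
}
#endif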
888
889
890DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
891 const char *pszTag)
892{
893 /*
894 * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
895 * Or MmAllocateMappingAddress?
896 */
897 RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
898 return VERR_NOT_SUPPORTED;
899}
900
901
902DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
903 RTR0PROCESS R0Process, const char *pszTag)
904{
905 /*
906     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
907 */
908 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
909 return VERR_NOT_SUPPORTED;
910}
911
912
913/**
914 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
915 *
916 * @returns IPRT status code.
917 * @param ppMem Where to store the memory object for the mapping.
918 * @param pMemToMap The memory object to map.
919 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
920 * @param uAlignment The alignment requirement for the mapping.
921 * @param fProt The desired page protection for the mapping.
922 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
923 * If not nil, it's the current process.
924 * @param offSub Offset into @a pMemToMap to start mapping.
925 * @param cbSub           The number of bytes to map from @a pMemToMap. 0 if
926 * we're to map everything. Non-zero if @a offSub is
927 * non-zero.
928 * @param pszTag Allocation tag used for statistics and such.
929 */
930static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
931 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
932{
933 int rc = VERR_MAP_FAILED;
934
935 /*
936 * Check that the specified alignment is supported.
937 */
938 if (uAlignment > PAGE_SIZE)
939 return VERR_NOT_SUPPORTED;
940
941 /*
942 * There are two basic cases here, either we've got an MDL and can
943 * map it using MmMapLockedPages, or we've got a contiguous physical
944 * range (MMIO most likely) and can use MmMapIoSpace.
945 */
946 PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
947 if (pMemNtToMap->cMdls)
948 {
949        /* don't attempt to map locked regions with more than one MDL. */
950 if (pMemNtToMap->cMdls != 1)
951 return VERR_NOT_SUPPORTED;
952
953 /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
954 if (pvFixed != (void *)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
955 return VERR_NOT_SUPPORTED;
956
957 /* we can't map anything to the first page, sorry. */
958 if (pvFixed == 0)
959 return VERR_NOT_SUPPORTED;
960
961 /* only one system mapping for now - no time to figure out MDL restrictions right now. */
962 if ( pMemNtToMap->Core.uRel.Parent.cMappings
963 && R0Process == NIL_RTR0PROCESS)
964 {
965 if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
966 return VERR_NOT_SUPPORTED;
967 uint32_t iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
968 while (iMapping-- > 0)
969 {
970 PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
971 if ( pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
972 || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
973 return VERR_NOT_SUPPORTED;
974 }
975 }
976
977 /* Create a partial MDL if this is a sub-range request. */
978 PMDL pMdl;
979 if (!offSub && !cbSub)
980 pMdl = pMemNtToMap->apMdls[0];
981 else
982 {
983 pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
984 if (pMdl)
985 IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
986 (uint8_t *)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
987            else
988            {
989                /* IoAllocateMdl failed, so there is no MDL to free here. */
990                return VERR_NO_MEMORY;
991            }
992 }
993
994 __try
995 {
996 /** @todo uAlignment */
997 /** @todo How to set the protection on the pages? */
998 void *pv;
999 if (g_pfnrtMmMapLockedPagesSpecifyCache)
1000 pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
1001 R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
1002 MmCached,
1003 pvFixed != (void *)-1 ? pvFixed : NULL,
1004 FALSE /* no bug check on failure */,
1005 NormalPagePriority);
1006 else
1007 pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
1008 if (pv)
1009 {
1010 NOREF(fProt);
1011
1012 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew( !offSub && !cbSub
1013 ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
1014 RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb, pszTag);
1015 if (pMemNt)
1016 {
1017 pMemNt->Core.u.Mapping.R0Process = R0Process;
1018 if (!offSub && !cbSub)
1019 pMemNt->fSubMapping = false;
1020 else
1021 {
1022 pMemNt->apMdls[0] = pMdl;
1023 pMemNt->cMdls = 1;
1024 pMemNt->fSubMapping = true;
1025 }
1026
1027 *ppMem = &pMemNt->Core;
1028 return VINF_SUCCESS;
1029 }
1030
1031 rc = VERR_NO_MEMORY;
1032 MmUnmapLockedPages(pv, pMdl);
1033 }
1034 }
1035 __except(EXCEPTION_EXECUTE_HANDLER)
1036 {
1037#ifdef LOG_ENABLED
1038 NTSTATUS rcNt = GetExceptionCode();
1039 Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
1040#endif
1041
1042 /* nothing */
1043 rc = VERR_MAP_FAILED;
1044 }
1045
1046 }
1047 else
1048 {
1049 AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
1050 && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
1051
1052 /* cannot map phys mem to user space (yet). */
1053 if (R0Process != NIL_RTR0PROCESS)
1054 return VERR_NOT_SUPPORTED;
1055
1056        /* Cannot sub-map these (yet). */
1057 AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);
1058
1059
1060 /** @todo uAlignment */
1061 /** @todo How to set the protection on the pages? */
1062 PHYSICAL_ADDRESS Phys;
1063 Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
1064 void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
1065 pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
1066 if (pv)
1067 {
1068 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
1069 pMemNtToMap->Core.cb, pszTag);
1070 if (pMemNt)
1071 {
1072 pMemNt->Core.u.Mapping.R0Process = R0Process;
1073 *ppMem = &pMemNt->Core;
1074 return VINF_SUCCESS;
1075 }
1076
1077 rc = VERR_NO_MEMORY;
1078 MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
1079 }
1080 }
1081
1082 NOREF(uAlignment); NOREF(fProt);
1083 return rc;
1084}
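/* A minimal sketch of the offSub/cbSub case documented above: mapping just one page out of a
   larger object into kernel space.  RTR0MemObjMapKernelExTag() is the public entry point assumed
   from iprt/memobj.h; hMemObjToMap and offSub are hypothetical caller values (offSub must be
   page aligned). */
#if 0
static int rtR0MemObjNtExampleMapOnePage(RTR0MEMOBJ hMemObjToMap, size_t offSub, PRTR0MEMOBJ phMapObj)
{
    return RTR0MemObjMapKernelExTag(phMapObj, hMemObjToMap, (void *)-1 /*pvFixed*/, PAGE_SIZE,
                                    RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, PAGE_SIZE, "example");
}
#endif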
1085
1086
1087DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1088 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
1089{
1090 return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS, offSub, cbSub, pszTag);
1091}
1092
1093
1094DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1095 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
1096{
1097 AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
1098 return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process, offSub, cbSub, pszTag);
1099}
1100
1101
1102DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1103{
1104#if 0
1105 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1106#endif
1107
1108 /*
1109     * There seem to be some issues with the MmProtectMdlSystemAddress API, so
1110     * this code is disabled until we've tested it with the driver verifier.
1111 */
1112#if 0
1113 /*
1114 * The API we've got requires a kernel mapping.
1115 */
1116 if ( pMemNt->cMdls
1117 && g_pfnrtMmProtectMdlSystemAddress
1118 && (g_uRtNtMajorVer > 6 || (g_uRtNtMajorVer == 6 && g_uRtNtMinorVer >= 1)) /* Windows 7 and later. */
1119 && pMemNt->Core.pv != NULL
1120 && ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_PAGE
1121 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOW
1122 || pMemNt->Core.enmType == RTR0MEMOBJTYPE_CONT
1123 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_LOCK
1124 && pMemNt->Core.u.Lock.R0Process == NIL_RTPROCESS)
1125 || ( pMemNt->Core.enmType == RTR0MEMOBJTYPE_MAPPING
1126 && pMemNt->Core.u.Mapping.R0Process == NIL_RTPROCESS) ) )
1127 {
1128 /* Convert the protection. */
1129 LOCK_OPERATION enmLockOp;
1130 ULONG fAccess;
1131 switch (fProt)
1132 {
1133 case RTMEM_PROT_NONE:
1134 fAccess = PAGE_NOACCESS;
1135 enmLockOp = IoReadAccess;
1136 break;
1137 case RTMEM_PROT_READ:
1138 fAccess = PAGE_READONLY;
1139 enmLockOp = IoReadAccess;
1140 break;
1141 case RTMEM_PROT_WRITE:
1142 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
1143 fAccess = PAGE_READWRITE;
1144 enmLockOp = IoModifyAccess;
1145 break;
1146 case RTMEM_PROT_EXEC:
1147 fAccess = PAGE_EXECUTE;
1148 enmLockOp = IoReadAccess;
1149 break;
1150 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
1151 fAccess = PAGE_EXECUTE_READ;
1152 enmLockOp = IoReadAccess;
1153 break;
1154 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE:
1155 case RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ:
1156 fAccess = PAGE_EXECUTE_READWRITE;
1157 enmLockOp = IoModifyAccess;
1158 break;
1159 default:
1160 AssertFailedReturn(VERR_INVALID_FLAGS);
1161 }
1162
1163 NTSTATUS rcNt = STATUS_SUCCESS;
1164# if 0 /** @todo test this against the verifier. */
1165 if (offSub == 0 && pMemNt->Core.cb == cbSub)
1166 {
1167 uint32_t iMdl = pMemNt->cMdls;
1168 while (iMdl-- > 0)
1169 {
1170 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMemNt->apMdls[i], fAccess);
1171 if (!NT_SUCCESS(rcNt))
1172 break;
1173 }
1174 }
1175 else
1176# endif
1177 {
1178 /*
1179 * We ASSUME the following here:
1180 * - MmProtectMdlSystemAddress can deal with nonpaged pool memory
1181 * - MmProtectMdlSystemAddress doesn't actually store anything in the MDL we pass it.
1182 * - We are not required to call MmProtectMdlSystemAddress with PAGE_READWRITE for the
1183 * exact same ranges prior to freeing them.
1184 *
1185 * So, we lock the pages temporarily, call the API and unlock them.
1186 */
1187 uint8_t *pbCur = (uint8_t *)pMemNt->Core.pv + offSub;
1188 while (cbSub > 0 && NT_SUCCESS(rcNt))
1189 {
1190 size_t cbCur = cbSub;
1191 if (cbCur > MAX_LOCK_MEM_SIZE)
1192 cbCur = MAX_LOCK_MEM_SIZE;
1193 PMDL pMdl = IoAllocateMdl(pbCur, (ULONG)cbCur, FALSE, FALSE, NULL);
1194 if (pMdl)
1195 {
1196 __try
1197 {
1198 MmProbeAndLockPages(pMdl, KernelMode, enmLockOp);
1199 }
1200 __except(EXCEPTION_EXECUTE_HANDLER)
1201 {
1202 rcNt = GetExceptionCode();
1203 }
1204 if (NT_SUCCESS(rcNt))
1205 {
1206 rcNt = g_pfnrtMmProtectMdlSystemAddress(pMdl, fAccess);
1207 MmUnlockPages(pMdl);
1208 }
1209 IoFreeMdl(pMdl);
1210 }
1211 else
1212 rcNt = STATUS_NO_MEMORY;
1213 pbCur += cbCur;
1214 cbSub -= cbCur;
1215 }
1216 }
1217
1218 if (NT_SUCCESS(rcNt))
1219 return VINF_SUCCESS;
1220 return RTErrConvertFromNtStatus(rcNt);
1221 }
1222#else
1223 RT_NOREF4(pMem, offSub, cbSub, fProt);
1224#endif
1225
1226 return VERR_NOT_SUPPORTED;
1227}
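/* A minimal sketch of how a caller copes with the VERR_NOT_SUPPORTED result above when asking the
   public RTR0MemObjProtect() API (iprt/memobj.h) to change protection on this host.  The helper
   and its fallback policy are illustrative assumptions. */
#if 0
static int rtR0MemObjNtExampleProtect(RTR0MEMOBJ hMemObj, size_t cb)
{
    int rc = RTR0MemObjProtect(hMemObj, 0 /*offSub*/, cb, RTMEM_PROT_READ);
    if (rc == VERR_NOT_SUPPORTED)
        rc = VINF_SUCCESS; /* acceptable here: the pages simply stay writable on this host */
    return rc;
}
#endif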
1228
1229
1230DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1231{
1232 PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;
1233
1234 if (pMemNt->cMdls)
1235 {
1236 if (pMemNt->cMdls == 1)
1237 {
1238 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
1239 return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
1240 }
1241
1242 size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1243 size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
1244 PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
1245 return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
1246 }
1247
1248 switch (pMemNt->Core.enmType)
1249 {
1250 case RTR0MEMOBJTYPE_MAPPING:
1251 return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
1252
1253 case RTR0MEMOBJTYPE_PHYS:
1254 return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1255
1256 case RTR0MEMOBJTYPE_PAGE:
1257 case RTR0MEMOBJTYPE_PHYS_NC:
1258 case RTR0MEMOBJTYPE_LOW:
1259 case RTR0MEMOBJTYPE_CONT:
1260 case RTR0MEMOBJTYPE_LOCK:
1261 default:
1262 AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
1263 case RTR0MEMOBJTYPE_RES_VIRT:
1264 return NIL_RTHCPHYS;
1265 }
1266}
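/* A minimal sketch (not built) of the page-to-MDL index math above, worked for AMD64 where
   MAX_LOCK_MEM_SIZE is 24MB, i.e. 6144 pages per MDL with 4KB pages. */
#if 0
static void rtR0MemObjNtExamplePageIndexMath(void)
{
    size_t const cPagesPerMdl = MAX_LOCK_MEM_SIZE >> PAGE_SHIFT;    /* 24MB >> 12 = 6144           */
    size_t const iPage        = 10000;
    size_t const iMdl         = iPage / cPagesPerMdl;               /* 10000 / 6144 = 1 (2nd MDL)  */
    size_t const iMdlPfn      = iPage % cPagesPerMdl;               /* 10000 % 6144 = 3856         */
    RT_NOREF(iMdl, iMdlPfn);
}
#endif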
1267