VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@5999

Last change on this file since 5999 was 5999, checked in by vboxsync, 17 years ago

The Giant CDDL Dual-License Header Change.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 26.0 KB
/* $Id: memobj-r0drv-nt.cpp 5999 2007-12-07 15:05:06Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but the actual limit
 * appears to be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
#endif
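/* A worked example of the chunking this implies (see rtR0MemObjNtLock below):
 * locking 100 MB on AMD64 is split into ceil(100 / 24) = 5 MDLs, the first
 * four covering 24 MB each and the last one the remaining 4 MB. */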


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
#ifndef IPRT_TARGET_NT4
    /** Whether the memory was allocated with MmAllocatePagesForMdl(). */
    bool                fAllocatedPagesForMdl;
#endif
    /** Pointer returned by MmSecureVirtualMemory. */
    PVOID               pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t            cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL                apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;
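/* Note on sizing: the structure ends with a variable size MDL pointer array, so
 * instances spanning several MDLs are allocated with
 * rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]), ...) rather than
 * sizeof(RTR0MEMOBJNT); see rtR0MemObjNtLock below. */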


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per-type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*          if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(   pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to allocate the memory and create an MDL for it so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
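
/* Illustrative sketch: ring-0 code normally reaches this backend through the
 * generic RTR0MemObj* wrappers declared in iprt/memobj.h (their exact shape is
 * assumed here, not defined in this file):
 *
 *      RTR0MEMOBJ hMemObj;
 *      int rc = RTR0MemObjAllocPage(&hMemObj, _64K, false);    // fExecutable = false
 *      if (RT_SUCCESS(rc))
 *      {
 *          void *pv = RTR0MemObjAddress(hMemObj);
 *          // ... use the 64 KB of page-aligned memory ...
 *          rc = RTR0MemObjFree(hMemObj, false);                // fFreeMappings = false
 *      }
 */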


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * See if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in the allocation.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1);
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (cb < _128K)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
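                /* MmAllocatePagesForMdl makes no contiguity promise, so walk the PFN
                 * array and only claim success if every frame follows the previous one.
                 * E.g. PFNs {0x1000, 0x1001, 0x1002} pass, {0x1000, 0x1001, 0x1003} do not. */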
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
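
/* Illustrative sketch: a typical consumer of this backend is code that needs to
 * access a pre-existing physical range such as MMIO. Using the generic wrappers
 * from iprt/memobj.h (assumed here) with a made-up physical address:
 *
 *      RTR0MEMOBJ hPhys, hMap;
 *      int rc = RTR0MemObjEnterPhys(&hPhys, 0xfee00000, PAGE_SIZE);
 *      if (RT_SUCCESS(rc))
 *          rc = RTR0MemObjMapKernel(&hMap, hPhys, (void *)-1, 0,
 *                                   RTMEM_PROT_READ | RTMEM_PROT_WRITE);
 *
 * Such a mapping request lands in rtR0MemObjNtMap below, which uses MmMapIoSpace
 * for physical objects that were entered rather than allocated.
 */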


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem           Where to store the memory object pointer.
 * @param   pv              First page.
 * @param   cb              Number of bytes.
 * @param   R0Process       The process \a pv and \a cb refer to,
 *                          or NIL_RTR0PROCESS for kernel memory.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);

            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        if (R0Process != NIL_RTR0PROCESS)
        {
            /* Make sure the user process can't change the allocation. */
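            /* Note that this secures the entire range (pv/cb) rather than just the
             * current MAX_LOCK_MEM_SIZE chunk, so for multi-MDL locks it is called
             * again on each iteration, overwriting pvSecureMem. */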
            pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb, PAGE_READWRITE);
            if (!pMemNt->pvSecureMem)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }

    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the memory object for the mapping.
 * @param   pMemToMap       The memory object to map.
 * @param   pvFixed         Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment      The alignment requirement for the mapping.
 * @param   fProt           The desired page protection for the mapping.
 * @param   R0Process       If NIL_RTR0PROCESS map into system (kernel) memory.
 *                          If not nil, it's the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt to map locked regions with more than one MDL. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (    pMemNtToMap->Core.uRel.Parent.cMappings
            &&  R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));

            /* nothing */
            rc = VERR_MAP_FAILED;
        }
    }
    else
    {
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

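        /* Locked-down objects may span several MDLs of MAX_LOCK_MEM_SIZE bytes each,
         * so first find the MDL covering iPage and then index into its PFN array.
         * E.g. with the 24MB AMD64 limit (6144 pages per MDL), iPage 7000 lands in
         * MDL 1 at PFN array entry 856. */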
        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}