VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@5528

Last change on this file since 5528 was 5005, checked in by vboxsync, 17 years ago

Failure logging

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.5 KB
/* $Id: memobj-r0drv-nt.cpp 5005 2007-09-24 14:20:57Z vboxsync $ */
/** @file
 * innotek Portable Runtime - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * The limit is supposed to be just below 256MB, but in practice it appears
 * to be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
#ifndef IPRT_TARGET_NT4
    /** Set if the pages were allocated using MmAllocatePagesForMdl(). */
    bool fAllocatedPagesForMdl;
#endif
    /** Pointer returned by MmSecureVirtualMemory */
    PVOID pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(    pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       ||  pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                Assert(    pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       &&  !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to allocate the memory and create an MDL for it so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try to see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
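        /* Check that every page is below 4GB; if any isn't, ditch the allocation and fall back on the approaches below. */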
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
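        /* MmAllocatePagesForMdl is allowed to return fewer pages than requested; only proceed if we got everything. */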
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in the allocation.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
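            /* The block is physically contiguous, so the first PFN in the MDL gives its physical base address. */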
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1);
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (cb < _128K)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
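                /* Walk the PFN array to check whether the pages happen to be physically contiguous. */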
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
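    /* MmAllocatePagesForMdl may hand back scattered (non-contiguous) pages, which is fine for a PHYS_NC object. */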
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else  /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Validate the address range and create a descriptor for it.
     */
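    /* If the address doesn't survive the PFN round-trip it cannot be represented (too big or not page aligned). */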
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Internal worker for locking down pages.
 *
 * @returns IPRT status code.
 *
 * @param   ppMem       Where to store the memory object pointer.
 * @param   pv          First page.
 * @param   cb          Number of bytes.
 * @param   R0Process   The process \a pv and \a cb refer to, or NIL_RTR0PROCESS for kernel memory.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);

            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        if (R0Process != NIL_RTR0PROCESS)
        {
            /* Make sure the user process can't change the allocation. */
            pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb, PAGE_READWRITE);
            if (!pMemNt->pvSecureMem)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }

    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt to map locked regions with more than one MDL. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (    pMemNtToMap->Core.uRel.Parent.cMappings
            &&  R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));

            /* nothing */
            rc = VERR_MAP_FAILED;
        }

    }
    else
    {
        AssertReturn(    pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     &&  !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

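        /* Multiple MDLs: every MDL but the last covers exactly MAX_LOCK_MEM_SIZE bytes, so locate the MDL and then the PFN within it. */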
        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}