VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@41169

Last change on this file since 41169 was 41090, checked in by vboxsync on 2012-04-27

memobj-r0drv-darwin.cpp: Attempt at fixing the vm_protect/fExecute problem.

1/* $Id: memobj-r0drv-darwin.cpp 41090 2012-04-27 23:26:51Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "the-darwin-kernel.h"
32#include "internal/iprt.h"
33#include <iprt/memobj.h>
34
35#include <iprt/asm.h>
36#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
37# include <iprt/asm-amd64-x86.h>
38#endif
39#include <iprt/assert.h>
40#include <iprt/log.h>
41#include <iprt/mem.h>
42#include <iprt/param.h>
43#include <iprt/process.h>
44#include <iprt/string.h>
45#include <iprt/thread.h>
46#include "internal/memobj.h"
47
48/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
49
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
54/**
55 * The Darwin version of the memory object structure.
56 */
57typedef struct RTR0MEMOBJDARWIN
58{
59 /** The core structure. */
60 RTR0MEMOBJINTERNAL Core;
61 /** Pointer to the memory descriptor created for allocated and locked memory. */
62 IOMemoryDescriptor *pMemDesc;
63 /** Pointer to the memory mapping object for mapped memory. */
64 IOMemoryMap *pMemMap;
65} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
66
67
68/**
69 * Touch the pages to force the kernel to create or write-enable the page table
70 * entries.
71 *
72 * This is necessary since the kernel gets upset if we take a page fault when
73 * preemption is disabled and/or we own a simple lock (same thing). It has no
74 * problems with us disabling interrupts when taking the traps, weird stuff.
75 *
76 * (This is basically a way of invoking vm_fault on a range of pages.)
77 *
78 * @param pv Pointer to the first page.
79 * @param cb The number of bytes.
80 */
81static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
82{
83 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
84 for (;;)
85 {
86 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
87 if (cb <= PAGE_SIZE)
88 break;
89 cb -= PAGE_SIZE;
90 pu32 += PAGE_SIZE / sizeof(uint32_t);
91 }
92}
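/*
 * A minimal caller sketch (added for illustration, not part of the original
 * file) of the pattern used by the RT_STRICT checks in
 * rtR0MemObjNativeAllocWorker() further down: touching the pages with
 * preemption disabled asserts that they are already mapped, since a page
 * fault in that state is exactly what the comment above says the kernel
 * cannot tolerate.
 */
#if 0 /* illustration only */
static void rtR0MemObjDarwinExampleTouchWithoutPreemption(void *pv, size_t cb)
{
    RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&State);     /* no page faults tolerated from here on */
    rtR0MemObjDarwinTouchPages(pv, cb); /* forces / verifies the PTEs */
    RTThreadPreemptRestore(&State);
}
#endif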
93
94
95/**
96 * Read (sniff) every page in the range to make sure there are some page table
97 * entries backing it.
98 *
99 * This is just to be sure vm_protect didn't remove stuff without re-adding it
100 * if someone should try to write-protect something.
101 *
102 * @param pv Pointer to the first page.
103 * @param cb The number of bytes.
104 */
105static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
106{
107 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
108 uint32_t volatile u32Counter = 0;
109 for (;;)
110 {
111 u32Counter += *pu32;
112
113 if (cb <= PAGE_SIZE)
114 break;
115 cb -= PAGE_SIZE;
116 pu32 += PAGE_SIZE / sizeof(uint32_t);
117 }
118}
119
120
121/**
122 * Gets the virtual memory map the specified object is mapped into.
123 *
124 * @returns VM map handle on success, NULL if no map.
125 * @param pMem The memory object.
126 */
127DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
128{
129 switch (pMem->enmType)
130 {
131 case RTR0MEMOBJTYPE_PAGE:
132 case RTR0MEMOBJTYPE_LOW:
133 case RTR0MEMOBJTYPE_CONT:
134 return kernel_map;
135
136 case RTR0MEMOBJTYPE_PHYS:
137 case RTR0MEMOBJTYPE_PHYS_NC:
138 return NULL; /* pretend these have no mapping atm. */
139
140 case RTR0MEMOBJTYPE_LOCK:
141 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
142 ? kernel_map
143 : get_task_map((task_t)pMem->u.Lock.R0Process);
144
145 case RTR0MEMOBJTYPE_RES_VIRT:
146 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
147 ? kernel_map
148 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
149
150 case RTR0MEMOBJTYPE_MAPPING:
151 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
152 ? kernel_map
153 : get_task_map((task_t)pMem->u.Mapping.R0Process);
154
155 default:
156 return NULL;
157 }
158}
159
160#if 0 /* not necessary after all */
161/* My vm_map mockup. */
162struct my_vm_map
163{
164 struct { char pad[8]; } lock;
165 struct my_vm_map_header
166 {
167 struct vm_map_links
168 {
169 void *prev;
170 void *next;
171 vm_map_offset_t start;
172 vm_map_offset_t end;
173 } links;
174 int nentries;
175 boolean_t entries_pageable;
176 } hdr;
177 pmap_t pmap;
178 vm_map_size_t size;
179};
180
181
182/**
183 * Gets the minimum map address, this is similar to get_map_min.
184 *
185 * @returns The start address of the map.
186 * @param pMap The map.
187 */
188static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
189{
190 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
191 static int32_t volatile s_offAdjust = INT32_MAX;
192 int32_t off = s_offAdjust;
193 if (off == INT32_MAX)
194 {
195 for (off = 0; ; off += sizeof(pmap_t))
196 {
197 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
198 break;
199 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
200 }
201 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
202 }
203
204 /* calculate it. */
205 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
206 return pMyMap->hdr.links.start;
207}
208#endif /* unused */
209
210#ifdef RT_STRICT
211
212/**
213 * Read from a physical page.
214 *
215 * @param HCPhys The address to start reading at.
216 * @param cb How many bytes to read.
217 * @param pvDst Where to put the bytes. This is zero'd on failure.
218 */
219static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
220{
221 memset(pvDst, '\0', cb);
222
223 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN(cb, PAGE_SIZE) } };
224 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
225 kIODirectionIn, NULL /*task*/);
226 if (pMemDesc)
227 {
228#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
229 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
230#else
231 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
232#endif
233 if (pMemMap)
234 {
235 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
236 memcpy(pvDst, pvSrc, cb);
237 pMemMap->release();
238 }
239 else
240 printf("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
241
242 pMemDesc->release();
243 }
244 else
245 printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
246}
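/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): peeking at a single 64-bit paging-structure entry by physical
 * address, which is exactly how rtR0MemObjDarwinGetPTE() below consumes this
 * helper.
 */
#if 0 /* illustration only */
static uint64_t rtR0MemObjDarwinExamplePeekU64(RTHCPHYS HCPhys)
{
    RTUINT64U u64;
    rtR0MemObjDarwinReadPhys(HCPhys, sizeof(u64), &u64); /* zeroed on failure */
    return u64.u;
}
#endif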
247
248
249/**
250 * Gets the PTE for a page.
251 *
252 * @returns the PTE.
253 * @param pvPage The virtual address to get the PTE for.
254 */
255static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
256{
257 RTUINT64U u64;
258 RTCCUINTREG cr3 = ASMGetCR3();
259 RTCCUINTREG cr4 = ASMGetCR4();
260 bool fPAE = false;
261 bool fLMA = false;
262 if (cr4 & RT_BIT(5) /*X86_CR4_PAE*/)
263 {
264 fPAE = true;
265 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
266 if (fAmdFeatures & RT_BIT(29) /*X86_CPUID_AMD_FEATURE_EDX_LONG_MODE*/)
267 {
268 uint64_t efer = ASMRdMsr(0xc0000080 /*MSR_K6_EFER*/);
269 if (efer & RT_BIT(10) /*MSR_K6_EFER_LMA*/)
270 fLMA = true;
271 }
272 }
273
274 if (fLMA)
275 {
276 /* PML4 */
277 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> 39) & 0x1ff) * 8, 8, &u64);
278 if (!(u64.u & RT_BIT(0) /* present */))
279 {
280 printf("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
281 return 0;
282 }
283
284 /* PDPTR */
285 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 30) & 0x1ff) * 8, 8, &u64);
286 if (!(u64.u & RT_BIT(0) /* present */))
287 {
288 printf("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
289 return 0;
290 }
291 if (u64.u & RT_BIT(7) /* big */)
292 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
293
294 /* PD */
295 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
296 if (!(u64.u & RT_BIT(0) /* present */))
297 {
298 printf("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
299 return 0;
300 }
301 if (u64.u & RT_BIT(7) /* big */)
302 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
303
304 /* PT */
305 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
306 if (!(u64.u & RT_BIT(0) /* present */))
307 {
308 printf("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
309 return 0;
310 }
311 return u64.u;
312 }
313
314 if (fPAE)
315 {
316 /* PDPTR */
317 rtR0MemObjDarwinReadPhys((cr3 & 0xffffffe0 /*X86_CR3_PAE_PAGE_MASK*/) | (((uintptr_t)pvPage >> 30) & 0x3) * 8, 8, &u64);
318 if (!(u64.u & RT_BIT(0) /* present */))
319 return 0;
320
321 /* PD */
322 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
323 if (!(u64.u & RT_BIT(0) /* present */))
324 return 0;
325 if (u64.u & RT_BIT(7) /* big */)
326 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
327
328 /* PT */
329 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
330 if (!(u64.u & RT_BIT(0) /* present */))
331 return 0;
332 return u64.u;
333 }
334
335 /* PD */
336 rtR0MemObjDarwinReadPhys(((uint32_t)cr3 & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 22) & 0x3ff) * 4, 4, &u64);
337 if (!(u64.au32[0] & RT_BIT(0) /* present */))
338 return 0;
339 if (u64.au32[0] & RT_BIT(7) /* big */)
340 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
341
342 /* PT */
343 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x3ff) * 4, 4, &u64);
344 if (!(u64.au32[0] & RT_BIT(0) /* present */))
345 return 0;
346 return u64.au32[0];
347
348 return 0;
349}
350
351#endif /* RT_STRICT */
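/*
 * Worked example for the long mode (4-level) walk above, using the arbitrary
 * kernel address 0xffffff8000200000 purely as an illustration:
 *
 *   PML4 index: (uAddr >> 39) & 0x1ff = 0x1ff
 *   PDPT index: (uAddr >> 30) & 0x1ff = 0x000
 *   PD   index: (uAddr >> 21) & 0x1ff = 0x001
 *   PT   index: (uAddr >> 12) & 0x1ff = 0x000
 *
 * Each index is scaled by 8 (the entry size) and added to the page-aligned
 * physical address taken from the previous level's entry, which is precisely
 * what the rtR0MemObjDarwinReadPhys() address expressions in
 * rtR0MemObjDarwinGetPTE() compute.
 */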
352
353DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
354{
355 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
356
357 /*
358 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
359 */
360 if (pMemDarwin->pMemDesc)
361 {
362 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
363 pMemDarwin->pMemDesc->complete(); /* paranoia */
364 pMemDarwin->pMemDesc->release();
365 pMemDarwin->pMemDesc = NULL;
366 }
367
368 if (pMemDarwin->pMemMap)
369 {
370 pMemDarwin->pMemMap->release();
371 pMemDarwin->pMemMap = NULL;
372 }
373
374 /*
375 * Release any memory that we've allocated or locked.
376 */
377 switch (pMemDarwin->Core.enmType)
378 {
379 case RTR0MEMOBJTYPE_LOW:
380 case RTR0MEMOBJTYPE_PAGE:
381 case RTR0MEMOBJTYPE_CONT:
382 break;
383
384 case RTR0MEMOBJTYPE_LOCK:
385 {
386#ifdef USE_VM_MAP_WIRE
387 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
388 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
389 : kernel_map;
390 kern_return_t kr = vm_map_unwire(Map,
391 (vm_map_offset_t)pMemDarwin->Core.pv,
392 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
393 0 /* not user */);
394 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
395#endif
396 break;
397 }
398
399 case RTR0MEMOBJTYPE_PHYS:
400 /*if (pMemDarwin->Core.u.Phys.fAllocated)
401 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
402 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
403 break;
404
405 case RTR0MEMOBJTYPE_PHYS_NC:
406 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
407 return VERR_INTERNAL_ERROR;
408
409 case RTR0MEMOBJTYPE_RES_VIRT:
410 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
411 return VERR_INTERNAL_ERROR;
412
413 case RTR0MEMOBJTYPE_MAPPING:
414 /* nothing to do here. */
415 break;
416
417 default:
418 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
419 return VERR_INTERNAL_ERROR;
420 }
421
422 return VINF_SUCCESS;
423}
424
425
426
427/**
428 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
429 *
430 * @returns IPRT status code.
431 * @retval VERR_ADDRESS_TOO_BIG try another way.
432 *
433 * @param ppMem Where to return the memory object.
434 * @param cb The page aligned memory size.
435 * @param fExecutable Whether the mapping needs to be executable.
436 * @param fContiguous Whether the backing memory needs to be contiguous.
437 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
438 * you don't care that much or are speculating.
439 * @param MaxPhysAddr The max address to verify the result against. Use
440 * UINT64_MAX if it doesn't matter.
441 * @param enmType The object type.
442 */
443static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
444 bool fExecutable, bool fContiguous,
445 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
446 RTR0MEMOBJTYPE enmType)
447{
448 /*
449 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
450 * actually respects the physical memory mask (10.5.x is certainly busted),
451 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
452 *
453 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
454 */
455#if 1 /** @todo Figure out why this is broken. Is it only on snow leopard? Seen allocating memory for the VM structure, last page corrupted or inaccessible. */
456 size_t const cbFudged = cb + PAGE_SIZE;
457#else
458 size_t const cbFudged = cb;
459#endif
460 int rc;
461 IOBufferMemoryDescriptor *pMemDesc =
462 IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
463 kIOMemoryKernelUserShared
464 | kIODirectionInOut
465 | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
466 cbFudged,
467 PhysMask);
468 if (pMemDesc)
469 {
470 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
471 if (IORet == kIOReturnSuccess)
472 {
473 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
474 if (pv)
475 {
476 /*
477 * Check that it's all below MaxPhysAddr, page aligned, and contiguous when requested.
478 */
479 addr64_t AddrPrev = 0;
480 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
481 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
482 {
483#ifdef __LP64__ /* Grumble! */
484 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
485#else
486 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
487#endif
488 if ( Addr > MaxPhysAddr
489 || !Addr
490 || (Addr & PAGE_OFFSET_MASK)
491 || ( fContiguous
492 && off
493 && Addr != AddrPrev + PAGE_SIZE))
494 {
495 /* Buggy API, try allocate the memory another way. */
496 pMemDesc->release();
497 if (PhysMask)
498 LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx - buggy API!\n",
499 off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
500 return VERR_ADDRESS_TOO_BIG;
501 }
502 AddrPrev = Addr;
503 }
504
505#ifdef RT_STRICT
506 /* check that the memory is actually mapped. */
507 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
508 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
509 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
510 RTThreadPreemptDisable(&State);
511 rtR0MemObjDarwinTouchPages(pv, cb);
512 RTThreadPreemptRestore(&State);
513#endif
514
515 /*
516 * Create the IPRT memory object.
517 */
518 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
519 if (pMemDarwin)
520 {
521 if (fContiguous)
522 {
523#ifdef __LP64__ /* Grumble! */
524 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
525#else
526 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
527#endif
528 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
529 if (enmType == RTR0MEMOBJTYPE_CONT)
530 pMemDarwin->Core.u.Cont.Phys = PhysBase;
531 else if (enmType == RTR0MEMOBJTYPE_PHYS)
532 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
533 else
534 AssertMsgFailed(("enmType=%d\n", enmType));
535 }
536
537#if 1 /* Experimental code. */
538 if (fExecutable)
539 {
540 rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
541# ifdef RT_STRICT
542 /* check that the memory is actually mapped. */
543 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
544 RTThreadPreemptDisable(&State);
545 rtR0MemObjDarwinTouchPages(pv, cb);
546 RTThreadPreemptRestore(&State);
547# endif
548 }
549 else
550#endif
551 rc = VINF_SUCCESS;
552 if (RT_SUCCESS(rc))
553 {
554 pMemDarwin->pMemDesc = pMemDesc;
555 *ppMem = &pMemDarwin->Core;
556 return VINF_SUCCESS;
557 }
558
559 rtR0MemObjDelete(&pMemDarwin->Core);
560 }
561
562 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
563 rc = VERR_NO_PHYS_MEMORY;
564 else if (enmType == RTR0MEMOBJTYPE_LOW)
565 rc = VERR_NO_LOW_MEMORY;
566 else if (enmType == RTR0MEMOBJTYPE_CONT)
567 rc = VERR_NO_CONT_MEMORY;
568 else
569 rc = VERR_NO_MEMORY;
570 }
571 else
572 rc = VERR_MEMOBJ_INIT_FAILED;
573 }
574 else
575 rc = RTErrConvertFromDarwinIO(IORet);
576 pMemDesc->release();
577 }
578 else
579 rc = VERR_MEMOBJ_INIT_FAILED;
580 Assert(rc != VERR_ADDRESS_TOO_BIG);
581 return rc;
582}
583
584
585DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
586{
587 return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
588 0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
589}
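/*
 * Hypothetical caller sketch (illustration only, not part of the original
 * file): the generic IPRT ring-0 API that ends up in the native workers
 * above, shown for a single page allocation with minimal error handling.
 */
#if 0 /* illustration only */
static int rtR0MemObjDarwinExampleAllocPage(void)
{
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /*fExecutable*/);
    if (RT_SUCCESS(rc))
    {
        void    *pv   = RTR0MemObjAddress(hMemObj);            /* kernel mapping */
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0); /* first (only) page */
        Log(("example: pv=%p Phys=%RHp\n", pv, Phys));
        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    return rc;
}
#endif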
590
591
592DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
593{
594 /*
595 * Try IOMallocPhysical/IOMallocAligned first.
596 * Then try optimistically without a physical address mask, which will always
597 * end up using IOMallocAligned.
598 *
599 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
600 */
601 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
602 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
603 if (rc == VERR_ADDRESS_TOO_BIG)
604 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
605 0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
606 return rc;
607}
608
609
610DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
611{
612 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
613 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
614 RTR0MEMOBJTYPE_CONT);
615
616 /*
617 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
618 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
619 */
620 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
621 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
622 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
623 RTR0MEMOBJTYPE_CONT);
624 return rc;
625}
626
627
628DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
629{
630 /** @todo alignment */
631 if (uAlignment != PAGE_SIZE)
632 return VERR_NOT_SUPPORTED;
633
634 /*
635 * Translate the PhysHighest address into a mask.
636 */
637 int rc;
638 if (PhysHighest == NIL_RTHCPHYS)
639 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
640 0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
641 else
642 {
643 mach_vm_address_t PhysMask = 0;
644 PhysMask = ~(mach_vm_address_t)0;
645 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
646 PhysMask >>= 1;
647 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
648 PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
649
650 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
651 PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
652 }
653 return rc;
654}
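/*
 * Worked example for the mask translation above (illustration only): for
 * PhysHighest = _4G - 1 the shift loop stops at PhysMask = 0xffffffff, and
 * clearing PAGE_OFFSET_MASK leaves 0xfffff000, i.e. "any page aligned address
 * below 4 GiB" in the format IOBufferMemoryDescriptor::inTaskWithPhysicalMask
 * expects.
 */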
655
656
657DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
658{
659 /** @todo rtR0MemObjNativeAllocPhys / darwin.
660 * This might be a bit problematic and may very well require having to create our own
661 * object which we populate with pages but without mapping it into any address space.
662 * Estimate is 2-3 days.
663 */
664 return VERR_NOT_SUPPORTED;
665}
666
667
668DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
669{
670 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
671
672 /*
673 * Create a descriptor for it (the validation is always true on Intel Macs, but
674 * since it does no harm we keep it in).
675 */
676 int rc = VERR_ADDRESS_TOO_BIG;
677 IOAddressRange aRanges[1] = { { Phys, cb } };
678 if ( aRanges[0].address == Phys
679 && aRanges[0].length == cb)
680 {
681 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
682 kIODirectionInOut, NULL /*task*/);
683 if (pMemDesc)
684 {
685#ifdef __LP64__ /* Grumble! */
686 Assert(Phys == pMemDesc->getPhysicalSegment(0, 0));
687#else
688 Assert(Phys == pMemDesc->getPhysicalSegment64(0, 0));
689#endif
690
691 /*
692 * Create the IPRT memory object.
693 */
694 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
695 if (pMemDarwin)
696 {
697 pMemDarwin->Core.u.Phys.PhysBase = Phys;
698 pMemDarwin->Core.u.Phys.fAllocated = false;
699 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
700 pMemDarwin->pMemDesc = pMemDesc;
701 *ppMem = &pMemDarwin->Core;
702 return VINF_SUCCESS;
703 }
704
705 rc = VERR_NO_MEMORY;
706 pMemDesc->release();
707 }
708 else
709 rc = VERR_MEMOBJ_INIT_FAILED;
710 }
711 else
712 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
713 return rc;
714}
715
716
717/**
718 * Internal worker for locking down pages.
719 *
720 * @return IPRT status code.
721 *
722 * @param ppMem Where to store the memory object pointer.
723 * @param pv First page.
724 * @param cb Number of bytes.
725 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
726 * and RTMEM_PROT_WRITE.
727 * @param Task The task \a pv and \a cb refer to.
728 */
729static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
730{
731 NOREF(fAccess);
732#ifdef USE_VM_MAP_WIRE
733 vm_map_t Map = get_task_map(Task);
734 Assert(Map);
735
736 /*
737 * First try to lock the memory.
738 */
739 int rc = VERR_LOCK_FAILED;
740 kern_return_t kr = vm_map_wire(get_task_map(Task),
741 (vm_map_offset_t)pv,
742 (vm_map_offset_t)pv + cb,
743 VM_PROT_DEFAULT,
744 0 /* not user */);
745 if (kr == KERN_SUCCESS)
746 {
747 /*
748 * Create the IPRT memory object.
749 */
750 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
751 if (pMemDarwin)
752 {
753 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
754 *ppMem = &pMemDarwin->Core;
755 return VINF_SUCCESS;
756 }
757
758 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
759 Assert(kr == KERN_SUCCESS);
760 rc = VERR_NO_MEMORY;
761 }
762
763#else
764
765 /*
766 * Create a descriptor and try lock it (prepare).
767 */
768 int rc = VERR_MEMOBJ_INIT_FAILED;
769 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
770 if (pMemDesc)
771 {
772 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
773 if (IORet == kIOReturnSuccess)
774 {
775 /*
776 * Create the IPRT memory object.
777 */
778 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
779 if (pMemDarwin)
780 {
781 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
782 pMemDarwin->pMemDesc = pMemDesc;
783 *ppMem = &pMemDarwin->Core;
784 return VINF_SUCCESS;
785 }
786
787 pMemDesc->complete();
788 rc = VERR_NO_MEMORY;
789 }
790 else
791 rc = VERR_LOCK_FAILED;
792 pMemDesc->release();
793 }
794#endif
795 return rc;
796}
797
798
799DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
800{
801 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
802}
803
804
805DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
806{
807 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
808}
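/*
 * Hypothetical caller sketch (illustration only, not part of the original
 * file): wiring down an existing kernel buffer via the generic API, which is
 * serviced by rtR0MemObjNativeLock() above with Task = kernel_task.
 */
#if 0 /* illustration only */
static int rtR0MemObjDarwinExampleLockKernelBuf(void *pvBuf, size_t cbBuf)
{
    RTR0MEMOBJ hLock = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjLockKernel(&hLock, pvBuf, cbBuf, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
    {
        /* ... the pages stay resident until the object is freed ... */
        rc = RTR0MemObjFree(hLock, false /*fFreeMappings*/); /* complete() + release() */
    }
    return rc;
}
#endif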
809
810
811DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
812{
813 return VERR_NOT_SUPPORTED;
814}
815
816
817DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
818{
819 return VERR_NOT_SUPPORTED;
820}
821
822
823DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
824 unsigned fProt, size_t offSub, size_t cbSub)
825{
826 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
827
828 /*
829 * Check that the specified alignment is supported.
830 */
831 if (uAlignment > PAGE_SIZE)
832 return VERR_NOT_SUPPORTED;
833
834 /*
835 * Must have a memory descriptor that we can map.
836 */
837 int rc = VERR_INVALID_PARAMETER;
838 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
839 if (pMemToMapDarwin->pMemDesc)
840 {
841#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
842 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
843 0,
844 kIOMapAnywhere | kIOMapDefaultCache,
845 offSub,
846 cbSub);
847#else
848 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
849 0,
850 kIOMapAnywhere | kIOMapDefaultCache,
851 offSub,
852 cbSub);
853#endif
854 if (pMemMap)
855 {
856 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
857 void *pv = (void *)(uintptr_t)VirtAddr;
858 if ((uintptr_t)pv == VirtAddr)
859 {
860 //addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
861 //printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
862
863// /*
864// * Explicitly lock it so that we're sure it is present and that
865// * its PTEs cannot be recycled.
866// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
867// * to the options which causes prepare() to not wire the pages.
868// * This is probably a bug.
869// */
870// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
871// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
872// 1 /* count */,
873// 0 /* offset */,
874// kernel_task,
875// kIODirectionInOut | kIOMemoryTypeVirtual,
876// kIOMapperSystem);
877// if (pMemDesc)
878// {
879// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
880// if (IORet == kIOReturnSuccess)
881// {
882 /* HACK ALERT! */
883 rtR0MemObjDarwinTouchPages(pv, cbSub);
884 /** @todo First, the memory should've been mapped by now, and second, it
885 * should have the wired attribute in the PTE (bit 9). Neither
886 * seems to be the case. The disabled locking code doesn't make any
887 * difference, which is extremely odd, and breaks
888 * rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64 for the
889 * lock descriptor. */
890 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
891 //printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2);
892
893 /*
894 * Create the IPRT memory object.
895 */
896 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
897 pv, cbSub);
898 if (pMemDarwin)
899 {
900 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
901 pMemDarwin->pMemMap = pMemMap;
902// pMemDarwin->pMemDesc = pMemDesc;
903 *ppMem = &pMemDarwin->Core;
904 return VINF_SUCCESS;
905 }
906
907// pMemDesc->complete();
908// rc = VERR_NO_MEMORY;
909// }
910// else
911// rc = RTErrConvertFromDarwinIO(IORet);
912// pMemDesc->release();
913// }
914// else
915// rc = VERR_MEMOBJ_INIT_FAILED;
916 }
917 else
918 rc = VERR_ADDRESS_TOO_BIG;
919 pMemMap->release();
920 }
921 else
922 rc = VERR_MAP_FAILED;
923 }
924 return rc;
925}
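/*
 * Hypothetical caller sketch (illustration only, not part of the original
 * file): mapping an existing memory object into the kernel address space via
 * the generic API, which lands in rtR0MemObjNativeMapKernel() above. Note the
 * (void *)-1 fixed-address argument, the only value that function accepts.
 */
#if 0 /* illustration only */
static int rtR0MemObjDarwinExampleMapIntoKernel(RTR0MEMOBJ hToMap, PRTR0MEMOBJ phMapObj, void **ppv)
{
    int rc = RTR0MemObjMapKernel(phMapObj, hToMap, (void *)-1 /*pvFixed*/,
                                 PAGE_SIZE /*uAlignment*/, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
        *ppv = RTR0MemObjAddress(*phMapObj);
    return rc;
}
#endif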
926
927
928DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
929{
930 /*
931 * Check for unsupported things.
932 */
933 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
934 if (uAlignment > PAGE_SIZE)
935 return VERR_NOT_SUPPORTED;
936
937 /*
938 * Must have a memory descriptor.
939 */
940 int rc = VERR_INVALID_PARAMETER;
941 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
942 if (pMemToMapDarwin->pMemDesc)
943 {
944#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
945 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
946 0,
947 kIOMapAnywhere | kIOMapDefaultCache,
948 0 /* offset */,
949 0 /* length */);
950#else
951 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
952 0,
953 kIOMapAnywhere | kIOMapDefaultCache);
954#endif
955 if (pMemMap)
956 {
957 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
958 void *pv = (void *)(uintptr_t)VirtAddr;
959 if ((uintptr_t)pv == VirtAddr)
960 {
961 /*
962 * Create the IPRT memory object.
963 */
964 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
965 pv, pMemToMapDarwin->Core.cb);
966 if (pMemDarwin)
967 {
968 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
969 pMemDarwin->pMemMap = pMemMap;
970 *ppMem = &pMemDarwin->Core;
971 return VINF_SUCCESS;
972 }
973
974 rc = VERR_NO_MEMORY;
975 }
976 else
977 rc = VERR_ADDRESS_TOO_BIG;
978 pMemMap->release();
979 }
980 else
981 rc = VERR_MAP_FAILED;
982 }
983 return rc;
984}
985
986
987DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
988{
989 /* Get the map for the object. */
990 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
991 if (!pVmMap)
992 return VERR_NOT_SUPPORTED;
993
994 /*
995 * Convert the protection.
996 */
997 vm_prot_t fMachProt;
998 switch (fProt)
999 {
1000 case RTMEM_PROT_NONE:
1001 fMachProt = VM_PROT_NONE;
1002 break;
1003 case RTMEM_PROT_READ:
1004 fMachProt = VM_PROT_READ;
1005 break;
1006 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1007 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1008 break;
1009 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1010 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1011 break;
1012 case RTMEM_PROT_WRITE:
1013 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1014 break;
1015 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1016 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1017 break;
1018 case RTMEM_PROT_EXEC:
1019 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1020 break;
1021 default:
1022 AssertFailedReturn(VERR_INVALID_PARAMETER);
1023 }
1024
1025 /*
1026 * Do the job.
1027 */
1028 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1029 kern_return_t krc = vm_protect(pVmMap,
1030 Start,
1031 cbSub,
1032 false,
1033 fMachProt);
1034 if (krc != KERN_SUCCESS)
1035 return RTErrConvertFromDarwinKern(krc);
1036
1037 /*
1038 * Touch the pages if they should be writable afterwards and accessible
1039 * from code which should never fault. vm_protect() may leave pages
1040 * temporarily write protected, possibly due to pmap no-upgrade rules?
1041 *
1042 * This is the same trick (or HACK ALERT if you like) as applied in
1043 * rtR0MemObjNativeMapKernel.
1044 */
1045 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1046 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1047 {
1048 if (fProt & RTMEM_PROT_WRITE)
1049 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1050 /*
1051 * Sniff (read) read-only pages too, just to be sure.
1052 */
1053 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1054 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1055 }
1056
1057 return VINF_SUCCESS;
1058}
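/*
 * Hypothetical caller sketch (illustration only, not part of the original
 * file): switching an allocation to read/write/execute through the generic
 * API, the same operation the experimental fExecutable path in
 * rtR0MemObjNativeAllocWorker() performs internally.
 */
#if 0 /* illustration only */
static int rtR0MemObjDarwinExampleMakeExecutable(RTR0MEMOBJ hMemObj, size_t cb)
{
    return RTR0MemObjProtect(hMemObj, 0 /*offSub*/, cb,
                             RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
}
#endif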
1059
1060
1061DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1062{
1063 RTHCPHYS PhysAddr;
1064 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1065
1066#ifdef USE_VM_MAP_WIRE
1067 /*
1068 * Locked memory doesn't have a memory descriptor and
1069 * needs to be handled differently.
1070 */
1071 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1072 {
1073 ppnum_t PgNo;
1074 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1075 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1076 else
1077 {
1078 /*
1079 * From what I can tell, Apple seems to have locked up all the
1080 * available interfaces that could help us obtain the pmap_t of a task
1081 * or vm_map_t.
1082 *
1083 * So, we'll have to figure out where in the vm_map_t structure it is
1084 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1085 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1086 * Not nice, but it will hopefully do the job in a reliable manner...
1087 *
1088 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1089 */
1090 static int s_offPmap = -1;
1091 if (RT_UNLIKELY(s_offPmap == -1))
1092 {
1093 pmap_t const *p = (pmap_t *)kernel_map;
1094 pmap_t const * const pEnd = p + 64;
1095 for (; p < pEnd; p++)
1096 if (*p == kernel_pmap)
1097 {
1098 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1099 break;
1100 }
1101 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1102 }
1103 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1104 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1105 }
1106
1107 AssertReturn(PgNo, NIL_RTHCPHYS);
1108 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1109 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1110 }
1111 else
1112#endif /* USE_VM_MAP_WIRE */
1113 {
1114 /*
1115 * Get the memory descriptor.
1116 */
1117 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1118 if (!pMemDesc)
1119 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1120 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1121
1122 /*
1123 * If we've got a memory descriptor, use getPhysicalSegment64() (or getPhysicalSegment() on LP64).
1124 */
1125#ifdef __LP64__ /* Grumble! */
1126 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
1127#else
1128 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1129#endif
1130 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1131 PhysAddr = Addr;
1132 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1133 }
1134
1135 return PhysAddr;
1136}
1137