VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@ 18130

Last change on this file since 18130 was 16329, checked in by vboxsync, 16 years ago

memobj-r0drv-darwin.cpp: A few APIs were retired or deprecated in 10.6/AMD64; simplified the allocators to all use IOBufferMemoryDescriptor::inTaskWithPhysicalMask() in various ways.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 24.4 KB
/* $Id: memobj-r0drv-darwin.cpp 16329 2009-01-28 20:20:33Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Darwin.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-darwin-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"

/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Darwin version of the memory object structure.
 */
typedef struct RTR0MEMOBJDARWIN
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to the memory descriptor created for allocated and locked memory. */
    IOMemoryDescriptor *pMemDesc;
    /** Pointer to the memory mapping object for mapped memory. */
    IOMemoryMap        *pMemMap;
} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

    /*
     * Release the IOMemoryDescriptor/IOMemoryMap associated with the object.
     */
    if (pMemDarwin->pMemDesc)
    {
        if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
            pMemDarwin->pMemDesc->complete(); /* paranoia */
        pMemDarwin->pMemDesc->release();
        pMemDarwin->pMemDesc = NULL;
        Assert(!pMemDarwin->pMemMap);
    }
    else if (pMemDarwin->pMemMap)
    {
        pMemDarwin->pMemMap->release();
        pMemDarwin->pMemMap = NULL;
    }

    /*
     * Release any memory that we've allocated or locked.
     */
    switch (pMemDarwin->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_CONT:
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
#ifdef USE_VM_MAP_WIRE
            vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                         ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
                         : kernel_map;
            kern_return_t kr = vm_map_unwire(Map,
                                             (vm_map_offset_t)pMemDarwin->Core.pv,
                                             (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
                                             0 /* not user */);
            Assert(kr == KERN_SUCCESS); /** @todo don't ignore... */
#endif
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
            /*if (pMemDarwin->Core.u.Phys.fAllocated)
                IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
            Assert(!pMemDarwin->Core.u.Phys.fAllocated);
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_RES_VIRT:
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_MAPPING:
            /* nothing to do here. */
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
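
/*
 * A minimal usage sketch of the generic IPRT API this backend implements
 * (illustrative only and compiled out; it assumes the iprt/memobj.h
 * declarations of this era):
 */
#if 0
static int rtDarwinMemObjUsageExample(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 4 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        /* The object is mapped and backed; query the kernel mapping and the
           physical address of the first page (both served by this backend). */
        void    *pv   = RTR0MemObjAddress(hMemObj);
        RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(pv); NOREF(Phys);

        /* Ends up in rtR0MemObjNativeFree, releasing the IOMemoryDescriptor. */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif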


/**
 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
 *
 * @returns IPRT status code.
 * @retval  VERR_ADDRESS_TOO_BIG try another way.
 *
 * @param   ppMem           Where to return the memory object.
 * @param   cb              The page aligned memory size.
 * @param   fExecutable     Whether the mapping needs to be executable.
 * @param   fContiguous     Whether the backing memory needs to be contiguous.
 * @param   PhysMask        The mask for the backing memory (i.e. range). Use 0 if
 *                          you don't care that much or are just speculating.
 * @param   MaxPhysAddr     The max address to verify the result against. Use
 *                          UINT64_MAX if it doesn't matter.
 * @param   enmType         The object type.
 */
static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
                                       bool fExecutable, bool fContiguous,
                                       mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
                                       RTR0MEMOBJTYPE enmType)
{
    /*
     * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
     * actually respects the physical memory mask (10.5.x is certainly busted),
     * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
     *
     * The kIOMemorySharingTypeMask flag just forces the result to be page aligned.
     */
    int rc;
    IOBufferMemoryDescriptor *pMemDesc =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                         kIOMemorySharingTypeMask
                                                         | kIODirectionInOut
                                                         | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
                                                         cb,
                                                         PhysMask);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            void *pv = pMemDesc->getBytesNoCopy(0, cb);
            if (pv)
            {
                /*
                 * Check that every page is below MaxPhysAddr (typically 4GB).
                 */
                addr64_t AddrPrev = 0;
                MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
                for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
                {
#ifdef __LP64__ /* Grumble! */
                    addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
#else
                    addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
#endif
                    if (   Addr > MaxPhysAddr
                        || !Addr
                        || (Addr & PAGE_OFFSET_MASK)
                        || (   fContiguous
                            && off
                            && Addr != AddrPrev + PAGE_SIZE))
                    {
                        /* Buggy API, try to allocate the memory another way. */
                        pMemDesc->release();
                        if (PhysMask)
                            LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx - buggy API!\n",
                                       off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
                        return VERR_ADDRESS_TOO_BIG;
                    }
                    AddrPrev = Addr;
                }

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
                if (pMemDarwin)
                {
                    if (fContiguous)
                    {
#ifdef __LP64__ /* Grumble! */
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
#else
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
#endif
                        RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
                        if (enmType == RTR0MEMOBJTYPE_CONT)
                            pMemDarwin->Core.u.Cont.Phys = PhysBase;
                        else if (enmType == RTR0MEMOBJTYPE_PHYS)
                            pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
                        else
                            AssertMsgFailed(("enmType=%d\n", enmType));
                    }

                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;
        }
        else
            rc = RTErrConvertFromDarwinIO(IORet);
        pMemDesc->release();
    }
    else
        rc = VERR_MEMOBJ_INIT_FAILED;
    Assert(rc != VERR_ADDRESS_TOO_BIG);
    return rc;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                       0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try IOMallocPhysical/IOMallocAligned first.
     * Then try optimistically without a physical address mask, which will always
     * end up using IOMallocAligned.
     *
     * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
     */
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    if (rc == VERR_ADDRESS_TOO_BIG)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                         0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    return rc;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
                                         RTR0MEMOBJTYPE_CONT);

    /*
     * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
     * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
     */
    if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
                                         RTR0MEMOBJTYPE_CONT);
    return rc;
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /*
     * Translate the PhysHighest address into a mask.
     */
    int rc;
    if (PhysHighest == NIL_RTHCPHYS)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
                                         0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
    else
    {
        mach_vm_address_t PhysMask = 0;
        PhysMask = ~(mach_vm_address_t)0;
        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
            PhysMask >>= 1;
        AssertReturn(PhysMask + 1 >= cb, VERR_INVALID_PARAMETER); /* the masked range must be able to hold the allocation */
        PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;

        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
                                         PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
    }
    return rc;
}
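
/*
 * Worked example of the PhysHighest -> PhysMask translation above: for
 * PhysHighest = 0x7FFFFFF (128 MB - 1) and 4 KB pages, the loop shrinks
 * ~0 down to 0x7FFFFFF, the smallest all-ones mask covering the limit,
 * and clearing PAGE_OFFSET_MASK yields 0x7FFF000, i.e. "any page aligned
 * physical address below 128 MB", which is the form that
 * IOBufferMemoryDescriptor::inTaskWithPhysicalMask() expects.
 */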


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhysNC / darwin.
     * This might be a bit problematic and may very well require having to create our own
     * object which we populate with pages but without mapping it into any address space.
     * Estimate is 2-3 days.
     */
    return VERR_NOT_SUPPORTED;
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Create a descriptor for it (the validation always holds on Intel Macs,
     * but since it does no harm we keep it in).
     */
    int rc = VERR_ADDRESS_TOO_BIG;
    IOAddressRange aRanges[1] = { { Phys, cb } };
    if (   aRanges[0].address == Phys
        && aRanges[0].length == cb)
    {
        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                             kIODirectionInOut, NULL /*task*/);
        if (pMemDesc)
        {
            Assert(Phys == pMemDesc->getPhysicalAddress());

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Phys.PhysBase = Phys;
                pMemDarwin->Core.u.Phys.fAllocated = false;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            pMemDesc->release();
        }
        else
            rc = VERR_MEMOBJ_INIT_FAILED;
    }
    else
        AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
    return rc;
}
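
/*
 * A usage sketch for the physical-range entry point above (illustrative only
 * and compiled out; the physical address is a made-up example value):
 */
#if 0
static int rtDarwinEnterPhysExample(void)
{
    /* Wrap an existing physical range (e.g. a device aperture) in a memory
       object, then map it into the kernel via rtR0MemObjNativeMapKernel. */
    RTR0MEMOBJ hPhysObj;
    int rc = RTR0MemObjEnterPhys(&hPhysObj, UINT64_C(0xfebf0000) /* example */, PAGE_SIZE);
    if (RT_SUCCESS(rc))
    {
        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapKernel(&hMapObj, hPhysObj, (void *)-1 /* anywhere */,
                                 0 /* uAlignment */, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            RTR0MemObjFree(hMapObj, false /* fFreeMappings */);
        RTR0MemObjFree(hPhysObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif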


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem   Where to store the memory object pointer.
 * @param   pv      First page.
 * @param   cb      Number of bytes.
 * @param   Task    The task \a pv and \a cb refer to.
 */
static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
{
#ifdef USE_VM_MAP_WIRE
    vm_map_t Map = get_task_map(Task);
    Assert(Map);

    /*
     * First try to lock the memory.
     */
    int rc = VERR_LOCK_FAILED;
    kern_return_t kr = vm_map_wire(Map,
                                   (vm_map_offset_t)pv,
                                   (vm_map_offset_t)pv + cb,
                                   VM_PROT_DEFAULT,
                                   0 /* not user */);
    if (kr == KERN_SUCCESS)
    {
        /*
         * Create the IPRT memory object.
         */
        PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
        if (pMemDarwin)
        {
            pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
            *ppMem = &pMemDarwin->Core;
            return VINF_SUCCESS;
        }

        kr = vm_map_unwire(Map, (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
        Assert(kr == KERN_SUCCESS);
        rc = VERR_NO_MEMORY;
    }

#else

    /*
     * Create a descriptor and try to lock it (prepare).
     */
    int rc = VERR_MEMOBJ_INIT_FAILED;
    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            pMemDesc->complete();
            rc = VERR_NO_MEMORY;
        }
        else
            rc = VERR_LOCK_FAILED;
        pMemDesc->release();
    }
#endif
    return rc;
}
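
/*
 * A sketch of driving the lock worker above through the generic API
 * (illustrative only and compiled out; it assumes the iprt/memobj.h
 * signatures of this era, which take no access-flag parameter):
 */
#if 0
static int rtDarwinLockExample(void *pvBuf, size_t cbBuf)
{
    /* Wires down an existing kernel buffer; lands in rtR0MemObjNativeLock
       with Task = kernel_task. */
    RTR0MEMOBJ hLockObj;
    int rc = RTR0MemObjLockKernel(&hLockObj, pvBuf, cbBuf);
    if (RT_SUCCESS(rc))
    {
        /* ... the pages stay resident until the object is freed ... */
        rc = RTR0MemObjFree(hLockObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif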


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, (task_t)R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);

    /*
     * Must have a memory descriptor.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              offSub,
                                                                              cbSub);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task, 0,
                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                              offSub, cbSub);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            if ((uintptr_t)pv == VirtAddr)
            {
                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
    return rc;
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);

    /*
     * Must have a memory descriptor.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              0 /* offset */,
                                                                              0 /* length */);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process, 0,
                                                              kIOMapAnywhere | kIOMapDefaultCache);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            if ((uintptr_t)pv == VirtAddr)
            {
                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = R0Process;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
    return rc;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             *
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             */
            static int s_offPmap = -1;
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        AssertReturn(PgNo, NIL_RTHCPHYS);
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64().
         */
#ifdef __LP64__ /* Grumble! */
        addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
#else
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
#endif
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}