/* $Id: memobj-r0drv-darwin.cpp 26430 2010-02-11 14:23:01Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Darwin.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-darwin-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/alloc.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include "internal/memobj.h"

/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Darwin version of the memory object structure.
 */
typedef struct RTR0MEMOBJDARWIN
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Pointer to the memory descriptor created for allocated and locked memory. */
    IOMemoryDescriptor *pMemDesc;
    /** Pointer to the memory mapping object for mapped memory. */
    IOMemoryMap *pMemMap;
} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
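
/* Note: Core must be the first member so that the generic RTR0MEMOBJINTERNAL
   pointer handed to the rtR0MemObjNative* methods below can simply be cast to
   the Darwin variant, as done throughout this file, e.g.:
       PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem; */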


/**
 * HACK ALERT!
 *
 * Touch the pages to force the kernel to create the page
 * table entries. This is necessary since the kernel gets
 * upset if we take a page fault when preemption is disabled
 * and/or we own a simple lock. It has no problems with us
 * disabling interrupts when taking the traps, weird stuff.
 *
 * @param   pv      Pointer to the first page.
 * @param   cb      The number of bytes.
 */
static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
{
    uint32_t volatile *pu32 = (uint32_t volatile *)pv;
    for (;;)
    {
        ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
        if (cb <= PAGE_SIZE)
            break;
        cb -= PAGE_SIZE;
        pu32 += PAGE_SIZE / sizeof(uint32_t);
    }
}
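
/* Usage sketch (mirrors the RT_STRICT check in rtR0MemObjNativeAllocWorker
   below): touching the pages with preemption already disabled doubles as a
   check that the allocation really is mapped:
       RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
       RTThreadPreemptDisable(&State);
       rtR0MemObjDarwinTouchPages(pv, cb);
       RTThreadPreemptRestore(&State);
 */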


/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem    The memory object.
 */
DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : get_task_map((task_t)pMem->u.Lock.R0Process);

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : get_task_map((task_t)pMem->u.ResVirt.R0Process);

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : get_task_map((task_t)pMem->u.Mapping.R0Process);

        default:
            return NULL;
    }
}

#if 0 /* not necessary after all */
/* My vm_map mockup. */
struct my_vm_map
{
    struct { char pad[8]; } lock;
    struct my_vm_map_header
    {
        struct vm_map_links
        {
            void *prev;
            void *next;
            vm_map_offset_t start;
            vm_map_offset_t end;
        } links;
        int nentries;
        boolean_t entries_pageable;
    } hdr;
    pmap_t pmap;
    vm_map_size_t size;
};


/**
 * Gets the minimum map address; this is similar to get_map_min.
 *
 * @returns The start address of the map.
 * @param   pMap    The map.
 */
static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
{
    /* Lazy discovery of the correct offset; the Apple guys are a wonderfully secretive bunch. */
    static int32_t volatile s_offAdjust = INT32_MAX;
    int32_t off = s_offAdjust;
    if (off == INT32_MAX)
    {
        for (off = 0; ; off += sizeof(pmap_t))
        {
            if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
                break;
            AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
        }
        ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
    }

    /* calculate it. */
    struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
    return pMyMap->hdr.links.start;
}
#endif /* unused */

#ifdef RT_STRICT

/**
 * Read from a physical page.
 *
 * @param   HCPhys      The address to start reading at.
 * @param   cb          How many bytes to read.
 * @param   pvDst       Where to put the bytes. This is zero'ed on failure.
 */
static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
{
    memset(pvDst, '\0', cb);

    IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN(cb, PAGE_SIZE) } };
    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                         kIODirectionIn, NULL /*task*/);
    if (pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
#else
        IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
#endif
        if (pMemMap)
        {
            void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
            memcpy(pvDst, pvSrc, cb);
            pMemMap->release();
        }
        else
            printf("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);

        pMemDesc->release();
    }
    else
        printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
}


/**
 * Gets the PTE for a page.
 *
 * @returns the PTE.
 * @param   pvPage      The virtual address to get the PTE for.
 */
uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
{
    RTUINT64U   u64;
    RTCCUINTREG cr3 = ASMGetCR3();
    RTCCUINTREG cr4 = ASMGetCR4();
    bool        fPAE = false;
    bool        fLMA = false;
    if (cr4 & RT_BIT(5) /*X86_CR4_PAE*/)
    {
        fPAE = true;
        uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
        if (fAmdFeatures & RT_BIT(29) /*X86_CPUID_AMD_FEATURE_EDX_LONG_MODE*/)
        {
            uint64_t efer = ASMRdMsr(0xc0000080 /*MSR_K6_EFER*/);
            if (efer & RT_BIT(10) /*MSR_K6_EFER_LMA*/)
                fLMA = true;
        }
    }

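    /*
     * Walk the page tables by hand, reading each entry via its physical
     * address. At every level the index is taken from the virtual address
     * and scaled by the entry size; e.g. in long mode the PML4 index is
     * bits 39-47, so the entry is at (cr3 & ~0xfff) + ((uVirt >> 39) & 0x1ff) * 8.
     */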
    if (fLMA)
    {
        /* PML4 */
        rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> 39) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
        {
            printf("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
            return 0;
        }

        /* PDPTR */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 30) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
        {
            printf("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
            return 0;
        }
        if (u64.u & RT_BIT(7) /* big */)
            return (u64.u & ~(uint64_t)(_1G - 1)) | ((uintptr_t)pvPage & (_1G - 1));

        /* PD */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
        {
            printf("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
            return 0;
        }
        if (u64.u & RT_BIT(7) /* big */)
            return (u64.u & ~(uint64_t)(_2M - 1)) | ((uintptr_t)pvPage & (_2M - 1));

        /* PT */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
        {
            printf("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
            return 0;
        }
        return u64.u;
    }

    if (fPAE)
    {
        /* PDPTR */
        rtR0MemObjDarwinReadPhys((cr3 & 0xffffffe0 /*X86_CR3_PAE_PAGE_MASK*/) | (((uintptr_t)pvPage >> 30) & 0x3) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
            return 0;

        /* PD */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
            return 0;
        if (u64.u & RT_BIT(7) /* big */)
            return (u64.u & ~(uint64_t)(_2M - 1)) | ((uintptr_t)pvPage & (_2M - 1));

        /* PT */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & RT_BIT(0) /* present */))
            return 0;
        return u64.u;
    }

    /* PD */
    rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 22) & 0x3ff) * 4, 4, &u64);
    if (!(u64.au32[0] & RT_BIT(0) /* present */))
        return 0;
    if (u64.au32[0] & RT_BIT(7) /* big */)
        return (u64.u & ~(uint64_t)(_2M - 1)) | ((uintptr_t)pvPage & (_2M - 1));

    /* PT */
    rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x3ff) * 4, 4, &u64);
    if (!(u64.au32[0] & RT_BIT(0) /* present */))
        return 0;
    return u64.au32[0];
}

#endif /* RT_STRICT */

int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

    /*
     * Release the IOMemoryDescriptor and/or IOMemoryMap associated with the object.
     */
    if (pMemDarwin->pMemDesc)
    {
        if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
            pMemDarwin->pMemDesc->complete(); /* paranoia */
        pMemDarwin->pMemDesc->release();
        pMemDarwin->pMemDesc = NULL;
    }

    if (pMemDarwin->pMemMap)
    {
        pMemDarwin->pMemMap->release();
        pMemDarwin->pMemMap = NULL;
    }

    /*
     * Release any memory that we've allocated or locked.
     */
    switch (pMemDarwin->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_CONT:
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
#ifdef USE_VM_MAP_WIRE
            vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                         ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
                         : kernel_map;
            kern_return_t kr = vm_map_unwire(Map,
                                             (vm_map_offset_t)pMemDarwin->Core.pv,
                                             (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
                                             0 /* not user */);
            Assert(kr == KERN_SUCCESS); /** @todo don't ignore... */
#endif
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
            /*if (pMemDarwin->Core.u.Phys.fAllocated)
                IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
            Assert(!pMemDarwin->Core.u.Phys.fAllocated);
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_RES_VIRT:
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;

        case RTR0MEMOBJTYPE_MAPPING:
            /* nothing to do here. */
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}



/**
 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
 *
 * @returns IPRT status code.
 * @retval  VERR_ADDRESS_TOO_BIG try another way.
 *
 * @param   ppMem           Where to return the memory object.
 * @param   cb              The page aligned memory size.
 * @param   fExecutable     Whether the mapping needs to be executable.
 * @param   fContiguous     Whether the backing memory needs to be contiguous.
 * @param   PhysMask        The mask for the backing memory (i.e. range). Use 0 if
 *                          you don't care that much or are speculating.
 * @param   MaxPhysAddr     The max address to verify the result against. Use
 *                          UINT64_MAX if it doesn't matter.
 * @param   enmType         The object type.
 */
static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
                                       bool fExecutable, bool fContiguous,
                                       mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
                                       RTR0MEMOBJTYPE enmType)
{
    /*
     * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
     * actually respects the physical memory mask (10.5.x is certainly busted),
     * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
     *
     * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
     */
    int rc;
    IOBufferMemoryDescriptor *pMemDesc =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                         kIOMemoryKernelUserShared
                                                         | kIODirectionInOut
                                                         | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
                                                         cb,
                                                         PhysMask);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            void *pv = pMemDesc->getBytesNoCopy(0, cb);
            if (pv)
            {
                /*
                 * Check that it's all below the max physical address, page
                 * aligned, and (if requested) contiguous.
                 */
                addr64_t AddrPrev = 0;
                MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
                for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
                {
#ifdef __LP64__ /* Grumble! */
                    addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL);
#else
                    addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
#endif
                    if (   Addr > MaxPhysAddr
                        || !Addr
                        || (Addr & PAGE_OFFSET_MASK)
                        || (   fContiguous
                            && off
                            && Addr != AddrPrev + PAGE_SIZE))
                    {
                        /* Buggy API, try allocate the memory another way. */
                        pMemDesc->release();
                        if (PhysMask)
                            LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMask=%llx - buggy API!\n",
                                       off, Addr, AddrPrev, MaxPhysAddr, PhysMask));
                        return VERR_ADDRESS_TOO_BIG;
                    }
                    AddrPrev = Addr;
                }

#ifdef RT_STRICT
                /* check that the memory is actually mapped. */
                //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
                //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
                RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
                RTThreadPreemptDisable(&State);
                rtR0MemObjDarwinTouchPages(pv, cb);
                RTThreadPreemptRestore(&State);
#endif

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
                if (pMemDarwin)
                {
                    if (fContiguous)
                    {
#ifdef __LP64__ /* Grumble! */
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL);
#else
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
#endif
                        RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
                        if (enmType == RTR0MEMOBJTYPE_CONT)
                            pMemDarwin->Core.u.Cont.Phys = PhysBase;
                        else if (enmType == RTR0MEMOBJTYPE_PHYS)
                            pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
                        else
                            AssertMsgFailed(("enmType=%d\n", enmType));
                    }

                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;
        }
        else
            rc = RTErrConvertFromDarwinIO(IORet);
        pMemDesc->release();
    }
    else
        rc = VERR_MEMOBJ_INIT_FAILED;
    Assert(rc != VERR_ADDRESS_TOO_BIG);
    return rc;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                       0 /* PhysMask */, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try IOMallocPhysical/IOMallocAligned first.
     * Then try optimistically without a physical address mask, which will always
     * end up using IOMallocAligned.
     *
     * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
     */
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    if (rc == VERR_ADDRESS_TOO_BIG)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */,
                                         0 /* PhysMask */, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    return rc;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
                                         RTR0MEMOBJTYPE_CONT);

    /*
     * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
     * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
     */
    if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
                                         ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
                                         RTR0MEMOBJTYPE_CONT);
    return rc;
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    /** @todo */
    if (   uAlignment != 0
        && uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Translate the PhysHighest address into a mask.
     */
    int rc;
    if (PhysHighest == NIL_RTHCPHYS)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
                                         0 /* PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
    else
    {
        mach_vm_address_t PhysMask = ~(mach_vm_address_t)0;
        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
            PhysMask >>= 1;
        AssertReturn(PhysMask + 1 >= cb, VERR_INVALID_PARAMETER);
        PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
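        /* E.g. PhysHighest = _4G - 1: the loop leaves PhysMask = 0xffffffff,
           and clearing the page offset bits gives 0xfffff000, i.e. a page
           aligned mask covering the low 4GB. */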

        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, true /* fExecutable */, true /* fContiguous */,
                                         PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
    }
    return rc;
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /** @todo rtR0MemObjNativeAllocPhys / darwin.
     * This might be a bit problematic and may very well require having to create our own
     * object which we populate with pages but without mapping it into any address space.
     * Estimate is 2-3 days.
     */
    return VERR_NOT_SUPPORTED;
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Create a descriptor for it (the validation is always true on Intel Macs,
     * but since it doesn't harm us, keep it in).
     */
    int rc = VERR_ADDRESS_TOO_BIG;
    IOAddressRange aRanges[1] = { { Phys, cb } };
    if (   aRanges[0].address == Phys
        && aRanges[0].length == cb)
    {
        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                             kIODirectionInOut, NULL /*task*/);
        if (pMemDesc)
        {
#ifdef __LP64__ /* Grumble! */
            Assert(Phys == pMemDesc->getPhysicalSegment(0, 0));
#else
            Assert(Phys == pMemDesc->getPhysicalSegment64(0, 0));
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Phys.PhysBase = Phys;
                pMemDarwin->Core.u.Phys.fAllocated = false;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            pMemDesc->release();
        }
        else
            rc = VERR_MEMOBJ_INIT_FAILED;
    }
    else
        AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
    return rc;
}


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem       Where to store the memory object pointer.
 * @param   pv          First page.
 * @param   cb          Number of bytes.
 * @param   fAccess     The desired access, a combination of RTMEM_PROT_READ
 *                      and RTMEM_PROT_WRITE.
 * @param   Task        The task \a pv and \a cb refers to.
 */
static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
{
    NOREF(fAccess);
#ifdef USE_VM_MAP_WIRE
    vm_map_t Map = get_task_map(Task);
    Assert(Map);

    /*
     * First try lock the memory.
     */
    int rc = VERR_LOCK_FAILED;
    kern_return_t kr = vm_map_wire(Map,
                                   (vm_map_offset_t)pv,
                                   (vm_map_offset_t)pv + cb,
                                   VM_PROT_DEFAULT,
                                   0 /* not user */);
    if (kr == KERN_SUCCESS)
    {
        /*
         * Create the IPRT memory object.
         */
        PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
        if (pMemDarwin)
        {
            pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
            *ppMem = &pMemDarwin->Core;
            return VINF_SUCCESS;
        }

        kr = vm_map_unwire(Map, (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
        Assert(kr == KERN_SUCCESS);
        rc = VERR_NO_MEMORY;
    }

#else

    /*
     * Create a descriptor and try lock it (prepare).
     */
    int rc = VERR_MEMOBJ_INIT_FAILED;
    IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
    if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }

            pMemDesc->complete();
            rc = VERR_NO_MEMORY;
        }
        else
            rc = VERR_LOCK_FAILED;
        pMemDesc->release();
    }
#endif
    return rc;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Must have a memory descriptor that we can map.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              offSub,
                                                                              cbSub);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
                                                              0,
                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                              offSub,
                                                              cbSub);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            if ((uintptr_t)pv == VirtAddr)
            {
                //addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
                //printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);

//                /*
//                 * Explicitly lock it so that we're sure it is present and that
//                 * its PTEs cannot be recycled.
//                 * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
//                 *       to the options which causes prepare() to not wire the pages.
//                 *       This is probably a bug.
//                 */
//                IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
//                IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
//                                                                               1 /* count */,
//                                                                               0 /* offset */,
//                                                                               kernel_task,
//                                                                               kIODirectionInOut | kIOMemoryTypeVirtual,
//                                                                               kIOMapperSystem);
//                if (pMemDesc)
//                {
//                    IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
//                    if (IORet == kIOReturnSuccess)
//                    {
                /* HACK ALERT! */
                rtR0MemObjDarwinTouchPages(pv, cbSub);
                /** @todo First, the memory should've been mapped by now, and second, it
                 *        should have the wired attribute in the PTE (bit 9). Neither
                 *        seems to be the case. The disabled locking code doesn't make any
                 *        difference, which is extremely odd, and breaks
                 *        rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64 for
                 *        the lock descriptor). */
                //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
                //printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2);

                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, cbSub);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
                    pMemDarwin->pMemMap = pMemMap;
//                    pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

//                    pMemDesc->complete();
//                    rc = VERR_NO_MEMORY;
//                }
//                else
//                    rc = RTErrConvertFromDarwinIO(IORet);
//                pMemDesc->release();
//            }
//            else
//                rc = VERR_MEMOBJ_INIT_FAILED;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
    return rc;
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported things.
     */
    AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Must have a memory descriptor.
     */
    int rc = VERR_INVALID_PARAMETER;
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    if (pMemToMapDarwin->pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              0 /* offset */,
                                                                              0 /* length */);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
                                                              0,
                                                              kIOMapAnywhere | kIOMapDefaultCache);
#endif
        if (pMemMap)
        {
            IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
            void *pv = (void *)(uintptr_t)VirtAddr;
            if ((uintptr_t)pv == VirtAddr)
            {
                /*
                 * Create the IPRT memory object.
                 */
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = R0Process;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_ADDRESS_TOO_BIG;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
    return rc;
}


int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    /* Get the map for the object. */
    vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    /* Convert the protection. */
    vm_prot_t fMachProt;
    switch (fProt)
    {
        case RTMEM_PROT_NONE:
            fMachProt = VM_PROT_NONE;
            break;
        case RTMEM_PROT_READ:
            fMachProt = VM_PROT_READ;
            break;
        case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
            fMachProt = VM_PROT_READ | VM_PROT_WRITE;
            break;
        case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
            fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
            break;
        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
            fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE;
            break;
        case RTMEM_PROT_EXEC:
            fMachProt = VM_PROT_EXECUTE;
            break;
        default:
            AssertFailedReturn(VERR_INVALID_PARAMETER);
    }

    /* do the job. */
    vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
    kern_return_t krc = vm_protect(pVmMap,
                                   Start,
                                   cbSub,
                                   false,
                                   fMachProt);
    if (krc != KERN_SUCCESS)
        return RTErrConvertFromDarwinKern(krc);
    return VINF_SUCCESS;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    RTHCPHYS PhysAddr;
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

#ifdef USE_VM_MAP_WIRE
    /*
     * Locked memory doesn't have a memory descriptor and
     * needs to be handled differently.
     */
    if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
    {
        ppnum_t PgNo;
        if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        else
        {
            /*
             * From what I can tell, Apple seems to have locked up all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             *
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
             * kernel_map->pmap, we scan kernel_map to locate the structure offset.
             * Not nice, but it will hopefully do the job in a reliable manner...
             *
             * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
             */
            static int s_offPmap = -1;
            if (RT_UNLIKELY(s_offPmap == -1))
            {
                pmap_t const *p = (pmap_t *)kernel_map;
                pmap_t const * const pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
            pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
            PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
        }

        AssertReturn(PgNo, NIL_RTHCPHYS);
        PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
        Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
    }
    else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);

        /*
         * If we've got a memory descriptor, use getPhysicalSegment64()
         * (getPhysicalSegment() on 64-bit kernels).
         */
#ifdef __LP64__ /* Grumble! */
        addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL);
#else
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
#endif
        AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
        PhysAddr = Addr;
        AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
    }

    return PhysAddr;
}
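
/* Illustrative sketch, not part of this file: the platform independent memobj
   code (RTR0MemObjGetPagePhysAddr) queries the pages backing an object roughly
   one at a time like this:
       RTHCPHYS Phys = rtR0MemObjNativeGetPagePhysAddr(&pMemDarwin->Core, iPage);
       Assert(Phys != NIL_RTHCPHYS);
 */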