VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp@94155

Last change on this file since 94155 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

1/* $Id: memobj-r0drv-darwin.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Darwin.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS /* circular dependency otherwise. */
32#include "the-darwin-kernel.h"
33#include "internal/iprt.h"
34#include <iprt/memobj.h>
35
36#include <iprt/asm.h>
37#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
38# include <iprt/x86.h>
39# include <iprt/asm-amd64-x86.h>
40#endif
41#include <iprt/assert.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/param.h>
45#include <iprt/process.h>
46#include <iprt/semaphore.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49#include "internal/memobj.h"
50
51
52/*********************************************************************************************************************************
53* Defined Constants And Macros *
54*********************************************************************************************************************************/
55#define MY_PRINTF(...) do { printf(__VA_ARGS__); kprintf(__VA_ARGS__); } while (0)
56
57/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
58
59
60/*********************************************************************************************************************************
61* Structures and Typedefs *
62*********************************************************************************************************************************/
63/**
64 * The Darwin version of the memory object structure.
65 */
66typedef struct RTR0MEMOBJDARWIN
67{
68 /** The core structure. */
69 RTR0MEMOBJINTERNAL Core;
70 /** Pointer to the memory descriptor created for allocated and locked memory. */
71 IOMemoryDescriptor *pMemDesc;
72 /** Pointer to the memory mapping object for mapped memory. */
73 IOMemoryMap *pMemMap;
74} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
75
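/*
 * Illustrative note (not part of the upstream file): Core is the first member,
 * so the generic RTR0MEMOBJ handle passed to the native functions below can be
 * downcast directly to the Darwin specific type, as done throughout this file:
 *
 *     PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
 *     IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
 */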
76/**
77 * Common thread_call_allocate/thread_call_enter argument package.
78 */
79typedef struct RTR0MEMOBJDARWINTHREADARGS
80{
81 int32_t volatile rc;
82 RTSEMEVENTMULTI hEvent;
83} RTR0MEMOBJDARWINTHREADARGS;
84
85
86/**
87 * Arguments for rtR0MemObjNativeAllockWorkerOnKernelThread.
88 */
89typedef struct RTR0MEMOBJDARWINALLOCARGS
90{
91 RTR0MEMOBJDARWINTHREADARGS Core;
92 PPRTR0MEMOBJINTERNAL ppMem;
93 size_t cb;
94 bool fExecutable;
95 bool fContiguous;
96 mach_vm_address_t PhysMask;
97 uint64_t MaxPhysAddr;
98 RTR0MEMOBJTYPE enmType;
99 size_t uAlignment;
100 const char *pszTag;
101} RTR0MEMOBJDARWINALLOCARGS;
102
103/**
104 * Arguments for rtR0MemObjNativeProtectWorkOnKernelThread.
105 */
106typedef struct RTR0MEMOBJDARWINPROTECTARGS
107{
108 RTR0MEMOBJDARWINTHREADARGS Core;
109 PRTR0MEMOBJINTERNAL pMem;
110 size_t offSub;
111 size_t cbSub;
112 uint32_t fProt;
113} RTR0MEMOBJDARWINPROTECTARGS;
114
115
116/*********************************************************************************************************************************
117* Internal Functions *
118*********************************************************************************************************************************/
119static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1);
120static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt);
121static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1);
122
123
124/**
125 * Touch the pages to force the kernel to create or write-enable the page table
126 * entries.
127 *
128 * This is necessary since the kernel gets upset if we take a page fault when
129 * preemption is disabled and/or we own a simple lock (same thing). It has no
130 * problems with us disabling interrupts when taking the traps, weird stuff.
131 *
132 * (This is basically a way of invoking vm_fault on a range of pages.)
133 *
134 * @param pv Pointer to the first page.
135 * @param cb The number of bytes.
136 */
137static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
138{
139 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
140 for (;;)
141 {
142 ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
143 if (cb <= PAGE_SIZE)
144 break;
145 cb -= PAGE_SIZE;
146 pu32 += PAGE_SIZE / sizeof(uint32_t);
147 }
148}
149
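/*
 * Illustrative usage sketch (not from the upstream file): the touching is done
 * with preemption disabled so that all the faults are taken up front; the same
 * pattern appears in the RT_STRICT checks of rtR0MemObjNativeAllocWorker below:
 *
 *     RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
 *     RTThreadPreemptDisable(&State);
 *     rtR0MemObjDarwinTouchPages(pv, cb);
 *     RTThreadPreemptRestore(&State);
 */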
150
151/**
152 * Read (sniff) every page in the range to make sure there are some page table
153 * entries backing it.
154 *
155 * This is just to be sure vm_protect didn't remove stuff without re-adding it
156 * if someone should try to write-protect something.
157 *
158 * @param pv Pointer to the first page.
159 * @param cb The number of bytes.
160 */
161static void rtR0MemObjDarwinSniffPages(void const *pv, size_t cb)
162{
163 uint32_t volatile *pu32 = (uint32_t volatile *)pv;
164 uint32_t volatile u32Counter = 0;
165 for (;;)
166 {
167 u32Counter += *pu32;
168
169 if (cb <= PAGE_SIZE)
170 break;
171 cb -= PAGE_SIZE;
172 pu32 += PAGE_SIZE / sizeof(uint32_t);
173 }
174}
175
176
177/**
178 * Gets the virtual memory map the specified object is mapped into.
179 *
180 * @returns VM map handle on success, NULL if no map.
181 * @param pMem The memory object.
182 */
183DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
184{
185 switch (pMem->enmType)
186 {
187 case RTR0MEMOBJTYPE_PAGE:
188 case RTR0MEMOBJTYPE_LOW:
189 case RTR0MEMOBJTYPE_CONT:
190 return kernel_map;
191
192 case RTR0MEMOBJTYPE_PHYS:
193 case RTR0MEMOBJTYPE_PHYS_NC:
194 if (pMem->pv)
195 return kernel_map;
196 return NULL;
197
198 case RTR0MEMOBJTYPE_LOCK:
199 return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
200 ? kernel_map
201 : get_task_map((task_t)pMem->u.Lock.R0Process);
202
203 case RTR0MEMOBJTYPE_RES_VIRT:
204 return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
205 ? kernel_map
206 : get_task_map((task_t)pMem->u.ResVirt.R0Process);
207
208 case RTR0MEMOBJTYPE_MAPPING:
209 return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
210 ? kernel_map
211 : get_task_map((task_t)pMem->u.Mapping.R0Process);
212
213 default:
214 return NULL;
215 }
216}
217
218#if 0 /* not necessary after all*/
219/* My vm_map mockup. */
220struct my_vm_map
221{
222 struct { char pad[8]; } lock;
223 struct my_vm_map_header
224 {
225 struct vm_map_links
226 {
227 void *prev;
228 void *next;
229 vm_map_offset_t start;
230 vm_map_offset_t end;
231 } links;
232 int nentries;
233 boolean_t entries_pageable;
234 } hdr;
235 pmap_t pmap;
236 vm_map_size_t size;
237};
238
239
240/**
241 * Gets the minimum map address, this is similar to get_map_min.
242 *
243 * @returns The start address of the map.
244 * @param pMap The map.
245 */
246static vm_map_offset_t rtR0MemObjDarwinGetMapMin(vm_map_t pMap)
247{
248 /* Lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
249 static int32_t volatile s_offAdjust = INT32_MAX;
250 int32_t off = s_offAdjust;
251 if (off == INT32_MAX)
252 {
253 for (off = 0; ; off += sizeof(pmap_t))
254 {
255 if (*(pmap_t *)((uint8_t *)kernel_map + off) == kernel_pmap)
256 break;
257 AssertReturn(off <= RT_MAX(RT_OFFSETOF(struct my_vm_map, pmap) * 4, 1024), 0x1000);
258 }
259 ASMAtomicWriteS32(&s_offAdjust, off - RT_OFFSETOF(struct my_vm_map, pmap));
260 }
261
262 /* calculate it. */
263 struct my_vm_map *pMyMap = (struct my_vm_map *)((uint8_t *)pMap + off);
264 return pMyMap->hdr.links.start;
265}
266#endif /* unused */
267
268#ifdef RT_STRICT
269# if 0 /* unused */
270
271/**
272 * Read from a physical page.
273 *
274 * @param HCPhys The address to start reading at.
275 * @param cb How many bytes to read.
276 * @param pvDst Where to put the bytes. This is zero'd on failure.
277 */
278static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
279{
280 memset(pvDst, '\0', cb);
281
282 IOAddressRange aRanges[1] = { { (mach_vm_address_t)HCPhys, RT_ALIGN_Z(cb, PAGE_SIZE) } };
283 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
284 kIODirectionIn, NULL /*task*/);
285 if (pMemDesc)
286 {
287#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
288 IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
289#else
290 IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
291#endif
292 if (pMemMap)
293 {
294 void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
295 memcpy(pvDst, pvSrc, cb);
296 pMemMap->release();
297 }
298 else
299 MY_PRINTF("rtR0MemObjDarwinReadPhys: createMappingInTask failed; HCPhys=%llx\n", HCPhys);
300
301 pMemDesc->release();
302 }
303 else
304 MY_PRINTF("rtR0MemObjDarwinReadPhys: withAddressRanges failed; HCPhys=%llx\n", HCPhys);
305}
306
307
308/**
309 * Gets the PTE for a page.
310 *
311 * @returns the PTE.
312 * @param pvPage The virtual address to get the PTE for.
313 */
314static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
315{
316 RTUINT64U u64;
317 RTCCUINTREG cr3 = ASMGetCR3();
318 RTCCUINTREG cr4 = ASMGetCR4();
319 bool fPAE = false;
320 bool fLMA = false;
321 if (cr4 & X86_CR4_PAE)
322 {
323 fPAE = true;
324 uint32_t fExtFeatures = ASMCpuId_EDX(0x80000001);
325 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
326 {
327 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
328 if (efer & MSR_K6_EFER_LMA)
329 fLMA = true;
330 }
331 }
332
333 if (fLMA)
334 {
335 /* PML4 */
336 rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> X86_PML4_SHIFT) & X86_PML4_MASK) * 8, 8, &u64);
337 if (!(u64.u & X86_PML4E_P))
338 {
339 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PML4E !p\n", pvPage);
340 return 0;
341 }
342
343 /* PDPTR */
344 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64) * 8, 8, &u64);
345 if (!(u64.u & X86_PDPE_P))
346 {
347 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDPTE !p\n", pvPage);
348 return 0;
349 }
350 if (u64.u & X86_PDPE_LM_PS)
351 return (u64.u & ~(uint64_t)(_1G -1)) | ((uintptr_t)pvPage & (_1G -1));
352
353 /* PD */
354 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
355 if (!(u64.u & X86_PDE_P))
356 {
357 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PDE !p\n", pvPage);
358 return 0;
359 }
360 if (u64.u & X86_PDE_PS)
361 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
362
363 /* PT */
364 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
365 if (!(u64.u & X86_PTE_P))
366 {
367 MY_PRINTF("rtR0MemObjDarwinGetPTE: %p -> PTE !p\n", pvPage);
368 return 0;
369 }
370 return u64.u;
371 }
372
373 if (fPAE)
374 {
375 /* PDPTR */
376 rtR0MemObjDarwinReadPhys((u64.u & X86_CR3_PAE_PAGE_MASK) | (((uintptr_t)pvPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE) * 8, 8, &u64);
377 if (!(u64.u & X86_PDE_P))
378 return 0;
379
380 /* PD */
381 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK) * 8, 8, &u64);
382 if (!(u64.u & X86_PDE_P))
383 return 0;
384 if (u64.u & X86_PDE_PS)
385 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
386
387 /* PT */
388 rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK) * 8, 8, &u64);
389 if (!(u64.u & X86_PTE_P))
390 return 0;
391 return u64.u;
392 }
393
394 /* PD */
395 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PD_SHIFT) & X86_PD_MASK) * 4, 4, &u64);
396 if (!(u64.au32[0] & X86_PDE_P))
397 return 0;
398 if (u64.au32[0] & X86_PDE_PS)
399 return (u64.u & ~(uint64_t)(_2M -1)) | ((uintptr_t)pvPage & (_2M -1));
400
401 /* PT */
402 rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> X86_PT_SHIFT) & X86_PT_MASK) * 4, 4, &u64);
403 if (!(u64.au32[0] & X86_PTE_P))
404 return 0;
405 return u64.au32[0];
406
407 return 0;
408}
409
410# endif /* unused */
411#endif /* RT_STRICT */
412
413DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
414{
415 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
416 IPRT_DARWIN_SAVE_EFL_AC();
417
418 /*
419 * Release the IOMemoryDescriptor or/and IOMemoryMap associated with the object.
420 */
421 if (pMemDarwin->pMemDesc)
422 {
423 pMemDarwin->pMemDesc->complete();
424 pMemDarwin->pMemDesc->release();
425 pMemDarwin->pMemDesc = NULL;
426 }
427
428 if (pMemDarwin->pMemMap)
429 {
430 pMemDarwin->pMemMap->release();
431 pMemDarwin->pMemMap = NULL;
432 }
433
434 /*
435 * Release any memory that we've allocated or locked.
436 */
437 switch (pMemDarwin->Core.enmType)
438 {
439 case RTR0MEMOBJTYPE_LOW:
440 case RTR0MEMOBJTYPE_PAGE:
441 case RTR0MEMOBJTYPE_CONT:
442 break;
443
444 case RTR0MEMOBJTYPE_LOCK:
445 {
446#ifdef USE_VM_MAP_WIRE
447 vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
448 ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
449 : kernel_map;
450 kern_return_t kr = vm_map_unwire(Map,
451 (vm_map_offset_t)pMemDarwin->Core.pv,
452 (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
453 0 /* not user */);
454 AssertRC(kr == KERN_SUCCESS); /** @todo don't ignore... */
455#endif
456 break;
457 }
458
459 case RTR0MEMOBJTYPE_PHYS:
460 /*if (pMemDarwin->Core.u.Phys.fAllocated)
461 IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
462 Assert(!pMemDarwin->Core.u.Phys.fAllocated);
463 break;
464
465 case RTR0MEMOBJTYPE_PHYS_NC:
466 AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
467 IPRT_DARWIN_RESTORE_EFL_AC();
468 return VERR_INTERNAL_ERROR;
469
470 case RTR0MEMOBJTYPE_RES_VIRT:
471 AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
472 IPRT_DARWIN_RESTORE_EFL_AC();
473 return VERR_INTERNAL_ERROR;
474
475 case RTR0MEMOBJTYPE_MAPPING:
476 /* nothing to do here. */
477 break;
478
479 default:
480 AssertMsgFailed(("enmType=%d\n", pMemDarwin->Core.enmType));
481 IPRT_DARWIN_RESTORE_EFL_AC();
482 return VERR_INTERNAL_ERROR;
483 }
484
485 IPRT_DARWIN_RESTORE_EFL_AC();
486 return VINF_SUCCESS;
487}
488
489
490/**
491 * This is a helper function that executes @a pfnWorker in the context of the
492 * kernel_task.
493 *
494 * @returns IPRT status code - result from pfnWorker or dispatching error.
495 * @param pfnWorker The function to call.
496 * @param pArgs The arguments to pass to the function.
497 */
498static int rtR0MemObjDarwinDoInKernelTaskThread(thread_call_func_t pfnWorker, RTR0MEMOBJDARWINTHREADARGS *pArgs)
499{
500 pArgs->rc = VERR_IPE_UNINITIALIZED_STATUS;
501 pArgs->hEvent = NIL_RTSEMEVENTMULTI;
502 int rc = RTSemEventMultiCreate(&pArgs->hEvent);
503 if (RT_SUCCESS(rc))
504 {
505 thread_call_t hCall = thread_call_allocate(pfnWorker, (void *)pArgs);
506 if (hCall)
507 {
508 boolean_t fRc = thread_call_enter(hCall);
509 AssertLogRel(fRc == FALSE);
510
511 rc = RTSemEventMultiWaitEx(pArgs->hEvent, RTSEMWAIT_FLAGS_INDEFINITE | RTSEMWAIT_FLAGS_UNINTERRUPTIBLE,
512 RT_INDEFINITE_WAIT);
513 AssertLogRelRC(rc);
514
515 rc = pArgs->rc;
516 thread_call_free(hCall);
517 }
518 else
519 rc = VERR_NO_MEMORY;
520 RTSemEventMultiDestroy(pArgs->hEvent);
521 }
522 return rc;
523}
524
525
526/**
527 * Signals result to thread waiting in rtR0MemObjDarwinDoInKernelTaskThread.
528 *
529 * @param pArgs The argument structure.
530 * @param rc The IPRT status code to signal.
531 */
532static void rtR0MemObjDarwinSignalThreadWaitinOnTask(RTR0MEMOBJDARWINTHREADARGS volatile *pArgs, int rc)
533{
534 if (ASMAtomicCmpXchgS32(&pArgs->rc, rc, VERR_IPE_UNINITIALIZED_STATUS))
535 {
536 rc = RTSemEventMultiSignal(pArgs->hEvent);
537 AssertLogRelRC(rc);
538 }
539}
540
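/*
 * Illustrative sketch (not from the upstream file) of the kernel_task handshake
 * used by the allocation and protection paths below: the caller packages its
 * parameters in a structure embedding RTR0MEMOBJDARWINTHREADARGS as its first
 * member, dispatches a wrapper via rtR0MemObjDarwinDoInKernelTaskThread, and the
 * wrapper signals the result back through the embedded Core:
 *
 *     RTR0MEMOBJDARWINPROTECTARGS Args;
 *     Args.pMem   = pMem;
 *     Args.offSub = offSub;
 *     Args.cbSub  = cbSub;
 *     Args.fProt  = fProt;
 *     rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
 */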
541
542/**
543 * Kernel memory alloc worker that uses inTaskWithPhysicalMask.
544 *
545 * @returns IPRT status code.
546 * @retval VERR_ADDRESS_TOO_BIG try another way.
547 *
548 * @param ppMem Where to return the memory object.
549 * @param cb The page aligned memory size.
550 * @param fExecutable Whether the mapping needs to be executable.
551 * @param fContiguous Whether the backing memory needs to be contiguous.
552 * @param PhysMask The mask for the backing memory (i.e. range). Use 0 if
553 * you don't care that much or are speculating.
554 * @param MaxPhysAddr The max address to verify the result against. Use
555 * UINT64_MAX if it doesn't matter.
556 * @param enmType The object type.
557 * @param uAlignment The allocation alignment (in bytes).
558 * @param pszTag Allocation tag used for statistics and such.
559 * @param fOnKernelThread Set if we're already on the kernel thread.
560 */
561static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
562 bool fExecutable, bool fContiguous,
563 mach_vm_address_t PhysMask, uint64_t MaxPhysAddr,
564 RTR0MEMOBJTYPE enmType, size_t uAlignment, const char *pszTag, bool fOnKernelThread)
565{
566 int rc;
567
568 /*
569 * Because of process code signing properties leaking into kernel space in
570 * XNU's vm_fault.c code, we have to defer allocations of exec memory to
571 * a thread running in the kernel_task to get consistent results here.
572 *
573 * Trouble strikes in vm_fault_enter() when cs_enforcement_enabled is determined
574 * to be true because current process has the CS_ENFORCEMENT flag, the page flag
575 * vmp_cs_validated is clear, and the protection mask includes VM_PROT_EXECUTE
576 * (pmap_cs_enforced does not apply to macOS it seems). This test seems to go
577 * back to 10.5, though I'm not sure whether it's enabled for macOS that early
578 * on. Only VM_PROT_EXECUTE is problematic for kernel memory, (though
579 * VM_PROT_WRITE on code signed pages is also problematic in theory). As long as
580 * kernel_task doesn't have CS_ENFORCEMENT enabled, we'll be fine switching to it.
581 */
582 if (!fExecutable || fOnKernelThread)
583 { /* likely */ }
584 else
585 {
586 RTR0MEMOBJDARWINALLOCARGS Args;
587 Args.ppMem = ppMem;
588 Args.cb = cb;
589 Args.fExecutable = fExecutable;
590 Args.fContiguous = fContiguous;
591 Args.PhysMask = PhysMask;
592 Args.MaxPhysAddr = MaxPhysAddr;
593 Args.enmType = enmType;
594 Args.uAlignment = uAlignment;
595 Args.pszTag = pszTag;
596 return rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeAllockWorkerOnKernelThread, &Args.Core);
597 }
598
599 /*
600 * Try inTaskWithPhysicalMask first, but since we don't quite trust that it
601 * actually respects the physical memory mask (10.5.x is certainly busted),
602 * we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
603 *
604 * The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
605 *
606 * The kIOMemoryMapperNone flag is required since 10.8.2 (IOMMU changes?).
607 */
608
609 /* This is an old fudge from the snow leopard days: "Is it only on snow leopard?
610 Seen allocating memory for the VM structure, last page corrupted or
611 inaccessible." Made it only apply to snow leopard and older for now. */
612 size_t cbFudged = cb;
613 if (version_major >= 11 /* 11 = 10.7.x = Lion. */)
614 { /* likely */ }
615 else
616 cbFudged += PAGE_SIZE;
617
618 IOOptionBits fOptions = kIOMemoryKernelUserShared | kIODirectionInOut;
619 if (fContiguous)
620 {
621 fOptions |= kIOMemoryPhysicallyContiguous;
622 if ( version_major > 12
623 || (version_major == 12 && version_minor >= 2) /* 10.8.2 = Mountain Kitten */ )
624 fOptions |= kIOMemoryHostPhysicallyContiguous; /* (Just to make ourselves clear, in case the xnu code changes.) */
625 }
626 if (version_major >= 12 /* 12 = 10.8.x = Mountain Kitten */)
627 fOptions |= kIOMemoryMapperNone;
628
629#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 && 0 /* enable when/if necessary */
630 /* Paranoia: Don't misrepresent our intentions, we won't map kernel executable memory into ring-0. */
631 if (fExecutable && version_major >= 11 /* 10.7.x = Lion, as below */)
632 {
633 fOptions &= ~kIOMemoryKernelUserShared;
634 if (uAlignment < PAGE_SIZE)
635 uAlignment = PAGE_SIZE;
636 }
637#endif
638
639 /* The public initWithPhysicalMask virtual method appeared in 10.7.0, in
640 versions 10.5.0 up to 10.7.0 it was private, and 10.4.8-10.5.0 it was
641 x86 only and didn't have the alignment parameter (slot was different too). */
642 uint64_t uAlignmentActual = uAlignment;
643 IOBufferMemoryDescriptor *pMemDesc;
644#if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
645 if (version_major >= 11 /* 11 = 10.7.x = Lion, could probably allow 10.5.0+ here if we really wanted to. */)
646 {
647 /* Starting with 10.6.x the physical mask is ignored if alignment is higher
648 than 1. The assumption seems to be that inTaskWithPhysicalMask() should
649 be used and the alignment inferred from the PhysMask argument. */
650 if (MaxPhysAddr != UINT64_MAX)
651 {
652 Assert(RT_ALIGN_64(PhysMask, uAlignment) == PhysMask);
653 uAlignmentActual = 1;
654 }
655
656 pMemDesc = new IOBufferMemoryDescriptor;
657 if (pMemDesc)
658 {
659 if (pMemDesc->initWithPhysicalMask(kernel_task, fOptions, cbFudged, uAlignmentActual, PhysMask))
660 { /* likely */ }
661 else
662 {
663 pMemDesc->release();
664 pMemDesc = NULL;
665 }
666 }
667 }
668 else
669#endif
670 pMemDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, fOptions, cbFudged, PhysMask);
671 if (pMemDesc)
672 {
673 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
674 if (IORet == kIOReturnSuccess)
675 {
676 void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
677 if (pv)
678 {
679 /*
680 * Check if it's all below 4GB.
681 */
682 addr64_t AddrPrev = 0;
683 MaxPhysAddr &= ~(uint64_t)PAGE_OFFSET_MASK;
684 for (IOByteCount off = 0; off < cb; off += PAGE_SIZE)
685 {
686#ifdef __LP64__
687 addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
688#else
689 addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
690#endif
691 if ( Addr > MaxPhysAddr
692 || !Addr
693 || (Addr & PAGE_OFFSET_MASK)
694 || ( fContiguous
695 && !off
696 && Addr == AddrPrev + PAGE_SIZE))
697 {
698 /* Buggy API, try allocate the memory another way. */
699 pMemDesc->complete();
700 pMemDesc->release();
701 if (PhysMask)
702 {
703 kprintf("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx fContiguous=%d fOptions=%#x - buggy API!\n",
704 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions);
705 LogRel(("rtR0MemObjNativeAllocWorker: off=%zx Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx fContiguous=%RTbool fOptions=%#x - buggy API!\n",
706 (size_t)off, Addr, AddrPrev, MaxPhysAddr, PhysMask, fContiguous, fOptions));
707 }
708 return VERR_ADDRESS_TOO_BIG;
709 }
710 AddrPrev = Addr;
711 }
712
713 /*
714 * Check that it's aligned correctly.
715 */
716 if ((uintptr_t)pv & (uAlignment - 1))
717 {
718 pMemDesc->complete();
719 pMemDesc->release();
720 if (PhysMask)
721 {
722 kprintf("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMas=%llx fContiguous=%d fOptions=%#x) - buggy API!!\n",
723 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions);
724 LogRel(("rtR0MemObjNativeAllocWorker: pv=%p uAlignment=%#zx (MaxPhysAddr=%llx PhysMas=%llx fContiguous=%RTbool fOptions=%#x) - buggy API!\n",
725 pv, uAlignment, MaxPhysAddr, PhysMask, fContiguous, fOptions));
726 }
727 return VERR_NOT_SUPPORTED;
728 }
729
730#ifdef RT_STRICT
731 /* check that the memory is actually mapped. */
732 //addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
733 //printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
734 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
735 RTThreadPreemptDisable(&State);
736 rtR0MemObjDarwinTouchPages(pv, cb);
737 RTThreadPreemptRestore(&State);
738#endif
739
740 /*
741 * Create the IPRT memory object.
742 */
743 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb, pszTag);
744 if (pMemDarwin)
745 {
746 if (fOptions & kIOMemoryKernelUserShared)
747 pMemDarwin->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
748 else
749 pMemDarwin->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
750 if (fContiguous)
751 {
752#ifdef __LP64__
753 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
754#else
755 addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
756#endif
757 RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
758 if (enmType == RTR0MEMOBJTYPE_CONT)
759 pMemDarwin->Core.u.Cont.Phys = PhysBase;
760 else if (enmType == RTR0MEMOBJTYPE_PHYS)
761 pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
762 else
763 AssertMsgFailed(("enmType=%d\n", enmType));
764 }
765
766 if (fExecutable)
767 {
768 rc = rtR0MemObjNativeProtectWorker(&pMemDarwin->Core, 0, cb,
769 RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
770#ifdef RT_STRICT
771 if (RT_SUCCESS(rc))
772 {
773 /* check that the memory is actually mapped. */
774 RTTHREADPREEMPTSTATE State2 = RTTHREADPREEMPTSTATE_INITIALIZER;
775 RTThreadPreemptDisable(&State2);
776 rtR0MemObjDarwinTouchPages(pv, cb);
777 RTThreadPreemptRestore(&State2);
778 }
779#endif
780 /* Bug 6226: Ignore KERN_PROTECTION_FAILURE on Leopard and older. */
781 if ( rc == VERR_PERMISSION_DENIED
782 && version_major <= 10 /* 10 = 10.6.x = Snow Leopard. */)
783 rc = VINF_SUCCESS;
784 }
785 else
786 rc = VINF_SUCCESS;
787 if (RT_SUCCESS(rc))
788 {
789 pMemDarwin->pMemDesc = pMemDesc;
790 *ppMem = &pMemDarwin->Core;
791 return VINF_SUCCESS;
792 }
793
794 rtR0MemObjDelete(&pMemDarwin->Core);
795 }
796
797 if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
798 rc = VERR_NO_PHYS_MEMORY;
799 else if (enmType == RTR0MEMOBJTYPE_LOW)
800 rc = VERR_NO_LOW_MEMORY;
801 else if (enmType == RTR0MEMOBJTYPE_CONT)
802 rc = VERR_NO_CONT_MEMORY;
803 else
804 rc = VERR_NO_MEMORY;
805 }
806 else
807 rc = VERR_MEMOBJ_INIT_FAILED;
808
809 pMemDesc->complete();
810 }
811 else
812 rc = RTErrConvertFromDarwinIO(IORet);
813 pMemDesc->release();
814 }
815 else
816 rc = VERR_MEMOBJ_INIT_FAILED;
817 Assert(rc != VERR_ADDRESS_TOO_BIG);
818 return rc;
819}
820
821
822/**
823 * rtR0MemObjNativeAllocWorker kernel_task wrapper function.
824 */
825static void rtR0MemObjNativeAllockWorkerOnKernelThread(void *pvUser0, void *pvUser1)
826{
827 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
828 RTR0MEMOBJDARWINALLOCARGS volatile *pArgs = (RTR0MEMOBJDARWINALLOCARGS volatile *)pvUser0;
829 int rc = rtR0MemObjNativeAllocWorker(pArgs->ppMem, pArgs->cb, pArgs->fExecutable, pArgs->fContiguous, pArgs->PhysMask,
830 pArgs->MaxPhysAddr, pArgs->enmType, pArgs->uAlignment, pArgs->pszTag,
831 true /*fOnKernelThread*/);
832 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
833}
834
835
836DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
837{
838 IPRT_DARWIN_SAVE_EFL_AC();
839
840 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */, UINT64_MAX,
841 RTR0MEMOBJTYPE_PAGE, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
842
843 IPRT_DARWIN_RESTORE_EFL_AC();
844 return rc;
845}
846
847
848DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
849 const char *pszTag)
850{
851 return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
852}
853
854
855DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
856{
857 IPRT_DARWIN_SAVE_EFL_AC();
858
859 /*
860 * Try IOMallocPhysical/IOMallocAligned first.
861 * Then try optimistically without a physical address mask, which will always
862 * end up using IOMallocAligned.
863 *
864 * (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
865 */
866 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, ~(uint32_t)PAGE_OFFSET_MASK,
867 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
868 if (rc == VERR_ADDRESS_TOO_BIG)
869 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /* fContiguous */, 0 /* PhysMask */,
870 _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
871
872 IPRT_DARWIN_RESTORE_EFL_AC();
873 return rc;
874}
875
876
877DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
878{
879 IPRT_DARWIN_SAVE_EFL_AC();
880
881 int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, true /* fContiguous */,
882 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
883 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
884
885 /*
886 * Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
887 * cb <= PAGE_SIZE allocations take a different path, using a different allocator.
888 */
889 if (RT_FAILURE(rc) && cb <= PAGE_SIZE)
890 rc = rtR0MemObjNativeAllocWorker(ppMem, cb + PAGE_SIZE, fExecutable, true /* fContiguous */,
891 ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE,
892 RTR0MEMOBJTYPE_CONT, PAGE_SIZE, pszTag, false /*fOnKernelThread*/);
893 IPRT_DARWIN_RESTORE_EFL_AC();
894 return rc;
895}
896
897
898DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
899 const char *pszTag)
900{
901 if (uAlignment != PAGE_SIZE)
902 {
903 /* See rtR0MemObjNativeAllocWorker: */
904 if (version_major < 9 /* 9 = 10.5.x = Leopard */)
905 return VERR_NOT_SUPPORTED;
906 }
907
908 IPRT_DARWIN_SAVE_EFL_AC();
909
910 /*
911 * Translate the PhysHighest address into a mask.
912 */
913 int rc;
914 if (PhysHighest == NIL_RTHCPHYS)
915 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
916 uAlignment <= PAGE_SIZE ? 0 : ~(mach_vm_address_t)(uAlignment - 1) /* PhysMask*/,
917 UINT64_MAX, RTR0MEMOBJTYPE_PHYS, uAlignment, pszTag, false /*fOnKernelThread*/);
918 else
919 {
920 mach_vm_address_t PhysMask = 0;
921 PhysMask = ~(mach_vm_address_t)0;
922 while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
923 PhysMask >>= 1;
924 AssertReturn(PhysMask + 1 <= cb, VERR_INVALID_PARAMETER);
925 PhysMask &= ~(mach_vm_address_t)(uAlignment - 1);
926
927 rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /* fExecutable */, true /* fContiguous */,
928 PhysMask, PhysHighest,
929 RTR0MEMOBJTYPE_PHYS, uAlignment, pszTag, false /*fOnKernelThread*/);
930 }
931
932 IPRT_DARWIN_RESTORE_EFL_AC();
933 return rc;
934}
935
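/*
 * Worked example (illustrative, not from the upstream file) of the PhysHighest
 * to PhysMask translation above: with PhysHighest = _4G - 1 and uAlignment =
 * PAGE_SIZE (4 KiB pages), the loop shifts the all-ones mask right until it no
 * longer exceeds PhysHighest | PAGE_OFFSET_MASK, and the final step clears the
 * alignment bits:
 *
 *     PhysMask = 0x00000000FFFFFFFF;                    // after the while loop
 *     PhysMask &= ~(mach_vm_address_t)(0x1000 - 1);     // => 0x00000000FFFFF000
 */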
936
937DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
938{
939 /** @todo rtR0MemObjNativeAllocPhys / darwin.
940 * This might be a bit problematic and may very well require having to create our own
941 * object which we populate with pages but without mapping it into any address space.
942 * Estimate is 2-3 days.
943 */
944 RT_NOREF(ppMem, cb, PhysHighest, pszTag);
945 return VERR_NOT_SUPPORTED;
946}
947
948
949DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
950 const char *pszTag)
951{
952 AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
953 IPRT_DARWIN_SAVE_EFL_AC();
954
955 /*
956 * Create a descriptor for it (the validation is always true on Intel Macs, but
957 * since it doesn't harm us, keep it in).
958 */
959 int rc = VERR_ADDRESS_TOO_BIG;
960 IOAddressRange aRanges[1] = { { Phys, cb } };
961 if ( aRanges[0].address == Phys
962 && aRanges[0].length == cb)
963 {
964 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
965 kIODirectionInOut, NULL /*task*/);
966 if (pMemDesc)
967 {
968#ifdef __LP64__
969 Assert(Phys == pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone));
970#else
971 Assert(Phys == pMemDesc->getPhysicalSegment64(0, NULL));
972#endif
973
974 /*
975 * Create the IPRT memory object.
976 */
977 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS,
978 NULL, cb, pszTag);
979 if (pMemDarwin)
980 {
981 pMemDarwin->Core.u.Phys.PhysBase = Phys;
982 pMemDarwin->Core.u.Phys.fAllocated = false;
983 pMemDarwin->Core.u.Phys.uCachePolicy = uCachePolicy;
984 pMemDarwin->pMemDesc = pMemDesc;
985 *ppMem = &pMemDarwin->Core;
986 IPRT_DARWIN_RESTORE_EFL_AC();
987 return VINF_SUCCESS;
988 }
989
990 rc = VERR_NO_MEMORY;
991 pMemDesc->release();
992 }
993 else
994 rc = VERR_MEMOBJ_INIT_FAILED;
995 }
996 else
997 AssertMsgFailed(("%#llx %llx\n", (unsigned long long)Phys, (unsigned long long)cb));
998 IPRT_DARWIN_RESTORE_EFL_AC();
999 return rc;
1000}
1001
1002
1003/**
1004 * Internal worker for locking down pages.
1005 *
1006 * @return IPRT status code.
1007 *
1008 * @param ppMem Where to store the memory object pointer.
1009 * @param pv First page.
1010 * @param cb Number of bytes.
1011 * @param fAccess The desired access, a combination of RTMEM_PROT_READ
1012 * and RTMEM_PROT_WRITE.
1013 * @param Task The task \a pv and \a cb refers to.
1014 * @param pszTag Allocation tag used for statistics and such.
1015 */
1016static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task,
1017 const char *pszTag)
1018{
1019 IPRT_DARWIN_SAVE_EFL_AC();
1020 NOREF(fAccess);
1021#ifdef USE_VM_MAP_WIRE
1022 vm_map_t Map = get_task_map(Task);
1023 Assert(Map);
1024
1025 /*
1026 * First try to lock the memory.
1027 */
1028 int rc = VERR_LOCK_FAILED;
1029 kern_return_t kr = vm_map_wire(get_task_map(Task),
1030 (vm_map_offset_t)pv,
1031 (vm_map_offset_t)pv + cb,
1032 VM_PROT_DEFAULT,
1033 0 /* not user */);
1034 if (kr == KERN_SUCCESS)
1035 {
1036 /*
1037 * Create the IPRT memory object.
1038 */
1039 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
1040 if (pMemDarwin)
1041 {
1042 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1043 *ppMem = &pMemDarwin->Core;
1044
1045 IPRT_DARWIN_RESTORE_EFL_AC();
1046 return VINF_SUCCESS;
1047 }
1048
1049 kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
1050 Assert(kr == KERN_SUCCESS);
1051 rc = VERR_NO_MEMORY;
1052 }
1053
1054#else
1055
1056 /*
1057 * Create a descriptor and try lock it (prepare).
1058 */
1059 int rc = VERR_MEMOBJ_INIT_FAILED;
1060 IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
1061 if (pMemDesc)
1062 {
1063 IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1064 if (IORet == kIOReturnSuccess)
1065 {
1066 /*
1067 * Create the IPRT memory object.
1068 */
1069 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK,
1070 pv, cb, pszTag);
1071 if (pMemDarwin)
1072 {
1073 pMemDarwin->Core.u.Lock.R0Process = (RTR0PROCESS)Task;
1074 pMemDarwin->pMemDesc = pMemDesc;
1075 *ppMem = &pMemDarwin->Core;
1076
1077 IPRT_DARWIN_RESTORE_EFL_AC();
1078 return VINF_SUCCESS;
1079 }
1080
1081 pMemDesc->complete();
1082 rc = VERR_NO_MEMORY;
1083 }
1084 else
1085 rc = VERR_LOCK_FAILED;
1086 pMemDesc->release();
1087 }
1088#endif
1089 IPRT_DARWIN_RESTORE_EFL_AC();
1090 return rc;
1091}
1092
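/*
 * Illustrative lifetime note (not from the upstream file): the descriptor
 * created and prepared above stays wired until rtR0MemObjNativeFree runs, which
 * undoes both steps in order:
 *
 *     pMemDarwin->pMemDesc->complete();   // counterpart of prepare()
 *     pMemDarwin->pMemDesc->release();    // counterpart of withAddressRange()
 */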
1093
1094DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
1095 RTR0PROCESS R0Process, const char *pszTag)
1096{
1097 return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process, pszTag);
1098}
1099
1100
1101DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
1102{
1103 return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task, pszTag);
1104}
1105
1106
1107DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
1108 const char *pszTag)
1109{
1110 RT_NOREF(ppMem, pvFixed, cb, uAlignment, pszTag);
1111 return VERR_NOT_SUPPORTED;
1112}
1113
1114
1115DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
1116 RTR0PROCESS R0Process, const char *pszTag)
1117{
1118 RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
1119 return VERR_NOT_SUPPORTED;
1120}
1121
1122
1123DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
1124 unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
1125{
1126 RT_NOREF(fProt);
1127 AssertReturn(pvFixed == (void *)-1, VERR_NOT_SUPPORTED);
1128
1129 /*
1130 * Check that the specified alignment is supported.
1131 */
1132 if (uAlignment > PAGE_SIZE)
1133 return VERR_NOT_SUPPORTED;
1134 Assert(!offSub || cbSub);
1135
1136 IPRT_DARWIN_SAVE_EFL_AC();
1137
1138 /*
1139 * Must have a memory descriptor that we can map.
1140 */
1141 int rc = VERR_INVALID_PARAMETER;
1142 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1143 if (pMemToMapDarwin->pMemDesc)
1144 {
1145 /* The kIOMapPrefault option was added in 10.10.0; it causes PTEs to be populated and
1146 INTEL_PTE_WIRED to be set, just like we desire (see further down). However, until
1147 10.13.0 it was not available for use on kernel mappings. Oh, fudge. */
1148#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1149 static uint32_t volatile s_fOptions = UINT32_MAX;
1150 uint32_t fOptions = s_fOptions;
1151 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1152 s_fOptions = fOptions = version_major >= 17 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.13.0 (High Sierra). */
1153
1154 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
1155 0,
1156 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1157 offSub,
1158 cbSub);
1159#else
1160 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
1161 0,
1162 kIOMapAnywhere | kIOMapDefaultCache,
1163 offSub,
1164 cbSub);
1165#endif
1166 if (pMemMap)
1167 {
1168 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1169 void *pv = (void *)(uintptr_t)VirtAddr;
1170 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1171 {
1172//#ifdef __LP64__
1173// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1174//#else
1175// addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1176//#endif
1177// MY_PRINTF("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
1178
1179// /*
1180// * Explicitly lock it so that we're sure it is present and that
1181// * its PTEs cannot be recycled.
1182// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
1183// * to the options which causes prepare() to not wire the pages.
1184// * This is probably a bug.
1185// */
1186// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
1187// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
1188// 1 /* count */,
1189// 0 /* offset */,
1190// kernel_task,
1191// kIODirectionInOut | kIOMemoryTypeVirtual,
1192// kIOMapperSystem);
1193// if (pMemDesc)
1194// {
1195// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
1196// if (IORet == kIOReturnSuccess)
1197// {
1198 /* HACK ALERT! On kernels older than 10.10 (xnu version 14), we need to fault in
1199 the pages here so they can safely be accessed from inside simple
1200 locks and when preemption is disabled (no page-ins allowed).
1201 Note! This touching does not cause INTEL_PTE_WIRED (bit 10) to be set as we go
1202 thru general #PF and vm_fault doesn't figure it should be wired or something. */
1203 rtR0MemObjDarwinTouchPages(pv, cbSub ? cbSub : pMemToMap->cb);
1204 /** @todo First, the memory should've been mapped by now, and second, it
1205 * should have the wired attribute in the PTE (bit 10). Neither seems to
1206 * be the case. The disabled locking code doesn't make any difference,
1207 * which is extremely odd, and breaks rtR0MemObjNativeGetPagePhysAddr
1208 * (getPhysicalSegment64 -> 64 for the lock descriptor. */
1209//#ifdef __LP64__
1210// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment(offSub, NULL, kIOMemoryMapperNone);
1211//#else
1212// addr64_t Addr2 = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
1213//#endif
1214// MY_PRINTF("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr2, 2);
1215
1216 /*
1217 * Create the IPRT memory object.
1218 */
1219 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1220 pv, cbSub ? cbSub : pMemToMap->cb, pszTag);
1221 if (pMemDarwin)
1222 {
1223 pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1224 pMemDarwin->pMemMap = pMemMap;
1225// pMemDarwin->pMemDesc = pMemDesc;
1226 *ppMem = &pMemDarwin->Core;
1227
1228 IPRT_DARWIN_RESTORE_EFL_AC();
1229 return VINF_SUCCESS;
1230 }
1231
1232// pMemDesc->complete();
1233// rc = VERR_NO_MEMORY;
1234// }
1235// else
1236// rc = RTErrConvertFromDarwinIO(IORet);
1237// pMemDesc->release();
1238// }
1239// else
1240// rc = VERR_MEMOBJ_INIT_FAILED;
1241 }
1242 else if (pv)
1243 rc = VERR_ADDRESS_TOO_BIG;
1244 else
1245 rc = VERR_MAP_FAILED;
1246 pMemMap->release();
1247 }
1248 else
1249 rc = VERR_MAP_FAILED;
1250 }
1251
1252 IPRT_DARWIN_RESTORE_EFL_AC();
1253 return rc;
1254}
1255
1256
1257DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
1258 unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub, const char *pszTag)
1259{
1260 RT_NOREF(fProt);
1261
1262 /*
1263 * Check for unsupported things.
1264 */
1265 AssertReturn(R3PtrFixed == (RTR3PTR)-1, VERR_NOT_SUPPORTED);
1266 if (uAlignment > PAGE_SIZE)
1267 return VERR_NOT_SUPPORTED;
1268 Assert(!offSub || cbSub);
1269
1270 IPRT_DARWIN_SAVE_EFL_AC();
1271
1272 /*
1273 * Must have a memory descriptor.
1274 */
1275 int rc = VERR_INVALID_PARAMETER;
1276 PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
1277 if (pMemToMapDarwin->pMemDesc)
1278 {
1279#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101000 /* The kIOMapPrefault option was added in 10.10.0. */
1280 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1281 0,
1282 kIOMapAnywhere | kIOMapDefaultCache | kIOMapPrefault,
1283 offSub,
1284 cbSub);
1285#elif MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
1286 static uint32_t volatile s_fOptions = UINT32_MAX;
1287 uint32_t fOptions = s_fOptions;
1288 if (RT_UNLIKELY(fOptions == UINT32_MAX))
1289 s_fOptions = fOptions = version_major >= 14 ? 0x10000000 /*kIOMapPrefault*/ : 0; /* Since 10.10.0. */
1290 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
1291 0,
1292 kIOMapAnywhere | kIOMapDefaultCache | fOptions,
1293 offSub,
1294 cbSub);
1295#else
1296 IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
1297 0,
1298 kIOMapAnywhere | kIOMapDefaultCache,
1299 offSub,
1300 cbSub);
1301#endif
1302 if (pMemMap)
1303 {
1304 IOVirtualAddress VirtAddr = pMemMap->getVirtualAddress();
1305 void *pv = (void *)(uintptr_t)VirtAddr;
1306 if ((uintptr_t)pv == VirtAddr && pv != NULL)
1307 {
1308 /*
1309 * Create the IPRT memory object.
1310 */
1311 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
1312 pv, cbSub ? cbSub : pMemToMap->cb, pszTag);
1313 if (pMemDarwin)
1314 {
1315 pMemDarwin->Core.u.Mapping.R0Process = R0Process;
1316 pMemDarwin->pMemMap = pMemMap;
1317 *ppMem = &pMemDarwin->Core;
1318
1319 IPRT_DARWIN_RESTORE_EFL_AC();
1320 return VINF_SUCCESS;
1321 }
1322
1323 rc = VERR_NO_MEMORY;
1324 }
1325 else if (pv)
1326 rc = VERR_ADDRESS_TOO_BIG;
1327 else
1328 rc = VERR_MAP_FAILED;
1329 pMemMap->release();
1330 }
1331 else
1332 rc = VERR_MAP_FAILED;
1333 }
1334
1335 IPRT_DARWIN_RESTORE_EFL_AC();
1336 return rc;
1337}
1338
1339
1340/**
1341 * Worker for rtR0MemObjNativeProtect that's typically called in a different
1342 * context.
1343 */
1344static int rtR0MemObjNativeProtectWorker(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1345{
1346 IPRT_DARWIN_SAVE_EFL_AC();
1347
1348 /* Get the map for the object. */
1349 vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
1350 if (!pVmMap)
1351 {
1352 IPRT_DARWIN_RESTORE_EFL_AC();
1353 return VERR_NOT_SUPPORTED;
1354 }
1355
1356 /*
1357 * Convert the protection.
1358 */
1359 vm_prot_t fMachProt;
1360 switch (fProt)
1361 {
1362 case RTMEM_PROT_NONE:
1363 fMachProt = VM_PROT_NONE;
1364 break;
1365 case RTMEM_PROT_READ:
1366 fMachProt = VM_PROT_READ;
1367 break;
1368 case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
1369 fMachProt = VM_PROT_READ | VM_PROT_WRITE;
1370 break;
1371 case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1372 fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1373 break;
1374 case RTMEM_PROT_WRITE:
1375 fMachProt = VM_PROT_WRITE | VM_PROT_READ; /* never write-only */
1376 break;
1377 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
1378 fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE | VM_PROT_READ; /* never write-only or execute-only */
1379 break;
1380 case RTMEM_PROT_EXEC:
1381 fMachProt = VM_PROT_EXECUTE | VM_PROT_READ; /* never execute-only */
1382 break;
1383 default:
1384 AssertFailedReturn(VERR_INVALID_PARAMETER);
1385 }
1386
1387 /*
1388 * Do the job.
1389 */
1390 vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
1391 kern_return_t krc = vm_protect(pVmMap,
1392 Start,
1393 cbSub,
1394 false,
1395 fMachProt);
1396 if (krc != KERN_SUCCESS)
1397 {
1398 static int s_cComplaints = 0;
1399 if (s_cComplaints < 10)
1400 {
1401 s_cComplaints++;
1402 printf("rtR0MemObjNativeProtect: vm_protect(%p,%p,%p,false,%#x) -> %d\n",
1403 (void *)pVmMap, (void *)Start, (void *)cbSub, fMachProt, krc);
1404
1405 kern_return_t krc2;
1406 vm_offset_t pvReal = Start;
1407 vm_size_t cbReal = 0;
1408 mach_msg_type_number_t cInfo = VM_REGION_BASIC_INFO_COUNT;
1409 struct vm_region_basic_info Info;
1410 RT_ZERO(Info);
1411 krc2 = vm_region(pVmMap, &pvReal, &cbReal, VM_REGION_BASIC_INFO, (vm_region_info_t)&Info, &cInfo, NULL);
1412 printf("rtR0MemObjNativeProtect: basic info - krc2=%d pv=%p cb=%p prot=%#x max=%#x inh=%#x shr=%d rvd=%d off=%#x behavior=%#x wired=%#x\n",
1413 krc2, (void *)pvReal, (void *)cbReal, Info.protection, Info.max_protection, Info.inheritance,
1414 Info.shared, Info.reserved, Info.offset, Info.behavior, Info.user_wired_count);
1415 }
1416 IPRT_DARWIN_RESTORE_EFL_AC();
1417 return RTErrConvertFromDarwinKern(krc);
1418 }
1419
1420 /*
1421 * Touch the pages if they should be writable afterwards and accessible
1422 * from code which should never fault. vm_protect() may leave pages
1423 * temporarily write protected, possibly due to pmap no-upgrade rules?
1424 *
1425 * This is the same trick (or HACK ALERT if you like) as applied in
1426 * rtR0MemObjNativeMapKernel.
1427 */
1428 if ( pMem->enmType != RTR0MEMOBJTYPE_MAPPING
1429 || pMem->u.Mapping.R0Process == NIL_RTR0PROCESS)
1430 {
1431 if (fProt & RTMEM_PROT_WRITE)
1432 rtR0MemObjDarwinTouchPages((void *)Start, cbSub);
1433 /*
1434 * Sniff (read) read-only pages too, just to be sure.
1435 */
1436 else if (fProt & (RTMEM_PROT_READ | RTMEM_PROT_EXEC))
1437 rtR0MemObjDarwinSniffPages((void const *)Start, cbSub);
1438 }
1439
1440 IPRT_DARWIN_RESTORE_EFL_AC();
1441 return VINF_SUCCESS;
1442}
1443
1444
1445/**
1446 * rtR0MemObjNativeProtect kernel_task wrapper function.
1447 */
1448static void rtR0MemObjNativeProtectWorkerOnKernelThread(void *pvUser0, void *pvUser1)
1449{
1450 AssertPtr(pvUser0); Assert(pvUser1 == NULL); NOREF(pvUser1);
1451 RTR0MEMOBJDARWINPROTECTARGS *pArgs = (RTR0MEMOBJDARWINPROTECTARGS *)pvUser0;
1452 int rc = rtR0MemObjNativeProtectWorker(pArgs->pMem, pArgs->offSub, pArgs->cbSub, pArgs->fProt);
1453 rtR0MemObjDarwinSignalThreadWaitinOnTask(&pArgs->Core, rc);
1454}
1455
1456
1457DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
1458{
1459 /*
1460 * The code won't work right because process codesigning properties leak
1461 * into kernel_map memory management. So, if the user process we're running
1462 * in has CS restrictions active, we cannot play around with the EXEC
1463 * protection because some vm_fault.c code thinks we're modifying the process map
1464 * or something.
1465 */
1466 int rc;
1467 if (rtR0MemObjDarwinGetMap(pMem) == kernel_map)
1468 {
1469 RTR0MEMOBJDARWINPROTECTARGS Args;
1470 Args.pMem = pMem;
1471 Args.offSub = offSub;
1472 Args.cbSub = cbSub;
1473 Args.fProt = fProt;
1474 rc = rtR0MemObjDarwinDoInKernelTaskThread(rtR0MemObjNativeProtectWorkerOnKernelThread, &Args.Core);
1475 }
1476 else
1477 rc = rtR0MemObjNativeProtectWorker(pMem, offSub, cbSub, fProt);
1478 return rc;
1479}
1480
1481
1482DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1483{
1484 RTHCPHYS PhysAddr;
1485 PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
1486 IPRT_DARWIN_SAVE_EFL_AC();
1487
1488#ifdef USE_VM_MAP_WIRE
1489 /*
1490 * Locked memory doesn't have a memory descriptor and
1491 * needs to be handled differently.
1492 */
1493 if (pMemDarwin->Core.enmType == RTR0MEMOBJTYPE_LOCK)
1494 {
1495 ppnum_t PgNo;
1496 if (pMemDarwin->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
1497 PgNo = pmap_find_phys(kernel_pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1498 else
1499 {
1500 /*
1501 * From what I can tell, Apple seems to have locked up all the
1502 * available interfaces that could help us obtain the pmap_t of a task
1503 * or vm_map_t.
1504 *
1505 * So, we'll have to figure out where in the vm_map_t structure it is
1506 * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
1507 * kernel_map->pmap, we scan kernel_map to locate the structure offset.
1508 * Not nice, but it will hopefully do the job in a reliable manner...
1509 *
1510 * (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
1511 */
1512 static int s_offPmap = -1;
1513 if (RT_UNLIKELY(s_offPmap == -1))
1514 {
1515 pmap_t const *p = (pmap_t *)kernel_map;
1516 pmap_t const * const pEnd = p + 64;
1517 for (; p < pEnd; p++)
1518 if (*p == kernel_pmap)
1519 {
1520 s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
1521 break;
1522 }
1523 AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
1524 }
1525 pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
1526 PgNo = pmap_find_phys(Pmap, (uintptr_t)pMemDarwin->Core.pv + iPage * PAGE_SIZE);
1527 }
1528
1529 IPRT_DARWIN_RESTORE_EFL_AC();
1530 AssertReturn(PgNo, NIL_RTHCPHYS);
1531 PhysAddr = (RTHCPHYS)PgNo << PAGE_SHIFT;
1532 Assert((PhysAddr >> PAGE_SHIFT) == PgNo);
1533 }
1534 else
1535#endif /* USE_VM_MAP_WIRE */
1536 {
1537 /*
1538 * Get the memory descriptor.
1539 */
1540 IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
1541 if (!pMemDesc)
1542 pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
1543 AssertReturn(pMemDesc, NIL_RTHCPHYS);
1544
1545 /*
1546 * If we've got a memory descriptor, use getPhysicalSegment64().
1547 */
1548#ifdef __LP64__
1549 addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
1550#else
1551 addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
1552#endif
1553 IPRT_DARWIN_RESTORE_EFL_AC();
1554 AssertMsgReturn(Addr, ("iPage=%u\n", iPage), NIL_RTHCPHYS);
1555 PhysAddr = Addr;
1556 AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
1557 }
1558
1559 return PhysAddr;
1560}
1561