VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c@32736

Last change on this file since 32736 was 32348, checked in by vboxsync, 14 years ago

RTR0MemObj*: Return VERR_NOT_SUPPORTED instead of VERR_NOT_IMPLEMENTED in a bunch of situations where the former is documented.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 32.1 KB
/* $Id: memobj-r0drv-freebsd.c 32348 2010-09-09 12:28:05Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, FreeBSD.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-freebsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
    /** Type dependent data */
    union
    {
        /** Non physical memory allocations */
        struct
        {
            /** The VM object associated with the allocation. */
            vm_object_t pObject;
        } NonPhys;
        /** Physical memory allocations */
        struct
        {
            /** Number of pages */
            uint32_t cPages;
            /** Array of pages - variable */
            vm_page_t apPages[1];
        } Phys;
    } u;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;


MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem    The memory object.
 */
static vm_map_t rtR0MemObjFreeBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}

int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_CONT:
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);
            break;

        case RTR0MEMOBJTYPE_PAGE:
        {
            rc = vm_map_remove(kernel_map,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));

            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
            }
            vm_page_unlock_queues();
            break;
        }

        case RTR0MEMOBJTYPE_LOCK:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;

            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                vm_page_unwire(pPage, 0);
                vm_page_free(pPage);
            }
            vm_page_unlock_queues();
            break;
        }

#ifdef USE_KMEM_ALLOC_ATTR
        case RTR0MEMOBJTYPE_LOW:
        {
            kmem_free(kernel_map, (vm_offset_t)pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb);
            break;
        }
#else
        case RTR0MEMOBJTYPE_LOW: /* unused */
#endif
        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}

int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;
    size_t cPages = cb >> PAGE_SHIFT;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    vm_offset_t MapAddress = vm_map_min(kernel_map);
    rc = vm_map_find(kernel_map,    /* map */
                     NULL,          /* object */
                     0,             /* offset */
                     &MapAddress,   /* addr (IN/OUT) */
                     cb,            /* length */
                     TRUE,          /* find_space */
                     fExecutable    /* protection */
                     ? VM_PROT_ALL
                     : VM_PROT_RW,
                     VM_PROT_ALL,   /* max(_prot) */
                     0);            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        rc = VINF_SUCCESS;

        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage;

            pPage = vm_page_alloc(NULL, iPage,
                                  VM_ALLOC_SYSTEM |
                                  VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);

            if (!pPage)
            {
                /*
                 * Out of pages
                 * Remove already allocated pages
                 */
                while (iPage-- > 0)
                {
                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                    vm_page_lock_queues();
                    vm_page_unwire(pPage, 0);
                    vm_page_free(pPage);
                    vm_page_unlock_queues();
                }
                rc = VERR_NO_MEMORY;
                break;
            }

            pPage->valid = VM_PAGE_BITS_ALL;
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }

        if (rc == VINF_SUCCESS)
        {
            vm_offset_t AddressDst = MapAddress;

            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemFreeBSD->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(kernel_map->pmap, AddressDst, pPage,
                              fExecutable
                              ? VM_PROT_ALL
                              : VM_PROT_RW,
                              TRUE);

                AddressDst += PAGE_SIZE;
            }

            /* Store start address */
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
    }
    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}
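
/**
 * Usage sketch: how a hypothetical ring-0 caller would reach the allocator
 * above through the public IPRT API declared in iprt/memobj.h:
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPage(&hMemObj, _64K, false); // fExecutable=false
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = RTR0MemObjAddress(hMemObj);
 *         // ... use the page-aligned buffer pv ...
 *         RTR0MemObjFree(hMemObj, false); // fFreeMappings=false
 *     }
 * @endcode
 */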

int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
#ifdef USE_KMEM_ALLOC_ATTR
    /*
     * Use kmem_alloc_attr; fExecutable is not needed because the
     * memory will be executable by default
     */
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->Core.pv = (void *)kmem_alloc_attr(kernel_map,          /* Kernel */
                                                   cb,                  /* Amount */
                                                   M_ZERO,              /* Zero memory */
                                                   0,                   /* Low physical address */
                                                   _4G - PAGE_SIZE,     /* Highest physical address */
                                                   VM_MEMATTR_DEFAULT); /* Default memory attributes */
    if (!pMemFreeBSD->Core.pv)
        return VERR_NO_MEMORY;

    *ppMem = &pMemFreeBSD->Core;

    return VINF_SUCCESS;
#else
    /*
     * Try a page allocation first and see if we get lucky; if not, try contigmalloc.
     * Might wish to try to find our own pages or something later if this
     * turns into a problem spot on AMD64 boxes.
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
            {
                RTR0MemObjFree(*ppMem, false);
                *ppMem = NULL;
                rc = VERR_NO_MEMORY;
                break;
            }
    }
    if (RT_FAILURE(rc))
        rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    return rc;
#endif
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
                                        M_IPRTMOBJ,           /* type */
                                        M_NOWAIT | M_ZERO,    /* flags */
                                        0,                    /* lowest physical address */
                                        _4G-1,                /* highest physical address */
                                        PAGE_SIZE,            /* alignment. */
                                        0);                   /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    NOREF(fExecutable);
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
}

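/**
 * Initializes a page just returned by vm_phys_alloc_contig as a wired,
 * unmanaged page and accounts for it in the global wire count.
 *
 * @param   pPage   The page to initialize.
 * @param   iPage   The page index to store in the page structure.
 */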
static void rtR0MemObjFreeBSDPhysPageInit(vm_page_t pPage, vm_pindex_t iPage)
{
    pPage->wire_count = 1;
    pPage->pindex = iPage;
    pPage->act_count = 0;
    pPage->oflags = 0;
    pPage->flags = PG_UNMANAGED;
    atomic_add_int(&cnt.v_wire_count, 1);
}

static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous)
{
    int rc = VINF_SUCCESS;
    uint32_t cPages = cb >> PAGE_SHIFT;
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->u.Phys.cPages = cPages;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    if (fContiguous)
    {
        vm_page_t pPage = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);

        if (pPage)
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                rtR0MemObjFreeBSDPhysPageInit(&pPage[iPage], iPage);
                pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
            }
        else
            rc = VERR_NO_MEMORY;
    }
    else
    {
        /* Allocate page by page */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage = vm_phys_alloc_contig(1, 0, VmPhysAddrHigh, uAlignment, 0);

            if (!pPage)
            {
                /* Free all allocated pages */
                while (iPage-- > 0)
                {
                    pPage = pMemFreeBSD->u.Phys.apPages[iPage];
                    vm_page_lock_queues();
                    vm_page_unwire(pPage, 0);
                    vm_page_free(pPage);
                    vm_page_unlock_queues();
                }
                rc = VERR_NO_MEMORY;
                break;
            }
            rtR0MemObjFreeBSDPhysPageInit(pPage, iPage);
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }
    }

    if (RT_FAILURE(rc))
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    else
    {
        if (enmType == RTR0MEMOBJTYPE_PHYS)
        {
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[0]);
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }

    return rc;
}

int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#if 1
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
#else
    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                   /* size */
                                        M_IPRTMOBJ,           /* type */
                                        M_NOWAIT | M_ZERO,    /* flags */
                                        0,                    /* lowest physical address */
                                        _4G-1,                /* highest physical address */
                                        uAlignment,           /* alignment. */
                                        0);                   /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;
#endif
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 1
    return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
#else
    return VERR_NOT_SUPPORTED;
#endif
}
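
/**
 * Usage sketch: a hypothetical caller reaching the two allocators above
 * through the public IPRT API (iprt/memobj.h). RTR0MemObjAllocPhys requests
 * physically contiguous pages below PhysHighest, while RTR0MemObjAllocPhysNC
 * allows them to be scattered:
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjAllocPhys(&hMemObj, _1M, _4G - 1); // contiguous
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTHCPHYS PhysBase = RTR0MemObjGetPagePhysAddr(hMemObj, 0);
 *         // ... hand PhysBase to a device, then ...
 *         RTR0MemObjFree(hMemObj, false);
 *     }
 * @endcode
 */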


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemFreeBSD->Core;
    return VINF_SUCCESS;
}
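
/**
 * Usage sketch: a hypothetical caller entering an existing physical range,
 * e.g. a device MMIO region, through the public RTR0MemObjEnterPhys API; the
 * base address below is made up for illustration:
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjEnterPhys(&hMemObj, 0xfe000000, _4K,
 *                                  RTMEM_CACHE_POLICY_DONT_CARE);
 *     // The object merely describes the range; map it before accessing it.
 * @endcode
 */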


/**
 * Worker locking the memory in either kernel or user maps.
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(pVmMap,            /* the map */
                     AddrStart,         /* start */
                     AddrStart + cb,    /* end */
                     fFlags);           /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     &((struct proc *)R0Process)->p_vmspace->vm_map,
                                     (vm_offset_t)R3Ptr,
                                     cb,
                                     fAccess,
                                     R0Process,
                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLockInMap(ppMem,
                                     kernel_map,
                                     (vm_offset_t)pv,
                                     cb,
                                     fAccess,
                                     NIL_RTR0PROCESS,
                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
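
/**
 * Usage sketch: a hypothetical caller wiring down a user buffer through the
 * public RTR0MemObjLockUser API so its pages cannot be paged out while a
 * device or worker thread accesses them (R3PtrBuf and cbBuf are made up):
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjLockUser(&hMemObj, R3PtrBuf, cbBuf,
 *                                 RTMEM_PROT_READ | RTMEM_PROT_WRITE,
 *                                 RTR0ProcHandleSelf());
 *     if (RT_SUCCESS(rc))
 *     {
 *         // the pages stay resident until the object is freed
 *         RTR0MemObjFree(hMemObj, false);
 *     }
 * @endcode
 */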


/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * Allocate an empty VM object and map it into the requested map.
     */
    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->u.NonPhys.pObject)
    {
        vm_offset_t MapAddress = pvFixed != (void *)-1
                               ? (vm_offset_t)pvFixed
                               : vm_map_min(pMap);
        if (pvFixed != (void *)-1)
            vm_map_remove(pMap,
                          MapAddress,
                          MapAddress + cb);

        rc = vm_map_find(pMap,                              /* map */
                         pMemFreeBSD->u.NonPhys.pObject,    /* object */
                         0,                                 /* offset */
                         &MapAddress,                       /* addr (IN/OUT) */
                         cb,                                /* length */
                         pvFixed == (void *)-1,             /* find_space */
                         VM_PROT_NONE,                      /* protection */
                         VM_PROT_ALL,                       /* max(_prot) ?? */
                         0);                                /* cow (copy-on-write) */
        if (rc == KERN_SUCCESS)
        {
            if (R0Process != NIL_RTR0PROCESS)
            {
                rc = vm_map_inherit(pMap,
                                    MapAddress,
                                    MapAddress + cb,
                                    VM_INHERIT_SHARE);
                AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
            }
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        vm_object_deallocate(pMemFreeBSD->u.NonPhys.pObject);
        rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}

int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}
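
/**
 * Usage sketch: a hypothetical caller reserving kernel address space through
 * the public RTR0MemObjReserveKernel API, letting the system pick the address
 * ((void *)-1 means no fixed address):
 * @code
 *     RTR0MEMOBJ hMemObj;
 *     int rc = RTR0MemObjReserveKernel(&hMemObj, (void *)-1, _2M, PAGE_SIZE);
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pv = RTR0MemObjAddress(hMemObj); // reserved, not yet backed
 *         RTR0MemObjFree(hMemObj, false);
 *     }
 * @endcode
 */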


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
/** @todo finish the implementation. */

    return VERR_NOT_SUPPORTED;
}


/* see http://markmail.org/message/udhq33tefgtyfozs */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc *pProc = (struct proc *)R0Process;
    struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    PROC_LOCK(pProc);
    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    PROC_UNLOCK(pProc);

    /* Insert the object in the map. */
    rc = vm_map_find(pProcMap,          /* Map to insert the object in */
                     NULL,              /* Object to map */
                     0,                 /* Start offset in the object */
                     &AddrR3,           /* Start address IN/OUT */
                     pMemToMap->cb,     /* Size of the mapping */
                     TRUE,              /* Whether a suitable address should be searched for first */
                     ProtectionFlags,   /* protection flags */
                     VM_PROT_ALL,       /* Maximum protection flags */
                     0);                /* Copy on write */

    /* Map the memory page by page into the destination map. */
    if (rc == KERN_SUCCESS)
    {
        size_t cPages = pMemToMap->cb >> PAGE_SHIFT;
        pmap_t pPhysicalMap = pProcMap->pmap;
        vm_offset_t AddrR3Dst = AddrR3;

        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PAGE)
        {
            /* Mapping physical allocations */
            Assert(cPages == pMemToMapFreeBSD->u.Phys.cPages);

            /* Insert the memory page by page into the mapping. */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemToMapFreeBSD->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
            }
        }
        else
        {
            /* Mapping cont or low memory types */
            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;

            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
                AddrToMap += PAGE_SIZE;
            }
        }
    }

    if (RT_SUCCESS(rc))
    {
        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, ((vm_offset_t)AddrR3), ((vm_offset_t)AddrR3) + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }

    return VERR_NO_MEMORY;
}
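
/**
 * Usage sketch: a hypothetical caller mapping a ring-0 allocation (hMemObj)
 * into the calling process through the public RTR0MemObjMapUser API;
 * (RTR3PTR)-1 lets the code above pick the user address:
 * @code
 *     RTR0MEMOBJ hMapObj;
 *     int rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0,
 *                                RTMEM_PROT_READ | RTMEM_PROT_WRITE,
 *                                RTR0ProcHandleSelf());
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMapObj);
 *         // ... share R3Ptr with the user-mode part ...
 *     }
 * @endcode
 */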


int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t ProtectionFlags = 0;
    vm_offset_t AddrStart = (uintptr_t)pMem->pv + offSub;
    vm_offset_t AddrEnd = AddrStart + cbSub;
    vm_map_t pVmMap = rtR0MemObjFreeBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    int krc = vm_map_protect(pVmMap, AddrStart, AddrEnd, ProtectionFlags, FALSE);
    if (krc == KERN_SUCCESS)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}
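
/**
 * Usage sketch: a hypothetical caller making the first page of an existing
 * object (hMemObj) read-only through the public RTR0MemObjProtect API:
 * @code
 *     int rc = RTR0MemObjProtect(hMemObj, 0, PAGE_SIZE, RTMEM_PROT_READ);
 * @endcode
 */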


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            if (   pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                && pMemFreeBSD->Core.u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
                /* later */
                return NIL_RTHCPHYS;
            }

            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Lock.R0Process;
            struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
            pmap_t pPhysicalMap = pProcMap->pmap;

            return pmap_extract(pPhysicalMap, pb);
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
            {
                struct proc *pProc = (struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process;
                struct vm_map *pProcMap = &pProc->p_vmspace->vm_map;
                pmap_t pPhysicalMap = pProcMap->pmap;

                return pmap_extract(pPhysicalMap, pb);
            }
            return vtophys(pb);
        }

        case RTR0MEMOBJTYPE_CONT:
            return pMemFreeBSD->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemFreeBSD->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[iPage]);

#ifdef USE_KMEM_ALLOC_ATTR
        case RTR0MEMOBJTYPE_LOW:
        {
            vm_offset_t pb = (vm_offset_t)pMemFreeBSD->Core.pv + (iPage << PAGE_SHIFT);
            return vtophys(pb);
        }
#else
        case RTR0MEMOBJTYPE_LOW:
#endif
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
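
/**
 * Usage sketch: a hypothetical caller walking the physical addresses of an
 * object (hMemObj) page by page through the public RTR0MemObjGetPagePhysAddr
 * API, e.g. to build a scatter/gather list for a device:
 * @code
 *     size_t cPages = RTR0MemObjSize(hMemObj) >> PAGE_SHIFT;
 *     for (size_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         RTHCPHYS Phys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
 *         // NIL_RTHCPHYS means the backend cannot resolve this page
 *     }
 * @endcode
 */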