VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c@69111

Last change on this file since 69111 was 63558, checked in by vboxsync, 8 years ago

r0drv/netbsd: re-import memobj-r0drv-netbsd.c on top of an svn copy of
the FreeBSD version it's based on. Since kernel vocabularies are
different, there's quite a bit of diff, but the general shape is more
or less the same to make it useful to keep the FreeBSD history.

From Haomai Wang GSoC project with additional changes by Arto Huusko.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 18.3 KB
/* $Id: memobj-r0drv-netbsd.c 63558 2016-08-16 13:51:47Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NetBSD.
 */

/*
 * Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
 * Copyright (c) 2011 Andriy Gapon <avg@FreeBSD.org>
 * Copyright (c) 2014 Arto Huusko
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-netbsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The NetBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJNETBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** The size of the allocation. */
    size_t              size;
    /** The physical pages backing the object. */
    struct pglist       pglist;
} RTR0MEMOBJNETBSD, *PRTR0MEMOBJNETBSD;


typedef struct vm_map *vm_map_t;

/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem    The memory object.
 */
static vm_map_t rtR0MemObjNetBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}
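
/**
 * Frees the native resources of a memory object: kernel allocations,
 * kernel virtual space, physical page lists, and user-space wirings,
 * depending on the object type.
 */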
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        {
            kmem_free(pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            break;
        }
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        {
            /* Unmap */
            pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            /* Free the virtual space */
            uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNetBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                /* Unwire the locked user pages (new_pageable=1). */
                uvm_map_pageable(
                    &((struct proc *)pMemNetBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map,
                    (vaddr_t)pMemNetBSD->Core.pv,
                    ((vaddr_t)pMemNetBSD->Core.pv) + pMemNetBSD->Core.cb,
                    1, 0);
            }
            break;
        case RTR0MEMOBJTYPE_RES_VIRT:
            if (pMemNetBSD->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;
        case RTR0MEMOBJTYPE_MAPPING:
            if (pMemNetBSD->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
            {
                pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNetBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
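
/**
 * Common worker for the page-oriented allocators: reserves kernel virtual
 * space, allocates physical pages below VmPhysAddrHigh (contiguous if
 * requested), and enters them into the kernel pmap.
 *
 * @returns IPRT status code.
 */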
static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
                                       paddr_t VmPhysAddrHigh, bool fContiguous)
{
    /* Virtual space first */
    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
        return VERR_NO_MEMORY;

    struct pglist *rlist = &pMemNetBSD->pglist;

    int nsegs = fContiguous ? 1 : INT_MAX;

    /* Physical pages */
    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
                        PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
    {
        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
        return VERR_NO_MEMORY;
    }

    /* Map */
    struct vm_page *page;
    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
    if (fExecutable)
        prot |= VM_PROT_EXECUTE;
    vaddr_t virt2 = virt;
    TAILQ_FOREACH(page, rlist, pageq.queue)
    {
        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
        virt2 += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    if (fContiguous)
    {
        page = TAILQ_FIRST(rlist);
        pMemNetBSD->Core.u.Cont.Phys = VM_PAGE_TO_PHYS(page);
    }
    return VINF_SUCCESS;
}
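
/**
 * Allocates anonymous wired kernel memory via kmem_alloc() and, when
 * fExecutable is set, re-protects the range RWX with pmap_protect().
 */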
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    void *pvMem = kmem_alloc(cb, KM_SLEEP);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    if (fExecutable)
    {
        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
    }

    pMemNetBSD->Core.pv = pvMem;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}
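
/**
 * Allocates kernel memory backed by physical pages below 4GB.
 */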
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}
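
/**
 * Allocates physically contiguous kernel memory below 4GB.
 */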
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}
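
/**
 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC:
 * allocates physical pages with uvm_pglistalloc() without mapping them.
 */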
static int rtR0MemObjNetBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                          size_t cb,
                                          RTHCPHYS PhysHighest, size_t uAlignment,
                                          bool fContiguous)
{
    paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    enmType, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(paddr_t)0;

    int nsegs = fContiguous ? 1 : INT_MAX;

    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
    if (error)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    if (fContiguous)
    {
        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
        pMemNetBSD->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemNetBSD->Core.u.Phys.fAllocated = false;
    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    TAILQ_INIT(&pMemNetBSD->pglist);
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}
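
/**
 * Locks (wires) a range of user memory in the given process with
 * uvm_map_pageable().
 */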
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    /* Wire the range (new_pageable=0); undone in rtR0MemObjNativeFree. */
    int rc = uvm_map_pageable(
        &((struct proc *)R0Process)->p_vmspace->vm_map,
        R3Ptr,
        R3Ptr + cb,
        0, 0);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* Kernel memory is assumed to be wired already (all memory allocated
       by VBox code should be), so just record the range. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = pv;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}
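
/**
 * Reserves kernel virtual address space only (no backing pages);
 * reserving at a caller-specified fixed address is not supported.
 */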
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("reserve specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = (void *)virt;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    printf("NativeReserveUser\n");
    return VERR_NOT_SUPPORTED;
}
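
/**
 * Maps (a sub-range of) a physical memory object into kernel virtual
 * address space; only RTR0MEMOBJTYPE_PHYS and RTR0MEMOBJTYPE_PHYS_NC
 * objects are supported.
 */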
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("map to specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD0 = (PRTR0MEMOBJNETBSD)pMemToMap;
    if (   pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS
        && pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
    {
        printf("memory to map is not physical\n");
        return VERR_NOT_SUPPORTED;
    }
    size_t sz = cbSub > 0 ? cbSub : pMemNetBSD0->Core.cb;

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    vm_prot_t prot = 0;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        prot |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        prot |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        prot |= VM_PROT_EXECUTE;

    /* Enter the pages that fall inside [offSub, offSub + cbSub). */
    struct vm_page *page;
    vaddr_t virt2 = virt;
    size_t map_pos = 0;
    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
    {
        if (map_pos >= offSub)
        {
            if (cbSub > 0 && (map_pos >= offSub + cbSub))
                break;

            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
            virt2 += PAGE_SIZE;
        }
        map_pos += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    printf("NativeMapUser\n");
    return VERR_NOT_SUPPORTED;
}
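
/**
 * Changes the protection of an existing mapping with uvm_map_protect().
 */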
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t ProtectionFlags = 0;
    vaddr_t   AddrStart = (vaddr_t)pMem->pv + offSub;
    vm_map_t  pVmMap = rtR0MemObjNetBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= UVM_PROT_R;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= UVM_PROT_W;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= UVM_PROT_X;

    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
                                ProtectionFlags, 0);
    if (!error)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}
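
/**
 * Returns the physical address of the given page in the object, using
 * pmap_extract() for mapped objects and the page list for PHYS_NC.
 */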
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        {
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            if (!pmap_extract(pmap_kernel(), va, &pa))
                return NIL_RTHCPHYS;
            return pa;
        }
        case RTR0MEMOBJTYPE_CONT:
            return pMemNetBSD->Core.u.Cont.Phys + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS:
            return pMemNetBSD->Core.u.Phys.PhysBase + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Walk the page list to the requested page. */
            struct vm_page *page;
            size_t i = 0;
            TAILQ_FOREACH(page, &pMemNetBSD->pglist, pageq.queue)
            {
                if (i == iPage)
                    return VM_PAGE_TO_PHYS(page);
                i++;
            }
            return NIL_RTHCPHYS;
        }
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_MAPPING:
        {
            pmap_t pmap;
            if (pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                pmap = pmap_kernel();
            else
                pmap = ((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map.pmap;
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            if (!pmap_extract(pmap, va, &pa))
                return NIL_RTHCPHYS;
            return pa;
        }
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
        default:
            return NIL_RTHCPHYS;
    }
}