VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c@13665

Last change on this file since 13665 was 13009, checked in by vboxsync, 16 years ago

@todo is always written in a /* */ or /** */ comment, doxygen won't pick it up otherwise.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Rev
File size: 37.2 KB
1/* $Revision: 13009 $ */
2/** @file
3 * IPRT - Ring-0 Memory Objects, Linux.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include "the-linux-kernel.h"
36
37#include <iprt/memobj.h>
38#include <iprt/alloc.h>
39#include <iprt/assert.h>
40#include <iprt/log.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include "internal/memobj.h"
44
45/* early 2.6 kernels */
46#ifndef PAGE_SHARED_EXEC
47# define PAGE_SHARED_EXEC PAGE_SHARED
48#endif
49#ifndef PAGE_READONLY_EXEC
50# define PAGE_READONLY_EXEC PAGE_READONLY
51#endif
52
53
54/*******************************************************************************
55* Structures and Typedefs *
56*******************************************************************************/
57/**
58 * The Linux version of the memory object structure.
59 */
60typedef struct RTR0MEMOBJLNX
61{
62 /** The core structure. */
63 RTR0MEMOBJINTERNAL Core;
64 /** Set if the allocation is contiguous.
65 * This means it has to be given back as one chunk. */
66 bool fContiguous;
67 /** Set if we've vmap'ed the memory into ring-0. */
68 bool fMappedToRing0;
69 /** The number of pages in the apPages array. */
70 size_t cPages;
71 /** Array of struct page pointers. (variable size) */
72 struct page *apPages[1];
73} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
74
75
76/**
77 * Helper that converts from a RTR0PROCESS handle to a linux task.
78 *
79 * @returns The corresponding Linux task.
80 * @param R0Process IPRT ring-0 process handle.
81 */
82struct task_struct *rtR0ProcessToLinuxTask(RTR0PROCESS R0Process)
83{
84 /** @todo fix rtR0ProcessToLinuxTask!! */
85 return R0Process == RTR0ProcHandleSelf() ? current : NULL;
86}
87
88
89/**
90 * Compute order. Some functions allocate 2^order pages.
91 *
92 * @returns order.
93 * @param cPages Number of pages.
94 */
95static int rtR0MemObjLinuxOrder(size_t cPages)
96{
97 int iOrder;
98 size_t cTmp;
99
100 for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
101 ;
102 if (cPages & ~((size_t)1 << iOrder))
103 ++iOrder;
104
105 return iOrder;
106}
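/*
 * Worked examples for the order calculation above (illustrative only):
 *
 *     rtR0MemObjLinuxOrder(1)  -> 0   (2^0 = 1 page)
 *     rtR0MemObjLinuxOrder(3)  -> 2   (rounded up to 2^2 = 4 pages)
 *     rtR0MemObjLinuxOrder(4)  -> 2   (exact power of two)
 *
 * In other words, for cPages >= 1 the returned order is ceil(log2(cPages)),
 * since alloc_pages() can only hand out naturally aligned power-of-two blocks.
 */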
107
108
109/**
110 * Converts from RTMEM_PROT_* to Linux PAGE_*.
111 *
112 * @returns Linux page protection constant.
113 * @param fProt The IPRT protection mask.
114 * @param fKernel Whether it applies to kernel or user space.
115 */
116static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool fKernel)
117{
118 switch (fProt)
119 {
120 default:
121 AssertMsgFailed(("%#x %d\n", fProt, fKernel));
122 case RTMEM_PROT_NONE:
123 return PAGE_NONE;
124
125 case RTMEM_PROT_READ:
126 return fKernel ? PAGE_KERNEL_RO : PAGE_READONLY;
127
128 case RTMEM_PROT_WRITE:
129 case RTMEM_PROT_WRITE | RTMEM_PROT_READ:
130 return fKernel ? PAGE_KERNEL : PAGE_SHARED;
131
132 case RTMEM_PROT_EXEC:
133 case RTMEM_PROT_EXEC | RTMEM_PROT_READ:
134#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
135 if (fKernel)
136 {
137 pgprot_t fPg = MY_PAGE_KERNEL_EXEC;
138 pgprot_val(fPg) &= ~_PAGE_RW;
139 return fPg;
140 }
141 return PAGE_READONLY_EXEC;
142#else
143 return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_READONLY_EXEC;
144#endif
145
146 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
147 case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_READ:
148 return fKernel ? MY_PAGE_KERNEL_EXEC : PAGE_SHARED_EXEC;
149 }
150}
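/*
 * Illustrative calls of the conversion above (sketch, not exhaustive; the
 * variable names are invented for the example):
 *
 *     pgprot_t fPgKrnlRw = rtR0MemObjLinuxConvertProt(RTMEM_PROT_READ | RTMEM_PROT_WRITE, true);  // kernel -> PAGE_KERNEL
 *     pgprot_t fPgUserRo = rtR0MemObjLinuxConvertProt(RTMEM_PROT_READ, false);                    // user   -> PAGE_READONLY
 *
 * Unknown combinations assert in strict builds and fall through to the
 * RTMEM_PROT_NONE case, i.e. PAGE_NONE.
 */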
151
152
153/**
154 * Internal worker that allocates physical pages and creates the memory object for them.
155 *
156 * @returns IPRT status code.
157 * @param ppMemLnx Where to store the memory object pointer.
158 * @param enmType The object type.
159 * @param cb The number of bytes to allocate.
160 * @param fFlagsLnx The page allocation flags (GFPs).
161 * @param fContiguous Whether the allocation must be contiguous.
162 */
163static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb, unsigned fFlagsLnx, bool fContiguous)
164{
165 size_t iPage;
166 size_t cPages = cb >> PAGE_SHIFT;
167 struct page *paPages;
168
169 /*
170 * Allocate a memory object structure that's large enough to contain
171 * the page pointer array.
172 */
173 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
174 if (!pMemLnx)
175 return VERR_NO_MEMORY;
176 pMemLnx->cPages = cPages;
177
178 /*
179 * Allocate the pages.
180 * For small allocations we'll try contiguous first and then fall back on page by page.
181 */
182#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
183 if ( fContiguous
184 || cb <= PAGE_SIZE * 2)
185 {
186 paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cb >> PAGE_SHIFT));
187 if (paPages)
188 {
189 fContiguous = true;
190 for (iPage = 0; iPage < cPages; iPage++)
191 pMemLnx->apPages[iPage] = &paPages[iPage];
192 }
193 else if (fContiguous)
194 {
195 rtR0MemObjDelete(&pMemLnx->Core);
196 return VERR_NO_MEMORY;
197 }
198 }
199
200 if (!fContiguous)
201 {
202 for (iPage = 0; iPage < cPages; iPage++)
203 {
204 pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx);
205 if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
206 {
207 while (iPage-- > 0)
208 __free_page(pMemLnx->apPages[iPage]);
209 rtR0MemObjDelete(&pMemLnx->Core);
210 return VERR_NO_MEMORY;
211 }
212 }
213 }
214
215#else /* < 2.4.22 */
216 /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
217 paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cb >> PAGE_SHIFT));
218 if (!paPages)
219 {
220 rtR0MemObjDelete(&pMemLnx->Core);
221 return VERR_NO_MEMORY;
222 }
223 for (iPage = 0; iPage < cPages; iPage++)
224 {
225 pMemLnx->apPages[iPage] = &paPages[iPage];
226 MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
227 if (PageHighMem(pMemLnx->apPages[iPage]))
228 BUG();
229 }
230
231 fContiguous = true;
232#endif /* < 2.4.22 */
233 pMemLnx->fContiguous = fContiguous;
234
235 /*
236 * Reserve the pages.
237 */
238 for (iPage = 0; iPage < cPages; iPage++)
239 SetPageReserved(pMemLnx->apPages[iPage]);
240
241 *ppMemLnx = pMemLnx;
242 return VINF_SUCCESS;
243}
244
245
246/**
247 * Frees the physical pages allocated by the rtR0MemObjLinuxAllocPages() call.
248 *
249 * This method does NOT free the object.
250 *
251 * @param pMemLnx The object which physical pages should be freed.
252 */
253static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
254{
255 size_t iPage = pMemLnx->cPages;
256 if (iPage > 0)
257 {
258 /*
259 * Restore the page flags.
260 */
261 while (iPage-- > 0)
262 {
263 ClearPageReserved(pMemLnx->apPages[iPage]);
264#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
265#else
266 MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
267#endif
268 }
269
270 /*
271 * Free the pages.
272 */
273#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
274 if (!pMemLnx->fContiguous)
275 {
276 iPage = pMemLnx->cPages;
277 while (iPage-- > 0)
278 __free_page(pMemLnx->apPages[iPage]);
279 }
280 else
281#endif
282 __free_pages(pMemLnx->apPages[0], rtR0MemObjLinuxOrder(pMemLnx->cPages));
283
284 pMemLnx->cPages = 0;
285 }
286}
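/*
 * Typical usage of the two helpers above (illustrative sketch only; the
 * real callers are the rtR0MemObjNativeAlloc* functions further down):
 *
 *     PRTR0MEMOBJLNX pMemLnx;
 *     int rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, _64K, GFP_HIGHUSER, false);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... use pMemLnx->apPages[0 .. pMemLnx->cPages - 1] ...
 *         rtR0MemObjLinuxFreePages(pMemLnx);
 *         rtR0MemObjDelete(&pMemLnx->Core);
 *     }
 */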
287
288
289/**
290 * Maps the allocation into ring-0.
291 *
292 * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJLNX::fMappedToRing0 members.
293 *
294 * Contiguous mappings that aren't in 'high' memory will already be mapped into kernel
295 * space, so we'll use that mapping if possible. If execute access is required, we'll
296 * play safe and do our own mapping.
297 *
298 * @returns IPRT status code.
299 * @param pMemLnx The linux memory object to map.
300 * @param fExecutable Whether execute access is required.
301 */
302static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool fExecutable)
303{
304 int rc = VINF_SUCCESS;
305
306 /*
307 * Choose mapping strategy.
308 */
309 bool fMustMap = fExecutable
310 || !pMemLnx->fContiguous;
311 if (!fMustMap)
312 {
313 size_t iPage = pMemLnx->cPages;
314 while (iPage-- > 0)
315 if (PageHighMem(pMemLnx->apPages[iPage]))
316 {
317 fMustMap = true;
318 break;
319 }
320 }
321
322 Assert(!pMemLnx->Core.pv);
323 Assert(!pMemLnx->fMappedToRing0);
324
325 if (fMustMap)
326 {
327 /*
328 * Use vmap - 2.4.22 and later.
329 */
330#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
331 pgprot_t fPg;
332 pgprot_val(fPg) = _PAGE_PRESENT | _PAGE_RW;
333# ifdef _PAGE_NX
334 if (!fExecutable)
335 pgprot_val(fPg) |= _PAGE_NX;
336# endif
337
338# ifdef VM_MAP
339 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
340# else
341 pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_ALLOC, fPg);
342# endif
343 if (pMemLnx->Core.pv)
344 pMemLnx->fMappedToRing0 = true;
345 else
346 rc = VERR_MAP_FAILED;
347#else /* < 2.4.22 */
348 rc = VERR_NOT_SUPPORTED;
349#endif
350 }
351 else
352 {
353 /*
354 * Use the kernel RAM mapping.
355 */
356 pMemLnx->Core.pv = phys_to_virt(page_to_phys(pMemLnx->apPages[0]));
357 Assert(pMemLnx->Core.pv);
358 }
359
360 return rc;
361}
362
363
364/**
365 * Undoes what rtR0MemObjLinuxVMap() did.
366 *
367 * @param pMemLnx The linux memory object.
368 */
369static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
370{
371#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
372 if (pMemLnx->fMappedToRing0)
373 {
374 Assert(pMemLnx->Core.pv);
375 vunmap(pMemLnx->Core.pv);
376 pMemLnx->fMappedToRing0 = false;
377 }
378#else /* < 2.4.22 */
379 Assert(!pMemLnx->fMappedToRing0);
380#endif
381 pMemLnx->Core.pv = NULL;
382}
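/*
 * How the two mapping helpers above are paired by their callers
 * (illustrative sketch; fExecutable = false here):
 *
 *     rc = rtR0MemObjLinuxVMap(pMemLnx, false);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... pMemLnx->Core.pv is now usable from ring-0 ...
 *         rtR0MemObjLinuxVUnmap(pMemLnx);
 *     }
 *
 * A contiguous allocation entirely outside 'high' memory keeps the existing
 * kernel mapping (phys_to_virt), so only executable, non-contiguous or
 * highmem allocations pay for a vmap().
 */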
383
384
385int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
386{
387 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
388
389 /*
390 * Release any memory that we've allocated or locked.
391 */
392 switch (pMemLnx->Core.enmType)
393 {
394 case RTR0MEMOBJTYPE_LOW:
395 case RTR0MEMOBJTYPE_PAGE:
396 case RTR0MEMOBJTYPE_CONT:
397 case RTR0MEMOBJTYPE_PHYS:
398 case RTR0MEMOBJTYPE_PHYS_NC:
399 rtR0MemObjLinuxVUnmap(pMemLnx);
400 rtR0MemObjLinuxFreePages(pMemLnx);
401 break;
402
403 case RTR0MEMOBJTYPE_LOCK:
404 if (pMemLnx->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
405 {
406 size_t iPage;
407 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
408 Assert(pTask);
409 if (pTask && pTask->mm)
410 down_read(&pTask->mm->mmap_sem);
411
412 iPage = pMemLnx->cPages;
413 while (iPage-- > 0)
414 {
415 if (!PageReserved(pMemLnx->apPages[iPage]))
416 SetPageDirty(pMemLnx->apPages[iPage]);
417 page_cache_release(pMemLnx->apPages[iPage]);
418 }
419
420 if (pTask && pTask->mm)
421 up_read(&pTask->mm->mmap_sem);
422 }
423 else
424 AssertFailed(); /* not implemented for R0 */
425 break;
426
427 case RTR0MEMOBJTYPE_RES_VIRT:
428 Assert(pMemLnx->Core.pv);
429 if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
430 {
431 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
432 Assert(pTask);
433 if (pTask && pTask->mm)
434 {
435 down_write(&pTask->mm->mmap_sem);
436 MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
437 up_write(&pTask->mm->mmap_sem);
438 }
439 }
440 else
441 {
442 vunmap(pMemLnx->Core.pv);
443
444 Assert(pMemLnx->cPages == 1 && pMemLnx->apPages[0] != NULL);
445 __free_page(pMemLnx->apPages[0]);
446 pMemLnx->apPages[0] = NULL;
447 pMemLnx->cPages = 0;
448 }
449 pMemLnx->Core.pv = NULL;
450 break;
451
452 case RTR0MEMOBJTYPE_MAPPING:
453 Assert(pMemLnx->cPages == 0); Assert(pMemLnx->Core.pv);
454 if (pMemLnx->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
455 {
456 struct task_struct *pTask = rtR0ProcessToLinuxTask(pMemLnx->Core.u.Lock.R0Process);
457 Assert(pTask);
458 if (pTask && pTask->mm)
459 {
460 down_write(&pTask->mm->mmap_sem);
461 MY_DO_MUNMAP(pTask->mm, (unsigned long)pMemLnx->Core.pv, pMemLnx->Core.cb);
462 up_write(&pTask->mm->mmap_sem);
463 }
464 }
465 else
466 vunmap(pMemLnx->Core.pv);
467 pMemLnx->Core.pv = NULL;
468 break;
469
470 default:
471 AssertMsgFailed(("enmType=%d\n", pMemLnx->Core.enmType));
472 return VERR_INTERNAL_ERROR;
473 }
474 return VINF_SUCCESS;
475}
476
477
478int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
479{
480 PRTR0MEMOBJLNX pMemLnx;
481 int rc;
482
483#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
484 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, GFP_HIGHUSER, false /* non-contiguous */);
485#else
486 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, GFP_USER, false /* non-contiguous */);
487#endif
488 if (RT_SUCCESS(rc))
489 {
490 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
491 if (RT_SUCCESS(rc))
492 {
493 *ppMem = &pMemLnx->Core;
494 return rc;
495 }
496
497 rtR0MemObjLinuxFreePages(pMemLnx);
498 rtR0MemObjDelete(&pMemLnx->Core);
499 }
500
501 return rc;
502}
503
504
505int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
506{
507 PRTR0MEMOBJLNX pMemLnx;
508 int rc;
509
510#ifdef RT_ARCH_AMD64
511# ifdef GFP_DMA32
512 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, GFP_DMA32, false /* non-contiguous */);
513 if (RT_FAILURE(rc))
514# endif
515 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, GFP_DMA, false /* non-contiguous */);
516#else
517 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, GFP_USER, false /* non-contiguous */);
518#endif
519 if (RT_SUCCESS(rc))
520 {
521 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
522 if (RT_SUCCESS(rc))
523 {
524 *ppMem = &pMemLnx->Core;
525 return rc;
526 }
527
528 rtR0MemObjLinuxFreePages(pMemLnx);
529 rtR0MemObjDelete(&pMemLnx->Core);
530 }
531
532 return rc;
533}
534
535
536int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
537{
538 PRTR0MEMOBJLNX pMemLnx;
539 int rc;
540
541#ifdef RT_ARCH_AMD64
542# ifdef GFP_DMA32
543 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, GFP_DMA32, true /* contiguous */);
544 if (RT_FAILURE(rc))
545# endif
546 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, GFP_DMA, true /* contiguous */);
547#else
548 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, GFP_USER, true /* contiguous */);
549#endif
550 if (RT_SUCCESS(rc))
551 {
552 rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
553 if (RT_SUCCESS(rc))
554 {
555#ifdef RT_STRICT
556 size_t iPage = pMemLnx->cPages;
557 while (iPage-- > 0)
558 Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
559#endif
560 pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
561 *ppMem = &pMemLnx->Core;
562 return rc;
563 }
564
565 rtR0MemObjLinuxFreePages(pMemLnx);
566 rtR0MemObjDelete(&pMemLnx->Core);
567 }
568
569 return rc;
570}
571
572
573/**
574 * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
575 *
576 * @returns IPRT status.
577 * @param ppMem Where to store the memory object pointer on success.
578 * @param enmType The object type.
579 * @param cb The size of the allocation.
580 * @param PhysHighest See rtR0MemObjNativeAllocPhys.
581 * @param fGfp The Linux GFP flags to use for the allocation.
582 */
583static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType, size_t cb, RTHCPHYS PhysHighest, unsigned fGfp)
584{
585 PRTR0MEMOBJLNX pMemLnx;
586 int rc;
587
588 rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, fGfp,
589 enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */);
590 if (RT_FAILURE(rc))
591 return rc;
592
593 /*
594 * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
595 */
596 if (PhysHighest != NIL_RTHCPHYS)
597 {
598 size_t iPage = pMemLnx->cPages;
599 while (iPage-- > 0)
600 if (page_to_phys(pMemLnx->apPages[iPage]) >= PhysHighest)
601 {
602 rtR0MemObjLinuxFreePages(pMemLnx);
603 rtR0MemObjDelete(&pMemLnx->Core);
604 return VERR_NO_MEMORY;
605 }
606 }
607
608 /*
609 * Complete the object.
610 */
611 if (enmType == RTR0MEMOBJTYPE_PHYS)
612 {
613 pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
614 pMemLnx->Core.u.Phys.fAllocated = true;
615 }
616 *ppMem = &pMemLnx->Core;
617 return rc;
618}
619
620
621/**
622 * Worker for rtR0MemObjNativeAllocPhys and rtR0MemObjNativeAllocPhysNC.
623 *
624 * @returns IPRT status.
625 * @param ppMem Where to store the memory object pointer on success.
626 * @param enmType The object type.
627 * @param cb The size of the allocation.
628 * @param PhysHighest See rtR0MemObjNativeAllocPhys.
629 */
630static int rtR0MemObjLinuxAllocPhysSub(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType, size_t cb, RTHCPHYS PhysHighest)
631{
632 int rc;
633
634 /*
635 * There are two clear cases: the <=16MB one and the anything-goes one.
636 * When the physical address limit is somewhere in between those two we'll
637 * just have to try, starting with HIGHUSER and working our way through the
638 * different types, hoping we'll get lucky.
639 *
640 * We should probably move this physical address restriction logic up to
641 * the page alloc function as it would be more efficient there. But since
642 * we don't expect this to be a performance issue just yet it can wait.
643 */
644 if (PhysHighest == NIL_RTHCPHYS)
645 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_HIGHUSER);
646 else if (PhysHighest <= _1M * 16)
647 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_DMA);
648 else
649 {
650 rc = VERR_NO_MEMORY;
651 if (RT_FAILURE(rc))
652 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_HIGHUSER);
653 if (RT_FAILURE(rc))
654 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_USER);
655#ifdef GFP_DMA32
656 if (RT_FAILURE(rc))
657 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_DMA32);
658#endif
659 if (RT_FAILURE(rc))
660 rc = rtR0MemObjLinuxAllocPhysSub2(ppMem, enmType, cb, PhysHighest, GFP_DMA);
661 }
662 return rc;
663}
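/*
 * Example of the fallback sequence above for an in-between limit
 * (illustrative only):
 *
 *     rc = rtR0MemObjLinuxAllocPhysSub(&pMem, RTR0MEMOBJTYPE_PHYS_NC, _1M, _4G);
 *
 * tries GFP_HIGHUSER, then GFP_USER, then GFP_DMA32 (when the kernel defines
 * it) and finally GFP_DMA, returning the first allocation whose pages all
 * pass the PhysHighest check in rtR0MemObjLinuxAllocPhysSub2().
 */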
664
665
666int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
667{
668 return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest);
669}
670
671
672int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
673{
674 return rtR0MemObjLinuxAllocPhysSub(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest);
675}
676
677
678int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
679{
680 /*
681 * All we need to do here is to validate that we can use
682 * ioremap on the specified address (32/64-bit dma_addr_t).
683 */
684 PRTR0MEMOBJLNX pMemLnx;
685 dma_addr_t PhysAddr = Phys;
686 AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);
687
688 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
689 if (!pMemLnx)
690 return VERR_NO_MEMORY;
691
692 pMemLnx->Core.u.Phys.PhysBase = PhysAddr;
693 pMemLnx->Core.u.Phys.fAllocated = false;
694 Assert(!pMemLnx->cPages);
695 *ppMem = &pMemLnx->Core;
696 return VINF_SUCCESS;
697}
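/*
 * Note on the dma_addr_t check above (illustrative): on a kernel with a
 * 32-bit dma_addr_t, passing e.g. Phys = UINT64_C(0x100000000) truncates
 * PhysAddr to 0, the AssertMsgReturn fires and the function fails with
 * VERR_ADDRESS_TOO_BIG instead of registering a bogus physical range.
 */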
698
699
700int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
701{
702 const int cPages = cb >> PAGE_SHIFT;
703 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
704 struct vm_area_struct **papVMAs;
705 PRTR0MEMOBJLNX pMemLnx;
706 int rc = VERR_NO_MEMORY;
707
708 /*
709 * Check for valid task and size overflows.
710 */
711 if (!pTask)
712 return VERR_NOT_SUPPORTED;
713 if (((size_t)cPages << PAGE_SHIFT) != cb)
714 return VERR_OUT_OF_RANGE;
715
716 /*
717 * Allocate the memory object and a temporary buffer for the VMAs.
718 */
719 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
720 if (!pMemLnx)
721 return VERR_NO_MEMORY;
722
723 papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
724 if (papVMAs)
725 {
726 down_read(&pTask->mm->mmap_sem);
727
728 /*
729 * Get user pages.
730 */
731 rc = get_user_pages(pTask, /* Task for fault accounting. */
732 pTask->mm, /* Whose pages. */
733 R3Ptr, /* Where from. */
734 cPages, /* How many pages. */
735 1, /* Write to memory. */
736 0, /* force. */
737 &pMemLnx->apPages[0], /* Page array. */
738 papVMAs); /* vmas */
739 if (rc == cPages)
740 {
741 /*
742 * Flush dcache (required?) and protect against fork.
743 */
744 /** @todo The Linux fork() protection will require more work if this API
745 * is to be used for anything but locking VM pages. */
746 while (rc-- > 0)
747 {
748 flush_dcache_page(pMemLnx->apPages[rc]);
749 papVMAs[rc]->vm_flags |= VM_DONTCOPY;
750 }
751
752 up_read(&pTask->mm->mmap_sem);
753
754 RTMemFree(papVMAs);
755
756 pMemLnx->Core.u.Lock.R0Process = R0Process;
757 pMemLnx->cPages = cPages;
758 Assert(!pMemLnx->fMappedToRing0);
759 *ppMem = &pMemLnx->Core;
760
761 return VINF_SUCCESS;
762 }
763
764 /*
765 * Failed - we need to unlock any pages that we succeeded in locking.
766 */
767 while (rc-- > 0)
768 {
769 if (!PageReserved(pMemLnx->apPages[rc]))
770 SetPageDirty(pMemLnx->apPages[rc]);
771 page_cache_release(pMemLnx->apPages[rc]);
772 }
773
774 up_read(&pTask->mm->mmap_sem);
775
776 RTMemFree(papVMAs);
777 rc = VERR_LOCK_FAILED;
778 }
779
780 rtR0MemObjDelete(&pMemLnx->Core);
781 return rc;
782}
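/*
 * Note on the failure path above (descriptive sketch): get_user_pages()
 * returns the number of pages actually pinned, so on a short count only
 * those rc pages are dirtied and released before the function reports
 * VERR_LOCK_FAILED; a negative return skips the loop entirely.
 */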
783
784
785int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
786{
787 /* What is there to lock? Should/Can we fake this? */
788 return VERR_NOT_SUPPORTED;
789}
790
791
792int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
793{
794#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
795 const size_t cPages = cb >> PAGE_SHIFT;
796 struct page *pDummyPage;
797 struct page **papPages;
798
799 /* check for unsupported stuff. */
800 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
801 AssertMsgReturn(uAlignment <= PAGE_SIZE, ("%#x\n", uAlignment), VERR_NOT_SUPPORTED);
802
803 /*
804 * Allocate a dummy page and create a page pointer array for vmap such that
805 * the dummy page is mapped all over the reserved area.
806 */
807 pDummyPage = alloc_page(GFP_HIGHUSER);
808 if (!pDummyPage)
809 return VERR_NO_MEMORY;
810 papPages = RTMemAlloc(sizeof(*papPages) * cPages);
811 if (papPages)
812 {
813 void *pv;
814 size_t iPage = cPages;
815 while (iPage-- > 0)
816 papPages[iPage] = pDummyPage;
817# ifdef VM_MAP
818 pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
819# else
820 pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
821# endif
822 RTMemFree(papPages);
823 if (pv)
824 {
825 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
826 if (pMemLnx)
827 {
828 pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
829 pMemLnx->cPages = 1;
830 pMemLnx->apPages[0] = pDummyPage;
831 *ppMem = &pMemLnx->Core;
832 return VINF_SUCCESS;
833 }
834 vunmap(pv);
835 }
836 }
837 __free_page(pDummyPage);
838 return VERR_NO_MEMORY;
839
840#else /* < 2.4.22 */
841 /*
842 * Could probably use ioremap here, but the caller is in a better position than us
843 * to select some safe physical memory.
844 */
845 return VERR_NOT_SUPPORTED;
846#endif
847}
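/*
 * Rough cost example for the dummy page trick above (illustrative, assumes
 * 4KB pages): reserving 1MB of kernel virtual address space pins just one
 * physical page, because all 256 PTEs of the vmap'ed range point to the
 * same read-only dummy page until the reservation is freed.
 */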
848
849
850/**
851 * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativeMapUser that creates
852 * an empty user space mapping.
853 *
854 * The caller takes care of acquiring the mmap_sem of the task.
855 *
856 * @returns Pointer to the mapping.
857 * (void *)-1 on failure.
858 * @param R3PtrFixed (RTR3PTR)-1 if anywhere, otherwise a specific location.
859 * @param cb The size of the mapping.
860 * @param uAlignment The alignment of the mapping.
861 * @param pTask The Linux task to create this mapping in.
862 * @param fProt The RTMEM_PROT_* mask.
863 */
864static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, struct task_struct *pTask, unsigned fProt)
865{
866 unsigned fLnxProt;
867 unsigned long ulAddr;
868
869 /*
870 * Convert from IPRT protection to mman.h PROT_ and call do_mmap.
871 */
872 fProt &= (RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
873 if (fProt == RTMEM_PROT_NONE)
874 fLnxProt = PROT_NONE;
875 else
876 {
877 fLnxProt = 0;
878 if (fProt & RTMEM_PROT_READ)
879 fLnxProt |= PROT_READ;
880 if (fProt & RTMEM_PROT_WRITE)
881 fLnxProt |= PROT_WRITE;
882 if (fProt & RTMEM_PROT_EXEC)
883 fLnxProt |= PROT_EXEC;
884 }
885
886 if (R3PtrFixed != (RTR3PTR)-1)
887 ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
888 else
889 {
890 ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
891 if ( !(ulAddr & ~PAGE_MASK)
892 && (ulAddr & (uAlignment - 1)))
893 {
894 /** @todo implement uAlignment properly... We'll probably need to make some dummy mappings to fill
895 * up alignment gaps. This is of course complicated by fragmentation (which we might have caused
896 * ourselves) and further by there being two mmap strategies (top / bottom). */
897 /* For now, just ignore uAlignment requirements... */
898 }
899 }
900 if (ulAddr & ~PAGE_MASK) /* ~PAGE_MASK == PAGE_OFFSET_MASK */
901 return (void *)-1;
902 return (void *)ulAddr;
903}
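/*
 * Illustrative call of the mmap helper above (sketch; mirrors the use in
 * rtR0MemObjNativeReserveUser below, cb and pTask assumed to be in scope):
 *
 *     down_write(&pTask->mm->mmap_sem);
 *     pv = rtR0MemObjLinuxDoMmap((RTR3PTR)-1, cb, PAGE_SIZE, pTask, RTMEM_PROT_NONE);
 *     up_write(&pTask->mm->mmap_sem);
 *     if (pv == (void *)-1)
 *         return VERR_NO_MEMORY;
 */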
904
905
906int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
907{
908 PRTR0MEMOBJLNX pMemLnx;
909 void *pv;
910 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
911 if (!pTask)
912 return VERR_NOT_SUPPORTED;
913
914 /*
915 * Let rtR0MemObjLinuxDoMmap do the difficult bits.
916 */
917 down_write(&pTask->mm->mmap_sem);
918 pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
919 up_write(&pTask->mm->mmap_sem);
920 if (pv == (void *)-1)
921 return VERR_NO_MEMORY;
922
923 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
924 if (!pMemLnx)
925 {
926 down_write(&pTask->mm->mmap_sem);
927 MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, cb);
928 up_write(&pTask->mm->mmap_sem);
929 return VERR_NO_MEMORY;
930 }
931
932 pMemLnx->Core.u.ResVirt.R0Process = R0Process;
933 *ppMem = &pMemLnx->Core;
934 return VINF_SUCCESS;
935}
936
937
938int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
939{
940 int rc = VERR_NO_MEMORY;
941 PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
942 PRTR0MEMOBJLNX pMemLnx;
943
944 /* Fail if requested to do something we can't. */
945 AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
946 AssertMsgReturn(uAlignment <= PAGE_SIZE, ("%#x\n", uAlignment), VERR_NOT_SUPPORTED);
947
948 /*
949 * Create the IPRT memory object.
950 */
951 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
952 if (pMemLnx)
953 {
954 if (pMemLnxToMap->cPages)
955 {
956#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
957 /*
958 * Use vmap - 2.4.22 and later.
959 */
960 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
961# ifdef VM_MAP
962 pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
963# else
964 pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
965# endif
966 if (pMemLnx->Core.pv)
967 {
968 pMemLnx->fMappedToRing0 = true;
969 rc = VINF_SUCCESS;
970 }
971 else
972 rc = VERR_MAP_FAILED;
973
974#else /* < 2.4.22 */
975 /*
976 * Only option here is to share mappings if possible and forget about fProt.
977 */
978 if (rtR0MemObjIsRing3(pMemToMap))
979 rc = VERR_NOT_SUPPORTED;
980 else
981 {
982 rc = VINF_SUCCESS;
983 if (!pMemLnxToMap->Core.pv)
984 rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
985 if (RT_SUCCESS(rc))
986 {
987 Assert(pMemLnxToMap->Core.pv);
988 pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
989 }
990 }
991#endif
992 }
993 else
994 {
995 /*
996 * MMIO / physical memory.
997 */
998 Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
999 pMemLnx->Core.pv = ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
1000 if (pMemLnx->Core.pv)
1001 {
1002 /** @todo fix protection. */
1003 rc = VINF_SUCCESS;
1004 }
1005 }
1006 if (RT_SUCCESS(rc))
1007 {
1008 pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
1009 *ppMem = &pMemLnx->Core;
1010 return VINF_SUCCESS;
1011 }
1012 rtR0MemObjDelete(&pMemLnx->Core);
1013 }
1014
1015 return rc;
1016}
1017
1018
1019int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
1020{
1021 struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
1022 PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
1023 int rc = VERR_NO_MEMORY;
1024 PRTR0MEMOBJLNX pMemLnx;
1025
1026 /*
1027 * Check for restrictions.
1028 */
1029 if (!pTask)
1030 return VERR_NOT_SUPPORTED;
1031
1032 /*
1033 * Create the IPRT memory object.
1034 */
1035 pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
1036 if (pMemLnx)
1037 {
1038 /*
1039 * Allocate user space mapping.
1040 */
1041 void *pv;
1042 down_write(&pTask->mm->mmap_sem);
1043 pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, pMemLnxToMap->Core.cb, uAlignment, pTask, fProt);
1044 if (pv != (void *)-1)
1045 {
1046 /*
1047 * Map page by page into the mmap area.
1048 * This is generic, paranoid and not very efficient.
1049 */
1050 pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, false /* user */);
1051 unsigned long ulAddrCur = (unsigned long)pv;
1052 const size_t cPages = pMemLnxToMap->Core.cb >> PAGE_SHIFT;
1053 size_t iPage;
1054 rc = 0;
1055 if (pMemLnxToMap->cPages)
1056 {
1057 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE)
1058 {
1059#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1060 struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
1061 AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
1062#endif
1063
1064#if defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1065 rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
1066 /** @todo nike: not sure if this flag is really needed */
1067 vma->vm_flags |= VM_RESERVED;
1068#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1069 rc = remap_pfn_range(vma, ulAddrCur, page_to_pfn(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1070#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1071 rc = remap_page_range(vma, ulAddrCur, page_to_phys(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1072#else /* 2.4 */
1073 rc = remap_page_range(ulAddrCur, page_to_phys(pMemLnxToMap->apPages[iPage]), PAGE_SIZE, fPg);
1074#endif
1075 if (rc)
1076 break;
1077 }
1078 }
1079 else
1080 {
1081 RTHCPHYS Phys;
1082 if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS)
1083 Phys = pMemLnxToMap->Core.u.Phys.PhysBase;
1084 else if (pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_CONT)
1085 Phys = pMemLnxToMap->Core.u.Cont.Phys;
1086 else
1087 {
1088 AssertMsgFailed(("%d\n", pMemLnxToMap->Core.enmType));
1089 Phys = NIL_RTHCPHYS;
1090 }
1091 if (Phys != NIL_RTHCPHYS)
1092 {
1093 for (iPage = 0; iPage < cPages; iPage++, ulAddrCur += PAGE_SIZE, Phys += PAGE_SIZE)
1094 {
1095#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1096 struct vm_area_struct *vma = find_vma(pTask->mm, ulAddrCur); /* this is probably the same for all the pages... */
1097 AssertBreakStmt(vma, rc = VERR_INTERNAL_ERROR);
1098#endif
1099
1100#if defined(VBOX_USE_INSERT_PAGE) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
1101 rc = vm_insert_page(vma, ulAddrCur, pMemLnxToMap->apPages[iPage]);
1102 /** @todo nike: not sure if this flag is really needed */
1103 vma->vm_flags |= VM_RESERVED;
1104#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
1105 rc = remap_pfn_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1106#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(HAVE_26_STYLE_REMAP_PAGE_RANGE)
1107 rc = remap_page_range(vma, ulAddrCur, Phys, PAGE_SIZE, fPg);
1108#else /* 2.4 */
1109 rc = remap_page_range(ulAddrCur, Phys, PAGE_SIZE, fPg);
1110#endif
1111 if (rc)
1112 break;
1113 }
1114 }
1115 }
1116 if (!rc)
1117 {
1118 up_write(&pTask->mm->mmap_sem);
1119
1120 pMemLnx->Core.pv = pv;
1121 pMemLnx->Core.u.Mapping.R0Process = R0Process;
1122 *ppMem = &pMemLnx->Core;
1123 return VINF_SUCCESS;
1124 }
1125
1126 /*
1127 * Bail out.
1128 */
1129 MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, pMemLnxToMap->Core.cb);
1130 if (rc != VERR_INTERNAL_ERROR)
1131 rc = VERR_NO_MEMORY;
1132 }
1133
1134 up_write(&pTask->mm->mmap_sem);
1135
1136 rtR0MemObjDelete(&pMemLnx->Core);
1137 }
1138
1139 return rc;
1140}
1141
1142
1143RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
1144{
1145 PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
1146
1147 if (pMemLnx->cPages)
1148 return page_to_phys(pMemLnx->apPages[iPage]);
1149
1150 switch (pMemLnx->Core.enmType)
1151 {
1152 case RTR0MEMOBJTYPE_CONT:
1153 return pMemLnx->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);
1154
1155 case RTR0MEMOBJTYPE_PHYS:
1156 return pMemLnx->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);
1157
1158 /* the parent knows */
1159 case RTR0MEMOBJTYPE_MAPPING:
1160 return rtR0MemObjNativeGetPagePhysAddr(pMemLnx->Core.uRel.Child.pParent, iPage);
1161
1162 /* cPages > 0 */
1163 case RTR0MEMOBJTYPE_LOW:
1164 case RTR0MEMOBJTYPE_LOCK:
1165 case RTR0MEMOBJTYPE_PHYS_NC:
1166 case RTR0MEMOBJTYPE_PAGE:
1167 default:
1168 AssertMsgFailed(("%d\n", pMemLnx->Core.enmType));
1169 /* fall thru */
1170
1171 case RTR0MEMOBJTYPE_RES_VIRT:
1172 return NIL_RTHCPHYS;
1173 }
1174}
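/*
 * Illustrative example (sketch): for a physical range entered with
 * rtR0MemObjNativeEnterPhys() there is no page array, so page #2 resolves
 * via the switch above:
 *
 *     RTHCPHYS Phys = rtR0MemObjNativeGetPagePhysAddr(pMem, 2);
 *     // == pMemLnx->Core.u.Phys.PhysBase + (2 << PAGE_SHIFT)
 */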
1175