VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@96407

Last change on this file since 96407 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

/* $Id: memobj-r0drv-solaris.c 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     * allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory block in
     * an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
static vnode_t      g_PageVnode;
static kmutex_t     g_OffsetMtx;
static u_offset_t   g_offPage;

static vnode_t      g_LargePageVnode;
static kmutex_t     g_LargePageOffsetMtx;
static u_offset_t   g_offLargePage;
static bool         g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param pv        The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}
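

/*
 * Illustrative sketch (not part of the driver): how the return value above is
 * composed. With the usual PAGE_SHIFT of 12 (4K pages), a page frame number of
 * 0x1234 combined with an in-page offset of 0x56 yields physical address
 * 0x1234056. The PFN and virtual address below are made-up example values.
 */
#if 0 /* example only, never compiled */
static uint64_t rtR0MemObjSolExamplePhysCompose(void)
{
    pfn_t     const ExamplePfn = 0x1234;        /* hypothetical result of hat_getpfnum() */
    uintptr_t const uExampleVA = 0x12345056;    /* hypothetical virtual address */
    /* High bits come from the page frame number, low bits from the in-page offset. */
    return ((uint64_t)ExamplePfn << PAGE_SHIFT) | (uExampleVA & PAGE_OFFSET_MASK);
}
#endif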


/**
 * Returns the physical address for a page.
 *
 * @param pPage     Pointer to the page.
 *
 * @returns The physical address for a page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param virtAddr  The virtual address to which this page may be mapped in
 *                  the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t));   NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
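

/*
 * A back-of-the-envelope check of the comment above (illustration only):
 * u_offset_t is 64 bits wide, so the per-vnode offset space is 2^64 bytes,
 * i.e. roughly 16777216 TB. Each allocated page consumes one page-sized
 * offset, so 8000 VMs of 1 GB each burn 8000 * 2^30 bytes of offset space
 * per round, allowing about 2^64 / (8000 * 2^30) ~= 2147483 rounds before
 * g_offPage wraps around.
 */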


/**
 * Destroys an allocated page.
 *
 * @param pPage     Pointer to the page to be destroyed.
 * @remarks This function expects the page in @c pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusively lock the page before freeing it. If upgrading the shared lock
     * to exclusive fails, drop the page lock and look the page up from the hash. Record the
     * page offset before we drop the page lock as we cannot touch any page_t members once
     * the lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the page as PG_NORELOC, the lookup must always return the exact same page.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}


/* Currently not used on 32-bit builds, define it out to shut up gcc. */
#if HC_ARCH_BITS == 64
/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys    Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param cb        The size of the allocation.
 *
 * @returns Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist is not really free pages, but when memory is exhausted they'll be moved to the
     * free lists. It's the total of the free + cache lists that we see in the 'free' column of vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg. The 'virtAddr' here is only for colouring, but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}
#endif /* HC_ARCH_BITS == 64 */


/**
 * Frees the allocated pages.
 *
 * @param ppPages   Pointer to the page list.
 * @param cb        Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Allocates one large page.
 *
 * @param puPhys        Where to store the physical address of the allocated
 *                      page. Optional, can be NULL.
 * @param cbLargePage   Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t));   NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrading each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't restore offPrev in case of failure (race condition), we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}
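

/*
 * Scale illustration (not part of the driver): for a cbLargePage of _2M on
 * x86/amd64, the loop above walks _2M / _4K = 512 constituent page_t entries.
 * Each one gets a long-term page_pp_lock() and is downgraded to a shared lock,
 * so neither the pageout scanner nor page relocation can split the large page
 * while it is in use.
 */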


/**
 * Frees the large page.
 *
 * @param ppPages       Pointer to the list of small pages that cover the
 *                      large page.
 * @param cbLargePage   Size of the allocation (i.e. size of the large
 *                      page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted  = false;
    size_t  cPages    = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages   = cPages * sizeof(page_t *);
    page_t *pPageList = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, so try upgrading the shared lock.
         * If that fails, drop the shared page lock (we cannot access any page_t members once this is done)
         * and look the page up from the page hash, locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage          = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we shred the adjacent pages in the linked list, page_destroy_pages() works on
         * adjacent pages via array increments. So this does indeed free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param pv        Pointer to the mapped memory block.
 * @param cb        Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Locks down memory mappings for a virtual address.
 *
 * @param pv            Pointer to the memory to lock down.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, so only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlocks memory mappings for a virtual address.
 *
 * @param pv            Pointer to the locked memory.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}
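

/*
 * Usage sketch (illustration only, with a hypothetical user buffer): lock and
 * unlock calls must be paired with the same range and access type, which is
 * why RTR0MEMOBJSOL::fAccess records the access used at lock time so that
 * rtR0MemObjNativeFree() can undo it later.
 */
#if 0 /* example only, never compiled */
static int rtR0MemObjSolExampleLockPair(void *pvUserBuf, size_t cbBuf)
{
    int rc = rtR0MemObjSolLock(pvUserBuf, cbBuf, S_WRITE);
    if (RT_SUCCESS(rc))
    {
        /* ... safely access the locked-down user pages here ... */
        rtR0MemObjSolUnlock(pvUserBuf, cbBuf, S_WRITE);
    }
    return rc;
}
#endif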


/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr     Where to store the virtual address of the mapping.
 * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
 *                      PROT_EXEC).
 * @param paPhysAddrs   Array of physical addresses to pages.
 * @param cb            Size of memory being mapped.
 * @param cbPageSize    Size of the pages being mapped.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;

    as_rangelock(pAddrSpace);
    if (g_frtSolOldMapAddr)
        g_rtSolMapAddr.u.pfnSol_map_addr_old(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    else
        g_rtSolMapAddr.u.pfnSol_map_addr(pVirtAddr, cb, 0 /* offset */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
        if (pvMem)
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
            pMemSolaris->Core.pv  = pvMem;
            pMemSolaris->pvHandle = NULL;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    return VERR_NO_MEMORY;
}
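

/*
 * Caller-side sketch (illustration only) of the generic IPRT API that the
 * rtR0MemObjNative* functions in this file back: ring-0 code allocates a
 * page-backed object, queries its kernel mapping and physical address, and
 * frees it again. Error handling is kept minimal on purpose.
 */
#if 0 /* example only, never compiled */
static int rtR0MemObjSolExampleApiUsage(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, _4K, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void    *pv     = RTR0MemObjAddress(hMemObj);                    /* kernel mapping */
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(pv); NOREF(HCPhys);
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}
#endif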


DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        /* Allocate physically low page-aligned memory. */
        uint64_t uPhysHi = _4G - 1;
        void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
        if (pvMem)
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            pMemSolaris->Core.pv  = pvMem;
            pMemSolaris->pvHandle = NULL;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    return VERR_NO_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        if (PhysHighest == NIL_RTHCPHYS)
        {
            uint64_t PhysAddr = UINT64_MAX;
            void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
            if (!pvPages)
            {
                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
                rtR0MemObjDelete(&pMemSolaris->Core);
                return VERR_NO_MEMORY;
            }
            Assert(PhysAddr != UINT64_MAX);
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));

            pMemSolaris->Core.pv     = NULL;
            pMemSolaris->pvHandle    = pvPages;
            pMemSolaris->fIndivPages = true;
        }
        else
        {
            /*
             * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
             * We fall back to using contig_alloc().
             */
            uint64_t PhysAddr = UINT64_MAX;
            void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
            if (!pvMem)
            {
                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
                rtR0MemObjDelete(&pMemSolaris->Core);
                return VERR_NO_MEMORY;
            }
            Assert(PhysAddr != UINT64_MAX);
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));

            pMemSolaris->Core.pv     = pvMem;
            pMemSolaris->pvHandle    = NULL;
            pMemSolaris->fIndivPages = false;
        }
        pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                          const char *pszTag)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}
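

/*
 * Illustration (not part of the driver): with the _2M page-size code detected
 * above, a request with cb == _2M, uAlignment == _2M and PhysHighest ==
 * NIL_RTHCPHYS takes the single-large-page path; any other combination falls
 * back to the physically contiguous rtR0SolMemAlloc() path.
 */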


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
                                          const char *pszTag)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process, const char *pszTag)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
    NOREF(fAccess);

    /* Create the locking object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK,
                                                               (void *)R3Ptr, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    NOREF(fAccess);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                              const char *pszTag)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb, pszTag);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process, const char *pszTag)
{
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int    rc  = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING,
                                                                   pv, cbSub, pszTag);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub,
                                        const char *pszTag)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object and offSub/cbSub.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    uint8_t       *pb               = pMemToMapSolaris->Core.pv ? (uint8_t *)pMemToMapSolaris->Core.pv + offSub : NULL;
    size_t const   cb               = cbSub ? cbSub : pMemToMapSolaris->Core.cb;
    size_t const   cPages           = cb >> PAGE_SHIFT;
    Assert(!offSub || cbSub);
    Assert(!(cb & PAGE_OFFSET_MASK));

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pb, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Gather the physical page addresses of the pages to be mapped.
     */
    int       rc          = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            /* Translate individual page_t to physical addresses. */
            page_t **papPages = pMemToMapSolaris->pvHandle;
            AssertPtr(papPages);
            papPages += offSub >> PAGE_SHIFT;
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(papPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            /* Split up the large page into page-sized chunks. */
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            Phys += offSub;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /* We have a kernel mapping, just translate virtual to physical. */
            AssertPtr(pb);
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pb);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pb += PAGE_SIZE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            /*
             * Perform the actual mapping.
             */
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
