VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp@ 95248

Last change on this file since 95248 was 93650, checked in by vboxsync, 3 years ago

VMM/PGM,*: Split the physical access handler type registration into separate ring-0 and ring-3 steps, expanding the type to 64-bit. bugref:10094

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 8.3 KB
Line 
1/* $Id: PGMR0Pool.cpp 93650 2022-02-08 10:43:53Z vboxsync $ */
2/** @file
3 * PGM Shadow Page Pool, ring-0 specific bits.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_POOL
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/hm.h>
26#include "PGMInternal.h"
27#include <VBox/vmm/vmcc.h>
28#include "PGMInline.h"
29
30#include <VBox/log.h>
31#include <VBox/err.h>
32#include <iprt/mem.h>
33#include <iprt/memobj.h>
34
35
36/**
37 * Called by PGMR0InitVM to complete the page pool setup for ring-0.
38 *
39 * @returns VBox status code.
40 * @param pGVM Pointer to the global VM structure.
41 */
42int pgmR0PoolInitVM(PGVM pGVM)
43{
44 PPGMPOOL pPool = pGVM->pgm.s.pPoolR0;
45 AssertPtrReturn(pPool, VERR_PGM_POOL_IPE);
46
47 int rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
48 pgmPoolAccessHandler, pgmRZPoolAccessPfHandler,
49 "Guest Paging Access Handler", pPool->hAccessHandlerType);
50 AssertLogRelRCReturn(rc, rc);
51
52 return VINF_SUCCESS;
53}
54
55
/**
 * Worker for PGMR0PoolGrow.
 *
 * Grows the shadow page pool by one batch of up to PGMPOOL_CFG_MAX_GROW pages:
 * allocates the backing memory in ring-0, maps it into ring-3, records the
 * memory/mapping object handles in the per-VM arrays, and links the new pages
 * into the pool's free list.
 *
 * Caller must hold pGVM->pgmr0.s.PoolGrowCritSect (see PGMR0PoolGrow).
 *
 * @returns VBox status code (VINF_SUCCESS also when already at cMaxPages).
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pPool   The shadow page pool.
 */
static int pgmR0PoolGrowInner(PGVM pGVM, PPGMPOOL pPool)
{
    int rc;

    /* With 32-bit guests and no EPT, the CR3 limits the root pages to low
       (below 4 GB) memory. */
    /** @todo change the pool to handle ROOT page allocations specially when
     *        required. */
    bool const fCanUseHighMemory = HMIsNestedPagingActive(pGVM);

    /*
     * Figure out how many pages we should allocate.
     * Note: cCurPages is clamped to cMaxPages so the subtraction below cannot underflow.
     */
    uint32_t const cMaxPages = RT_MIN(pPool->cMaxPages, PGMPOOL_IDX_LAST);
    uint32_t const cCurPages = RT_MIN(pPool->cCurPages, cMaxPages);
    if (cCurPages < cMaxPages)
    {
        uint32_t cNewPages = cMaxPages - cCurPages;
        if (cNewPages > PGMPOOL_CFG_MAX_GROW)
            cNewPages = PGMPOOL_CFG_MAX_GROW;
        LogFlow(("PGMR0PoolGrow: Growing the pool by %u (%#x) pages to %u (%#x) pages. fCanUseHighMemory=%RTbool\n",
                 cNewPages, cNewPages, cCurPages + cNewPages, cCurPages + cNewPages, fCanUseHighMemory));

        /* Check that the handles in the arrays entry are both NIL.  Each grow
           batch gets its own slot, indexed by how many batches came before it. */
        uintptr_t const idxMemHandle = cCurPages / (PGMPOOL_CFG_MAX_GROW);
        AssertCompile(   (PGMPOOL_IDX_LAST + (PGMPOOL_CFG_MAX_GROW - 1)) / PGMPOOL_CFG_MAX_GROW
                      <= RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs));
        AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs));
        AssertLogRelMsgReturn(   pGVM->pgmr0.s.ahPoolMemObjs[idxMemHandle] == NIL_RTR0MEMOBJ
                              && pGVM->pgmr0.s.ahPoolMapObjs[idxMemHandle] == NIL_RTR0MEMOBJ, ("idxMemHandle=%#x\n", idxMemHandle),
                              VERR_PGM_POOL_IPE);

        /*
         * Allocate the new pages and map them into ring-3.
         * Low (below 4 GB) memory is required when nested paging is not active.
         */
        RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
        if (fCanUseHighMemory)
            rc = RTR0MemObjAllocPage(&hMemObj, cNewPages * HOST_PAGE_SIZE, false /*fExecutable*/);
        else
            rc = RTR0MemObjAllocLow(&hMemObj, cNewPages * HOST_PAGE_SIZE, false /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
            rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
            if (RT_SUCCESS(rc))
            {
                /* Ownership of both objects moves into the per-VM arrays here;
                   from now on VM cleanup is responsible for freeing them. */
                pGVM->pgmr0.s.ahPoolMemObjs[idxMemHandle] = hMemObj;
                pGVM->pgmr0.s.ahPoolMapObjs[idxMemHandle] = hMapObj;

                uint8_t *pbRing0 = (uint8_t *)RTR0MemObjAddress(hMemObj);
                RTR3PTR pbRing3 = RTR0MemObjAddressR3(hMapObj);
                AssertPtr(pbRing0);
                Assert(((uintptr_t)pbRing0 & HOST_PAGE_OFFSET_MASK) == 0);
                Assert(pbRing3 != NIL_RTR3PTR);
                Assert((pbRing3 & HOST_PAGE_OFFSET_MASK) == 0);

                /*
                 * Initialize the new pages and push them onto the free list.
                 */
                for (unsigned iNewPage = 0; iNewPage < cNewPages; iNewPage++)
                {
                    PPGMPOOLPAGE pPage = &pPool->aPages[cCurPages + iNewPage];
                    pPage->pvPageR0 = &pbRing0[iNewPage * HOST_PAGE_SIZE];
                    pPage->pvPageR3 = pbRing3 + iNewPage * HOST_PAGE_SIZE;
                    pPage->Core.Key = RTR0MemObjGetPagePhysAddr(hMemObj, iNewPage);
                    /* Fatal: a high physical address without nested paging would
                       break 32-bit guest CR3 handling (see note at the top). */
                    AssertFatal(pPage->Core.Key < _4G || fCanUseHighMemory);
                    pPage->GCPhys = NIL_RTGCPHYS;
                    pPage->enmKind = PGMPOOLKIND_FREE;
                    pPage->idx = pPage - &pPool->aPages[0];
                    LogFlow(("PGMR0PoolGrow: insert page #%#x - %RHp\n", pPage->idx, pPage->Core.Key));
                    pPage->iNext = pPool->iFreeHead;
                    pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
                    pPage->iModifiedNext = NIL_PGMPOOL_IDX;
                    pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
                    pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
                    pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
                    pPage->iAgeNext = NIL_PGMPOOL_IDX;
                    pPage->iAgePrev = NIL_PGMPOOL_IDX;
                    /* commit it */
                    bool fRc = RTAvloHCPhysInsert(&pPool->HCPhysTree, &pPage->Core); Assert(fRc); NOREF(fRc);
                    pPool->iFreeHead = cCurPages + iNewPage;
                    pPool->cCurPages = cCurPages + iNewPage + 1;
                }

                return VINF_SUCCESS;
            }

            /* Ring-3 mapping failed: release the allocation (handles were never stashed). */
            RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
        }
        /* Throttle release logging once the pool is non-trivial in size. */
        if (cCurPages > 64)
            LogRelMax(5, ("PGMR0PoolGrow: rc=%Rrc cNewPages=%#x cCurPages=%#x cMaxPages=%#x fCanUseHighMemory=%d\n",
                          rc, cNewPages, cCurPages, cMaxPages, fCanUseHighMemory));
        else
            LogRel(("PGMR0PoolGrow: rc=%Rrc cNewPages=%#x cCurPages=%#x cMaxPages=%#x fCanUseHighMemory=%d\n",
                    rc, cNewPages, cCurPages, cMaxPages, fCanUseHighMemory));
    }
    else
        rc = VINF_SUCCESS;
    return rc;
}
159
160
161/**
162 * Grows the shadow page pool.
163 *
164 * I.e. adds more pages to it, assuming that hasn't reached cMaxPages yet.
165 *
166 * @returns VBox status code.
167 * @param pGVM The ring-0 VM structure.
168 * @param idCpu The ID of the calling EMT.
169 * @thread EMT(idCpu)
170 */
171VMMR0_INT_DECL(int) PGMR0PoolGrow(PGVM pGVM, VMCPUID idCpu)
172{
173 /*
174 * Validate input.
175 */
176 PPGMPOOL pPool = pGVM->pgm.s.pPoolR0;
177 AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
178 AssertReturn(pPool->pVMR3 == pGVM->pVMR3, VERR_PGM_POOL_IPE);
179 AssertReturn(pPool->pVMR0 == pGVM, VERR_PGM_POOL_IPE);
180
181 AssertReturn(idCpu < pGVM->cCpus, VERR_VM_THREAD_NOT_EMT);
182 PGVMCPU const pGVCpu = &pGVM->aCpus[idCpu];
183
184 /*
185 * Enter the grow critical section and call worker.
186 */
187 STAM_REL_PROFILE_START(&pPool->StatGrow, a);
188
189 VMMR0EMTBLOCKCTX Ctx;
190 int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, &pGVM->pgmr0.s.PoolGrowCritSect, &Ctx);
191 AssertRCReturn(rc, rc);
192
193 rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect);
194 AssertRCReturn(rc, rc);
195
196 rc = pgmR0PoolGrowInner(pGVM, pPool);
197
198 STAM_REL_PROFILE_STOP(&pPool->StatGrow, a);
199 RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);
200
201 VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
202 return rc;
203}
204
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette