VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0Pool.cpp@93632

Last change on this file since 93632 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 7.7 KB
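
The page-size split described in the change above matters because the host page size no longer has to match the guest's 4 KiB pages (for example on hosts using 16 KiB pages). A minimal standalone sketch of the consequence, using illustrative values rather than the real VBox definitions of GUEST_PAGE_SIZE and HOST_PAGE_SIZE:

/* Illustration only: the real constants come from the VBox headers, and the
 * host value may not be a compile-time constant on every platform. */
#include <stdint.h>
#include <stdio.h>

#define GUEST_PAGE_SIZE 0x1000u     /* assumed: 4 KiB x86 guest pages */
#define HOST_PAGE_SIZE  0x4000u     /* assumed: a 16 KiB-page host */

int main(void)
{
    /* A buffer holding N guest pages no longer covers N host pages, so code
     * like the pool grower below sizes its allocations in host pages. */
    uint32_t const cGuestPages = 8;
    uint32_t const cbTotal     = cGuestPages * GUEST_PAGE_SIZE;
    uint32_t const cHostPages  = (cbTotal + HOST_PAGE_SIZE - 1) / HOST_PAGE_SIZE;
    printf("%u guest pages = %#x bytes = %u host page(s)\n", cGuestPages, cbTotal, cHostPages);
    return 0;
}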
/* $Id: PGMR0Pool.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
/** @file
 * PGM Shadow Page Pool, ring-0 specific bits.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM_POOL
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/hm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>


/**
 * Worker for PGMR0PoolGrow.
 */
static int pgmR0PoolGrowInner(PGVM pGVM, PPGMPOOL pPool)
{
    int rc;

    /* With 32-bit guests and no EPT, the CR3 limits the root pages to low
       (below 4 GB) memory. */
    /** @todo change the pool to handle ROOT page allocations specially when
     *        required. */
    bool const fCanUseHighMemory = HMIsNestedPagingActive(pGVM);

    /*
     * Figure out how many pages to allocate.
     */
    uint32_t const cMaxPages = RT_MIN(pPool->cMaxPages, PGMPOOL_IDX_LAST);
    uint32_t const cCurPages = RT_MIN(pPool->cCurPages, cMaxPages);
    if (cCurPages < cMaxPages)
    {
        uint32_t cNewPages = cMaxPages - cCurPages;
        if (cNewPages > PGMPOOL_CFG_MAX_GROW)
            cNewPages = PGMPOOL_CFG_MAX_GROW;
        LogFlow(("PGMR0PoolGrow: Growing the pool by %u (%#x) pages to %u (%#x) pages. fCanUseHighMemory=%RTbool\n",
                 cNewPages, cNewPages, cCurPages + cNewPages, cCurPages + cNewPages, fCanUseHighMemory));

        /* Check that the handles in the array entries are both NIL. */
        uintptr_t const idxMemHandle = cCurPages / (PGMPOOL_CFG_MAX_GROW);
        AssertCompile(   (PGMPOOL_IDX_LAST + (PGMPOOL_CFG_MAX_GROW - 1)) / PGMPOOL_CFG_MAX_GROW
                      <= RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs));
        AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs));
        AssertLogRelMsgReturn(   pGVM->pgmr0.s.ahPoolMemObjs[idxMemHandle] == NIL_RTR0MEMOBJ
                              && pGVM->pgmr0.s.ahPoolMapObjs[idxMemHandle] == NIL_RTR0MEMOBJ, ("idxMemHandle=%#x\n", idxMemHandle),
                              VERR_PGM_POOL_IPE);

        /*
         * Allocate the new pages and map them into ring-3.
         */
        RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
        if (fCanUseHighMemory)
            rc = RTR0MemObjAllocPage(&hMemObj, cNewPages * HOST_PAGE_SIZE, false /*fExecutable*/);
        else
            rc = RTR0MemObjAllocLow(&hMemObj, cNewPages * HOST_PAGE_SIZE, false /*fExecutable*/);
        if (RT_SUCCESS(rc))
        {
            RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
            rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
            if (RT_SUCCESS(rc))
            {
                pGVM->pgmr0.s.ahPoolMemObjs[idxMemHandle] = hMemObj;
                pGVM->pgmr0.s.ahPoolMapObjs[idxMemHandle] = hMapObj;

                uint8_t *pbRing0 = (uint8_t *)RTR0MemObjAddress(hMemObj);
                RTR3PTR  pbRing3 = RTR0MemObjAddressR3(hMapObj);
                AssertPtr(pbRing0);
                Assert(((uintptr_t)pbRing0 & HOST_PAGE_OFFSET_MASK) == 0);
                Assert(pbRing3 != NIL_RTR3PTR);
                Assert((pbRing3 & HOST_PAGE_OFFSET_MASK) == 0);

                /*
                 * Initialize the new pages.
                 */
                for (unsigned iNewPage = 0; iNewPage < cNewPages; iNewPage++)
                {
                    PPGMPOOLPAGE pPage = &pPool->aPages[cCurPages + iNewPage];
                    pPage->pvPageR0       = &pbRing0[iNewPage * HOST_PAGE_SIZE];
                    pPage->pvPageR3       = pbRing3 + iNewPage * HOST_PAGE_SIZE;
                    pPage->Core.Key       = RTR0MemObjGetPagePhysAddr(hMemObj, iNewPage);
                    AssertFatal(pPage->Core.Key < _4G || fCanUseHighMemory);
                    pPage->GCPhys         = NIL_RTGCPHYS;
                    pPage->enmKind        = PGMPOOLKIND_FREE;
                    pPage->idx            = pPage - &pPool->aPages[0];
                    LogFlow(("PGMR0PoolGrow: insert page #%#x - %RHp\n", pPage->idx, pPage->Core.Key));
                    pPage->iNext          = pPool->iFreeHead;
                    pPage->iUserHead      = NIL_PGMPOOL_USER_INDEX;
                    pPage->iModifiedNext  = NIL_PGMPOOL_IDX;
                    pPage->iModifiedPrev  = NIL_PGMPOOL_IDX;
                    pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
                    pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
                    pPage->iAgeNext       = NIL_PGMPOOL_IDX;
                    pPage->iAgePrev       = NIL_PGMPOOL_IDX;
                    /* commit it */
                    bool fRc = RTAvloHCPhysInsert(&pPool->HCPhysTree, &pPage->Core); Assert(fRc); NOREF(fRc);
                    pPool->iFreeHead = cCurPages + iNewPage;
                    pPool->cCurPages = cCurPages + iNewPage + 1;
                }

                return VINF_SUCCESS;
            }

            RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
        }
        if (cCurPages > 64)
            LogRelMax(5, ("PGMR0PoolGrow: rc=%Rrc cNewPages=%#x cCurPages=%#x cMaxPages=%#x fCanUseHighMemory=%d\n",
                          rc, cNewPages, cCurPages, cMaxPages, fCanUseHighMemory));
        else
            LogRel(("PGMR0PoolGrow: rc=%Rrc cNewPages=%#x cCurPages=%#x cMaxPages=%#x fCanUseHighMemory=%d\n",
                    rc, cNewPages, cCurPages, cMaxPages, fCanUseHighMemory));
    }
    else
        rc = VINF_SUCCESS;
    return rc;
}


/**
 * Grows the shadow page pool.
 *
 * I.e. adds more pages to it, assuming it hasn't reached cMaxPages yet.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   idCpu   The ID of the calling EMT.
 * @thread  EMT(idCpu)
 */
VMMR0_INT_DECL(int) PGMR0PoolGrow(PGVM pGVM, VMCPUID idCpu)
{
    /*
     * Validate input.
     */
    PPGMPOOL pPool = pGVM->pgm.s.pPoolR0;
    AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
    AssertReturn(pPool->pVMR3 == pGVM->pVMR3, VERR_PGM_POOL_IPE);
    AssertReturn(pPool->pVMR0 == pGVM, VERR_PGM_POOL_IPE);

    AssertReturn(idCpu < pGVM->cCpus, VERR_VM_THREAD_NOT_EMT);
    PGVMCPU const pGVCpu = &pGVM->aCpus[idCpu];

    /*
     * Enter the grow critical section and call worker.
     */
    STAM_REL_PROFILE_START(&pPool->StatGrow, a);

    VMMR0EMTBLOCKCTX Ctx;
    int rc = VMMR0EmtPrepareToBlock(pGVCpu, VINF_SUCCESS, __FUNCTION__, &pGVM->pgmr0.s.PoolGrowCritSect, &Ctx);
    AssertRCReturn(rc, rc);

    rc = RTCritSectEnter(&pGVM->pgmr0.s.PoolGrowCritSect);
    AssertRCReturn(rc, rc);

    rc = pgmR0PoolGrowInner(pGVM, pPool);

    STAM_REL_PROFILE_STOP(&pPool->StatGrow, a);
    RTCritSectLeave(&pGVM->pgmr0.s.PoolGrowCritSect);

    VMMR0EmtResumeAfterBlocking(pGVCpu, &Ctx);
    return rc;
}

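One detail of pgmR0PoolGrowInner worth spelling out: the pool grows in chunks of at most PGMPOOL_CFG_MAX_GROW pages, each chunk backed by a single memory object, so the slot used in ahPoolMemObjs/ahPoolMapObjs is simply cCurPages / PGMPOOL_CFG_MAX_GROW. A small standalone sketch of that indexing; the chunk size here is an assumed illustrative value, not the one defined in PGMInternal.h:

#include <stdint.h>
#include <stdio.h>

#define PGMPOOL_CFG_MAX_GROW 256u   /* assumed value, for illustration only */

int main(void)
{
    /* Each grow of up to PGMPOOL_CFG_MAX_GROW pages fills one handle slot,
     * so the slot index follows from how many pages already exist. */
    for (uint32_t cCurPages = 0; cCurPages < 4 * PGMPOOL_CFG_MAX_GROW; cCurPages += PGMPOOL_CFG_MAX_GROW)
    {
        uint32_t const idxMemHandle = cCurPages / PGMPOOL_CFG_MAX_GROW;
        printf("cCurPages=%4u -> ahPoolMemObjs[%u]\n", cCurPages, idxMemHandle);
    }
    return 0;
}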