VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPool.cpp@26624

Last change on this file since 26624 was 26364, checked in by vboxsync, 15 years ago

Flush the PGM pool cache as we might have stale references to pages that we just freed. (ballooning)

/* $Id: PGMPool.cpp 26364 2010-02-09 13:31:20Z vboxsync $ */
/** @file
 * PGM Shadow Page Pool.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */
/** @page pg_pgm_pool       PGM Shadow Page Pool
 *
 * Motivations:
 *      -# Relationship between shadow page tables and physical guest pages. This
 *         should allow us to skip most of the global flushes now following access
 *         handler changes. The main expense is flushing shadow pages.
 *      -# Limit the pool size if necessary (default is kind of limitless).
 *      -# Allocate shadow pages from RC. We used to only do this in SyncCR3.
 *      -# Required for 64-bit guests.
 *      -# Combining the PD cache and page pool in order to simplify caching.
 *
 *
 * @section sec_pgm_pool_outline    Design Outline
 *
 * The shadow page pool tracks pages used for shadowing paging structures (i.e.
 * page tables, page directory, page directory pointer table and page map
 * level-4). Each page in the pool has a unique identifier. This identifier is
 * used to link a guest physical page to a shadow PT. The identifier is a
 * non-zero value and has a relatively low max value, say 14 bits. This makes it
 * possible to fit it into the upper bits of the aHCPhys entries in the
 * ram range.
 *
 * By restricting host physical memory to the first 48 bits (which is the
 * announced physical memory range of the K8L chip (scheduled for 2008)), we
 * can safely use the upper 16 bits for shadow page ID and reference counting.
 *
 * Update: The 48 bit assumption will be lifted with the new physical memory
 * management (PGMPAGE), so we won't have any trouble when someone stuffs 2TB
 * into a box in some years.
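 *
 * As a sketch of the packing idea (illustrative only; the MY_* names below
 * are made up for this example, and the real encoding lives in PGMPAGE in
 * PGMInternal.h):
 * @code
 *      #define MY_HCPHYS_MASK     UINT64_C(0x0000ffffffffffff)  // low 48 bits: host address
 *      #define MY_SHW_IDX_SHIFT   48                            // next 14 bits: shadow page id
 *      #define MY_SHW_IDX_MASK    UINT64_C(0x3fff)
 *
 *      static uint16_t myGetShwPageIdx(uint64_t u64HCPhysEntry)
 *      {
 *          return (uint16_t)((u64HCPhysEntry >> MY_SHW_IDX_SHIFT) & MY_SHW_IDX_MASK);
 *      }
 * @endcode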
 *
 * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT
 * or PD. This is solved by creating a list of physical cross reference extents
 * whenever this happens. Each node in the list (extent) can contain 3 page
 * pool indexes. The list itself is chained using indexes into the paPhysExt
 * array.
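 *
 * Conceptually an extent node looks like the sketch below; it mirrors the
 * aidx[3]/iNext fields initialized in pgmR3PoolInit(), but the authoritative
 * definition is PGMPOOLPHYSEXT in PGMInternal.h:
 * @code
 *      typedef struct MYPHYSEXT
 *      {
 *          uint16_t aidx[3];   // up to 3 pool page indexes aliasing this guest page
 *          uint16_t iNext;     // next extent in the paPhysExt array, or NIL
 *      } MYPHYSEXT;
 * @endcode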
 *
 *
 * @section sec_pgm_pool_life       Life Cycle of a Shadow Page
 *
 *      -# The SyncPT function requests a page from the pool.
 *         The request includes the kind of page it is (PT/PD, PAE/legacy), the
 *         address of the page it's shadowing, and more.
 *      -# The pool responds to the request by allocating a new page.
 *         When the cache is enabled, it will first check if it's in the cache.
 *         Should the pool be exhausted, one of two things can be done:
 *              -# Flush the whole pool and the current CR3.
 *              -# Use the cache to find a page which can be flushed (~age).
 *      -# The SyncPT function will sync one or more pages and insert them into the
 *         shadow PD.
 *      -# The SyncPage function may sync more pages at later \#PFs.
 *      -# The page is freed / flushed in SyncCR3 (perhaps) and some other cases.
 *         When caching is enabled, the page isn't flushed but remains in the cache.
 *
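 * In pseudocode, the allocation path described above (a sketch of the flow,
 * not the exact logic of pgmPoolAlloc):
 * @code
 *      // request a shadow page of kind enmKind for guest page GCPhys
 *      if (fCacheEnabled && cache lookup (GCPhys, enmKind) hits)
 *          return the cached page;
 *      if (the free list is empty)
 *          free up a page via the cache age list (or flush pool + CR3);
 *      take a page off the free list, zero it and return it;
 * @endcode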
 *
 * @section sec_pgm_pool_monitoring Monitoring
 *
 * We always monitor PAGE_SIZE chunks of memory. When we've got multiple shadow
 * pages for the same PAGE_SIZE of guest memory (PAE and mixed PD/PT) the pages
 * sharing the monitor get linked using the iMonitoredNext/Prev fields. The head
 * page is the pvUser passed to the access handlers.
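 *
 * A sketch of walking such a monitor chain (illustrative; iMonitoredNext is
 * the field initialized in this file, and NIL_PGMPOOL_IDX terminates the list):
 * @code
 *      for (uint16_t i = iHeadPageIdx; i != NIL_PGMPOOL_IDX; i = pPool->aPages[i].iMonitoredNext)
 *          Log(("page %#x shares the monitored guest page\n", i));
 * @endcode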
 *
 *
 * @section sec_pgm_pool_impl      Implementation
 *
 * The pool will take pages from the MM page pool. The tracking data
 * (attributes, bitmaps and so on) are allocated from the hypervisor heap. The
 * pool content can be accessed both by using the page id and the physical
 * address (HC). The former is managed by means of an array, the latter by an
 * offset based AVL tree.
 *
 * Flushing of a pool page means that we iterate the content (we know what kind
 * it is) and update the link information in the ram range.
 *
 * ...
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM_POOL
#include <VBox/pgm.h>
#include <VBox/mm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/dbg.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
#endif

#ifdef VBOX_WITH_DEBUGGER
/** Command descriptors. */
static const DBGCCMD g_aCmds[] =
{
    /* pszCmd,          cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler,        pszSyntax, pszDescription */
    { "pgmpoolcheck",   0,        0,        NULL,      0,         NULL,        0,      pgmR3PoolCmdCheck, "",        "Check the pgm pool pages." },
};
#endif

/**
 * Initializes the pool.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int pgmR3PoolInit(PVM pVM)
{
    AssertCompile(NIL_PGMPOOL_IDX == 0);
    /* pPage->cLocked is an unsigned byte. */
    AssertCompile(VMM_MAX_CPU_COUNT <= 255);

    /*
     * Query Pool config.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM/Pool");

    /** @cfgm{/PGM/Pool/MaxPages, uint16_t, #pages, 16, 0x3fff, 1024}
     * The max size of the shadow page pool in pages. The pool will grow dynamically
     * up to this limit.
     */
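    /* Example host-side override (illustrative; "MyVM" is a placeholder, and
     * the VBoxInternal extradata prefix is the usual route to CFGM keys):
     *      VBoxManage setextradata "MyVM" "VBoxInternal/PGM/Pool/MaxPages" 8192
     */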
    uint16_t cMaxPages;
    int rc = CFGMR3QueryU16Def(pCfg, "MaxPages", &cMaxPages, 4*_1M >> PAGE_SHIFT);
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
                          ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
    cMaxPages = RT_ALIGN(cMaxPages, 16);

    /** @cfgm{/PGM/Pool/MaxUsers, uint16_t, #users, MaxUsers, 32K, MaxPages*2}
     * The max number of shadow page user tracking records. Each shadow page has
     * zero or more other shadow pages (or CR3s) referencing it, or using it if
     * you like. The structures describing these relationships are allocated from
     * a fixed sized pool. This configuration variable defines the pool size.
     */
    uint16_t cMaxUsers;
    rc = CFGMR3QueryU16Def(pCfg, "MaxUsers", &cMaxUsers, cMaxPages * 2);
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgReturn(cMaxUsers >= cMaxPages && cMaxUsers <= _32K,
                          ("cMaxUsers=%u (%#x)\n", cMaxUsers, cMaxUsers), VERR_INVALID_PARAMETER);

    /** @cfgm{/PGM/Pool/MaxPhysExts, uint16_t, #extents, 16, MaxPages * 2, MAX(MaxPages*2,0x3fff)}
     * The max number of extents for tracking aliased guest pages.
     */
    uint16_t cMaxPhysExts;
    rc = CFGMR3QueryU16Def(pCfg, "MaxPhysExts", &cMaxPhysExts, RT_MAX(cMaxPages * 2, PGMPOOL_IDX_LAST));
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgReturn(cMaxPhysExts >= 16 && cMaxPhysExts <= PGMPOOL_IDX_LAST,
                          ("cMaxPhysExts=%u (%#x)\n", cMaxPhysExts, cMaxPhysExts), VERR_INVALID_PARAMETER);

    /** @cfgm{/PGM/Pool/CacheEnabled, bool, true}
     * Enables or disables caching of shadow pages. Caching means that we will try
     * to reuse shadow pages instead of recreating them every time SyncCR3, SyncPT
     * or SyncPage requests one. When reusing a shadow page, we can save time
     * reconstructing it and its children.
     */
    bool fCacheEnabled;
    rc = CFGMR3QueryBoolDef(pCfg, "CacheEnabled", &fCacheEnabled, true);
    AssertLogRelRCReturn(rc, rc);

    Log(("pgmR3PoolInit: cMaxPages=%#RX16 cMaxUsers=%#RX16 cMaxPhysExts=%#RX16 fCacheEnabled=%RTbool\n",
         cMaxPages, cMaxUsers, cMaxPhysExts, fCacheEnabled));

    /*
     * Allocate the data structures.
     */
    uint32_t cb = RT_OFFSETOF(PGMPOOL, aPages[cMaxPages]);
    cb += cMaxUsers * sizeof(PGMPOOLUSER);
    cb += cMaxPhysExts * sizeof(PGMPOOLPHYSEXT);
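    /* Layout of the single hyper-heap block allocated below, in order:
     *      PGMPOOL header + aPages[cMaxPages],
     *      cMaxUsers user records (paUsers points here),
     *      cMaxPhysExts phys-ext records (paPhysExts points here). */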
    PPGMPOOL pPool;
    rc = MMR3HyperAllocOnceNoRel(pVM, cb, 0, MM_TAG_PGM_POOL, (void **)&pPool);
    if (RT_FAILURE(rc))
        return rc;
    pVM->pgm.s.pPoolR3 = pPool;
    pVM->pgm.s.pPoolR0 = MMHyperR3ToR0(pVM, pPool);
    pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pPool);

    /*
     * Initialize it.
     */
    pPool->pVMR3 = pVM;
    pPool->pVMR0 = pVM->pVMR0;
    pPool->pVMRC = pVM->pVMRC;
    pPool->cMaxPages = cMaxPages;
    pPool->cCurPages = PGMPOOL_IDX_FIRST;
    pPool->iUserFreeHead = 0;
    pPool->cMaxUsers = cMaxUsers;
    PPGMPOOLUSER paUsers = (PPGMPOOLUSER)&pPool->aPages[pPool->cMaxPages];
    pPool->paUsersR3 = paUsers;
    pPool->paUsersR0 = MMHyperR3ToR0(pVM, paUsers);
    pPool->paUsersRC = MMHyperR3ToRC(pVM, paUsers);
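    /* Chain every user record into a single free list terminated by
       NIL_PGMPOOL_USER_INDEX; iUserFreeHead (0) set above is its head. */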
    for (unsigned i = 0; i < cMaxUsers; i++)
    {
        paUsers[i].iNext = i + 1;
        paUsers[i].iUser = NIL_PGMPOOL_IDX;
        paUsers[i].iUserTable = 0xfffffffe;
    }
    paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
    pPool->iPhysExtFreeHead = 0;
    pPool->cMaxPhysExts = cMaxPhysExts;
    PPGMPOOLPHYSEXT paPhysExts = (PPGMPOOLPHYSEXT)&paUsers[cMaxUsers];
    pPool->paPhysExtsR3 = paPhysExts;
    pPool->paPhysExtsR0 = MMHyperR3ToR0(pVM, paPhysExts);
    pPool->paPhysExtsRC = MMHyperR3ToRC(pVM, paPhysExts);
    for (unsigned i = 0; i < cMaxPhysExts; i++)
    {
        paPhysExts[i].iNext = i + 1;
        paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
        paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
        paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
    }
    paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
        pPool->aiHash[i] = NIL_PGMPOOL_IDX;
    pPool->iAgeHead = NIL_PGMPOOL_IDX;
    pPool->iAgeTail = NIL_PGMPOOL_IDX;
    pPool->fCacheEnabled = fCacheEnabled;
    pPool->pfnAccessHandlerR3 = pgmR3PoolAccessHandler;
    pPool->pszAccessHandler = "Guest Paging Access Handler";
    pPool->HCPhysTree = 0;

    /* The NIL entry. */
    Assert(NIL_PGMPOOL_IDX == 0);
    pPool->aPages[NIL_PGMPOOL_IDX].enmKind = PGMPOOLKIND_INVALID;

    /* The Shadow 32-bit PD. (32-bit guest paging) */
    pPool->aPages[PGMPOOL_IDX_PD].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_PD].GCPhys   = NIL_RTGCPHYS;
    pPool->aPages[PGMPOOL_IDX_PD].pvPageR3 = 0;
    pPool->aPages[PGMPOOL_IDX_PD].enmKind  = PGMPOOLKIND_32BIT_PD;
    pPool->aPages[PGMPOOL_IDX_PD].idx      = PGMPOOL_IDX_PD;

    /* The Shadow PDPT. */
    pPool->aPages[PGMPOOL_IDX_PDPT].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_PDPT].GCPhys   = NIL_RTGCPHYS;
    pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3 = 0;
    pPool->aPages[PGMPOOL_IDX_PDPT].enmKind  = PGMPOOLKIND_PAE_PDPT;
    pPool->aPages[PGMPOOL_IDX_PDPT].idx      = PGMPOOL_IDX_PDPT;

    /* The Shadow AMD64 CR3. */
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].GCPhys   = NIL_RTGCPHYS;
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3 = 0;
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].enmKind  = PGMPOOLKIND_64BIT_PML4;
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].idx      = PGMPOOL_IDX_AMD64_CR3;

    /* The Nested Paging CR3. */
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].GCPhys   = NIL_RTGCPHYS;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].pvPageR3 = 0;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].enmKind  = PGMPOOLKIND_ROOT_NESTED;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].idx      = PGMPOOL_IDX_NESTED_ROOT;

    /*
     * Set common stuff.
     */
    for (unsigned iPage = 1; iPage < PGMPOOL_IDX_FIRST; iPage++)
    {
        pPool->aPages[iPage].iNext          = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iUserHead      = NIL_PGMPOOL_USER_INDEX;
        pPool->aPages[iPage].iModifiedNext  = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iModifiedPrev  = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iMonitoredNext = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iMonitoredPrev = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iAgeNext       = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iAgePrev       = NIL_PGMPOOL_IDX;
        Assert(pPool->aPages[iPage].idx == iPage);
        Assert(pPool->aPages[iPage].GCPhys == NIL_RTGCPHYS);
        Assert(!pPool->aPages[iPage].fSeenNonGlobal);
        Assert(!pPool->aPages[iPage].fMonitored);
        Assert(!pPool->aPages[iPage].fCached);
        Assert(!pPool->aPages[iPage].fZeroed);
        Assert(!pPool->aPages[iPage].fReusedFlushPending);
    }

#ifdef VBOX_WITH_STATISTICS
    /*
     * Register statistics.
     */
    STAM_REG(pVM, &pPool->cCurPages,                    STAMTYPE_U16,         "/PGM/Pool/cCurPages",                  STAMUNIT_PAGES,          "Current pool size.");
    STAM_REG(pVM, &pPool->cMaxPages,                    STAMTYPE_U16,         "/PGM/Pool/cMaxPages",                  STAMUNIT_PAGES,          "Max pool size.");
    STAM_REG(pVM, &pPool->cUsedPages,                   STAMTYPE_U16,         "/PGM/Pool/cUsedPages",                 STAMUNIT_PAGES,          "The number of pages currently in use.");
    STAM_REG(pVM, &pPool->cUsedPagesHigh,               STAMTYPE_U16_RESET,   "/PGM/Pool/cUsedPagesHigh",             STAMUNIT_PAGES,          "The high watermark for cUsedPages.");
    STAM_REG(pVM, &pPool->StatAlloc,                    STAMTYPE_PROFILE_ADV, "/PGM/Pool/Alloc",                      STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolAlloc.");
    STAM_REG(pVM, &pPool->StatClearAll,                 STAMTYPE_PROFILE,     "/PGM/Pool/ClearAll",                   STAMUNIT_TICKS_PER_CALL, "Profiling of pgmR3PoolClearAll.");
    STAM_REG(pVM, &pPool->StatR3Reset,                  STAMTYPE_PROFILE,     "/PGM/Pool/R3Reset",                    STAMUNIT_TICKS_PER_CALL, "Profiling of pgmR3PoolReset.");
    STAM_REG(pVM, &pPool->StatFlushPage,                STAMTYPE_PROFILE,     "/PGM/Pool/FlushPage",                  STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushPage.");
    STAM_REG(pVM, &pPool->StatFree,                     STAMTYPE_PROFILE,     "/PGM/Pool/Free",                       STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFree.");
    STAM_REG(pVM, &pPool->StatForceFlushPage,           STAMTYPE_COUNTER,     "/PGM/Pool/FlushForce",                 STAMUNIT_OCCURENCES,     "Counting explicit flushes by PGMPoolFlushPage().");
    STAM_REG(pVM, &pPool->StatForceFlushDirtyPage,      STAMTYPE_COUNTER,     "/PGM/Pool/FlushForceDirty",            STAMUNIT_OCCURENCES,     "Counting explicit flushes of dirty pages by PGMPoolFlushPage().");
    STAM_REG(pVM, &pPool->StatForceFlushReused,         STAMTYPE_COUNTER,     "/PGM/Pool/FlushReused",                STAMUNIT_OCCURENCES,     "Counting flushes for reused pages.");
    STAM_REG(pVM, &pPool->StatZeroPage,                 STAMTYPE_PROFILE,     "/PGM/Pool/ZeroPage",                   STAMUNIT_TICKS_PER_CALL, "Profiling time spent zeroing pages. Overlaps with Alloc.");
    STAM_REG(pVM, &pPool->cMaxUsers,                    STAMTYPE_U16,         "/PGM/Pool/Track/cMaxUsers",            STAMUNIT_COUNT,          "Max user tracking records.");
    STAM_REG(pVM, &pPool->cPresent,                     STAMTYPE_U32,         "/PGM/Pool/Track/cPresent",             STAMUNIT_COUNT,          "Number of present page table entries.");
    STAM_REG(pVM, &pPool->StatTrackDeref,               STAMTYPE_PROFILE,     "/PGM/Pool/Track/Deref",                STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackDeref.");
    STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPT,       STAMTYPE_PROFILE,     "/PGM/Pool/Track/FlushGCPhysPT",        STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPT.");
    STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTs,      STAMTYPE_PROFILE,     "/PGM/Pool/Track/FlushGCPhysPTs",       STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPTs.");
    STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTsSlow,  STAMTYPE_PROFILE,     "/PGM/Pool/Track/FlushGCPhysPTsSlow",   STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPTsSlow.");
    STAM_REG(pVM, &pPool->StatTrackFlushEntry,          STAMTYPE_COUNTER,     "/PGM/Pool/Track/Entry/Flush",          STAMUNIT_COUNT,          "Nr of flushed entries.");
    STAM_REG(pVM, &pPool->StatTrackFlushEntryKeep,      STAMTYPE_COUNTER,     "/PGM/Pool/Track/Entry/Update",         STAMUNIT_COUNT,          "Nr of updated entries.");
    STAM_REG(pVM, &pPool->StatTrackFreeUpOneUser,       STAMTYPE_COUNTER,     "/PGM/Pool/Track/FreeUpOneUser",        STAMUNIT_TICKS_PER_CALL, "The number of times we were out of user tracking records.");
    STAM_REG(pVM, &pPool->StatTrackDerefGCPhys,         STAMTYPE_PROFILE,     "/PGM/Pool/Track/DrefGCPhys",           STAMUNIT_TICKS_PER_CALL, "Profiling deref activity related to tracking GC physical pages.");
    STAM_REG(pVM, &pPool->StatTrackLinearRamSearches,   STAMTYPE_COUNTER,     "/PGM/Pool/Track/LinearRamSearches",    STAMUNIT_OCCURENCES,     "The number of times we had to do linear ram searches.");
    STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures,STAMTYPE_COUNTER,     "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES,     "The number of failing pgmPoolTrackPhysExtAlloc calls.");
    STAM_REG(pVM, &pPool->StatMonitorRZ,                STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/RZ",                 STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorRZEmulateInstr,    STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/EmulateInstr",    STAMUNIT_OCCURENCES,     "Times we've failed interpreting the instruction.");
    STAM_REG(pVM, &pPool->StatMonitorRZFlushPage,       STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/RZ/FlushPage",       STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorRZFlushReinit,     STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/FlushReinit",     STAMUNIT_OCCURENCES,     "Times we've detected a page table reinit.");
    STAM_REG(pVM, &pPool->StatMonitorRZFlushModOverflow,STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/FlushOverflow",   STAMUNIT_OCCURENCES,     "Counting flushes for pages that are modified too often.");
    STAM_REG(pVM, &pPool->StatMonitorRZFork,            STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/Fork",            STAMUNIT_OCCURENCES,     "Times we've detected fork().");
    STAM_REG(pVM, &pPool->StatMonitorRZHandled,         STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/RZ/Handled",         STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access we've handled (except REP STOSD).");
    STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch1,  STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/IntrFailPatch1",  STAMUNIT_OCCURENCES,     "Times we've failed interpreting a patch code instruction.");
    STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch2,  STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/IntrFailPatch2",  STAMUNIT_OCCURENCES,     "Times we've failed interpreting a patch code instruction during flushing.");
    STAM_REG(pVM, &pPool->StatMonitorRZRepPrefix,       STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/RepPrefix",       STAMUNIT_OCCURENCES,     "The number of times we've seen rep prefixes we can't handle.");
    STAM_REG(pVM, &pPool->StatMonitorRZRepStosd,        STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/RZ/RepStosd",        STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
    STAM_REG(pVM, &pPool->StatMonitorRZFaultPT,         STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/Fault/PT",        STAMUNIT_OCCURENCES,     "Nr of handled PT faults.");
    STAM_REG(pVM, &pPool->StatMonitorRZFaultPD,         STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/Fault/PD",        STAMUNIT_OCCURENCES,     "Nr of handled PD faults.");
    STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT,       STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/Fault/PDPT",      STAMUNIT_OCCURENCES,     "Nr of handled PDPT faults.");
    STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4,       STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/RZ/Fault/PML4",      STAMUNIT_OCCURENCES,     "Nr of handled PML4 faults.");
    STAM_REG(pVM, &pPool->StatMonitorR3,                STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/R3",                 STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorR3EmulateInstr,    STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/EmulateInstr",    STAMUNIT_OCCURENCES,     "Times we've failed interpreting the instruction.");
    STAM_REG(pVM, &pPool->StatMonitorR3FlushPage,       STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/R3/FlushPage",       STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorR3FlushReinit,     STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/FlushReinit",     STAMUNIT_OCCURENCES,     "Times we've detected a page table reinit.");
    STAM_REG(pVM, &pPool->StatMonitorR3FlushModOverflow,STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/FlushOverflow",   STAMUNIT_OCCURENCES,     "Counting flushes for pages that are modified too often.");
    STAM_REG(pVM, &pPool->StatMonitorR3Fork,            STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/Fork",            STAMUNIT_OCCURENCES,     "Times we've detected fork().");
    STAM_REG(pVM, &pPool->StatMonitorR3Handled,         STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/R3/Handled",         STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access we've handled (except REP STOSD).");
    STAM_REG(pVM, &pPool->StatMonitorR3RepPrefix,       STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/RepPrefix",       STAMUNIT_OCCURENCES,     "The number of times we've seen rep prefixes we can't handle.");
    STAM_REG(pVM, &pPool->StatMonitorR3RepStosd,        STAMTYPE_PROFILE,     "/PGM/Pool/Monitor/R3/RepStosd",        STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
    STAM_REG(pVM, &pPool->StatMonitorR3FaultPT,         STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/Fault/PT",        STAMUNIT_OCCURENCES,     "Nr of handled PT faults.");
    STAM_REG(pVM, &pPool->StatMonitorR3FaultPD,         STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/Fault/PD",        STAMUNIT_OCCURENCES,     "Nr of handled PD faults.");
    STAM_REG(pVM, &pPool->StatMonitorR3FaultPDPT,       STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/Fault/PDPT",      STAMUNIT_OCCURENCES,     "Nr of handled PDPT faults.");
    STAM_REG(pVM, &pPool->StatMonitorR3FaultPML4,       STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/Fault/PML4",      STAMUNIT_OCCURENCES,     "Nr of handled PML4 faults.");
    STAM_REG(pVM, &pPool->StatMonitorR3Async,           STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/R3/Async",           STAMUNIT_OCCURENCES,     "Times we're called in an async thread and need to flush.");
    STAM_REG(pVM, &pPool->cModifiedPages,               STAMTYPE_U16,         "/PGM/Pool/Monitor/cModifiedPages",     STAMUNIT_PAGES,          "The current cModifiedPages value.");
    STAM_REG(pVM, &pPool->cModifiedPagesHigh,           STAMTYPE_U16_RESET,   "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES,          "The high watermark for cModifiedPages.");
    STAM_REG(pVM, &pPool->StatResetDirtyPages,          STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/Dirty/Resets",       STAMUNIT_OCCURENCES,     "Times we've called pgmPoolResetDirtyPages (and there were dirty pages).");
    STAM_REG(pVM, &pPool->StatDirtyPage,                STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/Dirty/Pages",        STAMUNIT_OCCURENCES,     "Times we've called pgmPoolAddDirtyPage.");
    STAM_REG(pVM, &pPool->StatDirtyPageDupFlush,        STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/Dirty/FlushDup",     STAMUNIT_OCCURENCES,     "Times we've had to flush duplicates for dirty page management.");
    STAM_REG(pVM, &pPool->StatDirtyPageOverFlowFlush,   STAMTYPE_COUNTER,     "/PGM/Pool/Monitor/Dirty/FlushOverflow",STAMUNIT_OCCURENCES,     "Times we've had to flush because of overflow.");
    STAM_REG(pVM, &pPool->StatCacheHits,                STAMTYPE_COUNTER,     "/PGM/Pool/Cache/Hits",                 STAMUNIT_OCCURENCES,     "The number of pgmPoolAlloc calls satisfied by the cache.");
    STAM_REG(pVM, &pPool->StatCacheMisses,              STAMTYPE_COUNTER,     "/PGM/Pool/Cache/Misses",               STAMUNIT_OCCURENCES,     "The number of pgmPoolAlloc calls not satisfied by the cache.");
    STAM_REG(pVM, &pPool->StatCacheKindMismatches,      STAMTYPE_COUNTER,     "/PGM/Pool/Cache/KindMismatches",       STAMUNIT_OCCURENCES,     "The number of shadow page kind mismatches. (Better be low, preferably 0!)");
    STAM_REG(pVM, &pPool->StatCacheFreeUpOne,           STAMTYPE_COUNTER,     "/PGM/Pool/Cache/FreeUpOne",            STAMUNIT_OCCURENCES,     "The number of times the cache was asked to free up a page.");
    STAM_REG(pVM, &pPool->StatCacheCacheable,           STAMTYPE_COUNTER,     "/PGM/Pool/Cache/Cacheable",            STAMUNIT_OCCURENCES,     "The number of cacheable allocations.");
    STAM_REG(pVM, &pPool->StatCacheUncacheable,         STAMTYPE_COUNTER,     "/PGM/Pool/Cache/Uncacheable",          STAMUNIT_OCCURENCES,     "The number of uncacheable allocations.");
#endif /* VBOX_WITH_STATISTICS */

#ifdef VBOX_WITH_DEBUGGER
    /*
     * Debugger commands.
     */
    static bool s_fRegisteredCmds = false;
    if (!s_fRegisteredCmds)
    {
        rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            s_fRegisteredCmds = true;
    }
#endif

    return VINF_SUCCESS;
}


/**
 * Relocate the page pool data.
 *
 * @param   pVM     The VM handle.
 */
void pgmR3PoolRelocate(PVM pVM)
{
    pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3);
    pVM->pgm.s.pPoolR3->pVMRC = pVM->pVMRC;
    pVM->pgm.s.pPoolR3->paUsersRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3->paUsersR3);
    pVM->pgm.s.pPoolR3->paPhysExtsRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3->paPhysExtsR3);
    int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerRC);
    AssertReleaseRC(rc);
    /* init order hack. */
    if (!pVM->pgm.s.pPoolR3->pfnAccessHandlerR0)
    {
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerR0);
        AssertReleaseRC(rc);
    }
}


/**
 * Grows the shadow page pool.
 *
 * That is, adds more pages to it, assuming it hasn't reached cMaxPages yet.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) PGMR3PoolGrow(PVM pVM)
{
    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
    AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_INTERNAL_ERROR);

    pgmLock(pVM);

    /*
     * How much to grow it by?
     */
    uint32_t cPages = pPool->cMaxPages - pPool->cCurPages;
    cPages = RT_MIN(PGMPOOL_CFG_MAX_GROW, cPages);
    LogFlow(("PGMR3PoolGrow: Growing the pool by %d (%#x) pages.\n", cPages, cPages));

    for (unsigned i = pPool->cCurPages; cPages-- > 0; i++)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];

        /* Allocate all pages in low (below 4 GB) memory as 32-bit guests need a page table root in low memory. */
        pPage->pvPageR3 = MMR3PageAllocLow(pVM);
        if (!pPage->pvPageR3)
        {
            Log(("We're out of memory!! i=%d\n", i));
            pgmUnlock(pVM);
            return i ? VINF_SUCCESS : VERR_NO_PAGE_MEMORY;
        }
        pPage->Core.Key       = MMPage2Phys(pVM, pPage->pvPageR3);
        pPage->GCPhys         = NIL_RTGCPHYS;
        pPage->enmKind        = PGMPOOLKIND_FREE;
        pPage->idx            = pPage - &pPool->aPages[0];
        LogFlow(("PGMR3PoolGrow: insert page #%#x - %RHp\n", pPage->idx, pPage->Core.Key));
        pPage->iNext          = pPool->iFreeHead;
        pPage->iUserHead      = NIL_PGMPOOL_USER_INDEX;
        pPage->iModifiedNext  = NIL_PGMPOOL_IDX;
        pPage->iModifiedPrev  = NIL_PGMPOOL_IDX;
        pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
        pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext       = NIL_PGMPOOL_IDX;
        pPage->iAgePrev       = NIL_PGMPOOL_IDX;
        /* commit it */
        bool fRc = RTAvloHCPhysInsert(&pPool->HCPhysTree, &pPage->Core); Assert(fRc); NOREF(fRc);
        pPool->iFreeHead = i;
        pPool->cCurPages = i + 1;
    }

    pgmUnlock(pVM);
    Assert(pPool->cCurPages <= pPool->cMaxPages);
    return VINF_SUCCESS;
}
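
/*
 * Usage note (a sketch, not a statement of the exact control flow): the pool
 * is grown on demand rather than pre-allocated; when the allocator
 * (pgmPoolAlloc in PGMAllPool.cpp) finds the free list empty while
 * cCurPages < cMaxPages, growth is requested from ring-3. Each call here adds
 * at most PGMPOOL_CFG_MAX_GROW pages, so several calls may be needed before
 * cCurPages reaches cMaxPages.
 */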


/**
 * Worker used by pgmR3PoolAccessHandler when it's invoked by an async thread.
 *
 * @param   pPool   The pool.
 * @param   pPage   The page.
 */
static DECLCALLBACK(void) pgmR3PoolFlushReusedPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /* for the present this should be safe enough I think... */
    pgmLock(pPool->pVMR3);
    if (    pPage->fReusedFlushPending
        &&  pPage->enmKind != PGMPOOLKIND_FREE)
        pgmPoolFlushPage(pPool, pPage);
    pgmUnlock(pPool->pVMR3);
}


/**
 * \#PF Handler callback for PT write accesses.
 *
 * The handler cannot raise any faults; it's mainly for monitoring write access
 * to certain pages.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    STAM_PROFILE_START(&pVM->pgm.s.pPoolR3->StatMonitorR3, a);
    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
    LogFlow(("pgmR3PoolAccessHandler: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
             GCPhys, pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * We don't have to be very sophisticated about this since there are relatively few calls here.
     * However, we must try our best to detect any non-cpu accesses (disk / networking).
     *
     * Just to make life more interesting, we'll have to deal with the async threads too.
     * We cannot flush a page if we're in an async thread because of REM notifications.
     */
    pgmLock(pVM);
    if (PHYS_PAGE_ADDRESS(GCPhys) != PHYS_PAGE_ADDRESS(pPage->GCPhys))
    {
        /* Pool page changed while we were waiting for the lock; ignore. */
        Log(("CPU%d: pgmR3PoolAccessHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhys), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
        pgmUnlock(pVM);
        return VINF_PGM_HANDLER_DO_DEFAULT;
    }

    Assert(pPage->enmKind != PGMPOOLKIND_FREE);

    if (!pVCpu) /** @todo This shouldn't happen any longer, all access handlers will be called on an EMT. All ring-3 handlers, except MMIO, already own the PGM lock. @bugref{3170} */
    {
        Log(("pgmR3PoolAccessHandler: async thread, requesting EMT to flush the page: %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
             pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));
        STAM_COUNTER_INC(&pPool->StatMonitorR3Async);
        if (!pPage->fReusedFlushPending)
        {
            pgmUnlock(pVM);
            int rc = VMR3ReqCallVoidNoWait(pPool->pVMR3, VMCPUID_ANY, (PFNRT)pgmR3PoolFlushReusedPage, 2, pPool, pPage);
            AssertRCReturn(rc, rc);
            pgmLock(pVM);
            pPage->fReusedFlushPending = true;
            pPage->cModifications += 0x1000;
        }

        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvPhys, 0 /* unknown write size */);
        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
        while (cbBuf > 4)
        {
            cbBuf -= 4;
            pvPhys = (uint8_t *)pvPhys + 4;
            GCPhys += 4;
            pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvPhys, 0 /* unknown write size */);
        }
        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
    }
    else if (   (   pPage->cModifications < 96 /* it's cheaper here. */
                 || pgmPoolIsPageLocked(&pVM->pgm.s, pPage)
                )
             && cbBuf <= 4)
    {
        /* Clear the shadow entry. */
        if (!pPage->cModifications++)
            pgmPoolMonitorModifiedInsert(pPool, pPage);
        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvPhys, 0 /* unknown write size */);
        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
    }
    else
    {
        pgmPoolMonitorChainFlush(pPool, pPage); /* ASSUME that VERR_PGM_POOL_CLEARED can be ignored here and that FFs will deal with it in due time. */
        STAM_PROFILE_STOP_EX(&pPool->StatMonitorR3, &pPool->StatMonitorR3FlushPage, a);
    }
    pgmUnlock(pVM);
    return VINF_PGM_HANDLER_DO_DEFAULT;
}


/**
 * Rendezvous callback used by pgmR3PoolClearAll that clears all shadow pages
 * and all modification counters.
 *
 * This is only called on one of the EMTs while the other ones are waiting for
 * it to complete this function.
 *
 * @returns VINF_SUCCESS (VBox strict status code).
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU for the EMT we're being called on. Unused.
 * @param   pvUser  Unused parameter.
 */
DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    STAM_PROFILE_START(&pPool->StatClearAll, c);
    LogFlow(("pgmPoolClearAllDoIt: cUsedPages=%d\n", pPool->cUsedPages));

    pgmLock(pVM);

#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    pgmPoolResetDirtyPages(pVM);
#endif

    /*
     * Iterate all the pages until we've encountered all that are in use.
     * This is a simple but not quite optimal solution.
     */
    unsigned cModifiedPages = 0; NOREF(cModifiedPages);
    unsigned cLeft = pPool->cUsedPages;
    unsigned iPage = pPool->cCurPages;
    while (--iPage >= PGMPOOL_IDX_FIRST)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
        if (pPage->GCPhys != NIL_RTGCPHYS)
        {
            switch (pPage->enmKind)
            {
                /*
                 * We only care about shadow page tables.
                 */
                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
                case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
                case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
                case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
                case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
                case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
                case PGMPOOLKIND_PAE_PT_FOR_PHYS:
                case PGMPOOLKIND_EPT_PT_FOR_PHYS:
                {
                    if (pPage->cPresent)
                    {
                        void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
                        STAM_PROFILE_START(&pPool->StatZeroPage, z);
                        ASMMemZeroPage(pvShw);
                        STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
                        pPage->cPresent = 0;
                        pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
                    }
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                    else
                        Assert(!pPage->fDirty);
#endif
                }
                /* fall thru */

                default:
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                    Assert(!pPage->fDirty);
#endif
                    Assert(!pPage->cModifications || ++cModifiedPages);
                    Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
                    Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
                    pPage->iModifiedNext = NIL_PGMPOOL_IDX;
                    pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
                    pPage->cModifications = 0;
                    break;

            }
            if (!--cLeft)
                break;
        }
    }

    /* Sweep the special pages too. */
    for (iPage = PGMPOOL_IDX_FIRST_SPECIAL; iPage < PGMPOOL_IDX_FIRST; iPage++)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
        if (pPage->GCPhys != NIL_RTGCPHYS)
        {
            Assert(!pPage->cModifications || ++cModifiedPages);
            Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
            Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
            pPage->iModifiedNext = NIL_PGMPOOL_IDX;
            pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
            pPage->cModifications = 0;
        }
    }

#ifndef DEBUG_michael
    AssertMsg(cModifiedPages == pPool->cModifiedPages, ("%d != %d\n", cModifiedPages, pPool->cModifiedPages));
#endif
    pPool->iModifiedHead = NIL_PGMPOOL_IDX;
    pPool->cModifiedPages = 0;

    /*
     * Clear all the GCPhys links and rebuild the phys ext free list.
     */
    for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        iPage = pRam->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            PGM_PAGE_SET_TRACKING(&pRam->aPages[iPage], 0);
    }

    pPool->iPhysExtFreeHead = 0;
    PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
    const unsigned cMaxPhysExts = pPool->cMaxPhysExts;
    for (unsigned i = 0; i < cMaxPhysExts; i++)
    {
        paPhysExts[i].iNext = i + 1;
        paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
        paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
        paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
    }
    paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;

#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    /* Clear all dirty pages. */
    pPool->idxFreeDirtyPage = 0;
    pPool->cDirtyPages = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
        pPool->aIdxDirtyPages[i] = NIL_PGMPOOL_IDX;
#endif

    /* Clear the PGM_SYNC_CLEAR_PGM_POOL flag on all VCPUs to prevent redundant flushes. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        pVM->aCpus[idCpu].pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;

    /* Flush job finished. */
    VM_FF_CLEAR(pVM, VM_FF_PGM_POOL_FLUSH_PENDING);

    pPool->cPresent = 0;
    pgmUnlock(pVM);
    PGM_INVL_ALL_VCPU_TLBS(pVM);
    STAM_PROFILE_STOP(&pPool->StatClearAll, c);
    return VINF_SUCCESS;
}


/**
 * Clears the shadow page pool.
 *
 * @param   pVM     The VM handle.
 */
void pgmR3PoolClearAll(PVM pVM)
{
    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PoolClearAllRendezvous, NULL);
    AssertRC(rc);
}
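
/*
 * One example trigger (per the r26364 change note at the top of this page):
 * after ballooning frees guest pages, the pool is cleared this way so no
 * cached shadow page keeps a stale reference to a page that was just freed.
 */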


#ifdef VBOX_WITH_DEBUGGER
/**
 * The '.pgmpoolcheck' command.
 *
 * @returns VBox status code.
 * @param   pCmd        Pointer to the command descriptor (as registered).
 * @param   pCmdHlp     Pointer to command helper functions.
 * @param   pVM         Pointer to the current VM (if any).
 * @param   paArgs      Pointer to (readonly) array of arguments.
 * @param   cArgs       Number of arguments in the array.
 * @param   pResult     Where to store the result, if any.
 */
static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    /*
     * Validate input.
     */
    if (!pVM)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    for (unsigned i = 0; i < pPool->cCurPages; i++)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];
        bool fFirstMsg = true;

        /* Todo: cover other paging modes too. */
        if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
        {
            PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
            PX86PTPAE pGstPT;
            int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pGstPT); AssertReleaseRC(rc);

            /* Check if any PTEs are out of sync. */
            for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
            {
                if (pShwPT->a[j].n.u1Present)
                {
                    RTHCPHYS HCPhys = NIL_RTHCPHYS;
                    rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[j].u & X86_PTE_PAE_PG_MASK, &HCPhys);
                    if (    rc != VINF_SUCCESS
                        ||  (pShwPT->a[j].u & X86_PTE_PAE_PG_MASK) != HCPhys)
                    {
                        if (fFirstMsg)
                        {
                            pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Check pool page %RGp\n", pPage->GCPhys);
                            fFirstMsg = false;
                        }
                        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch HCPhys: rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, j, pGstPT->a[j].u, pShwPT->a[j].u, HCPhys);
                    }
                    else if (   pShwPT->a[j].n.u1Write
                             && !pGstPT->a[j].n.u1Write)
                    {
                        if (fFirstMsg)
                        {
                            pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Check pool page %RGp\n", pPage->GCPhys);
                            fFirstMsg = false;
                        }
                        pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch r/w gst/shw: idx=%d guest %RX64 shw=%RX64 vs %RHp\n", j, pGstPT->a[j].u, pShwPT->a[j].u, HCPhys);
                    }
                }
            }

            /* Make sure this page table can't be written to from any shadow mapping. */
            RTHCPHYS HCPhysPT = NIL_RTHCPHYS;
            rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pPage->GCPhys, &HCPhysPT);
            AssertMsgRC(rc, ("PGMPhysGCPhys2HCPhys failed with rc=%d for %RGp\n", rc, pPage->GCPhys));
            if (rc == VINF_SUCCESS)
            {
                for (unsigned j = 0; j < pPool->cCurPages; j++)
                {
                    PPGMPOOLPAGE pTempPage = &pPool->aPages[j];

                    if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
                    {
                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);

                        for (unsigned k = 0; k < RT_ELEMENTS(pShwPT->a); k++)
                        {
                            if (    pShwPT2->a[k].n.u1Present
                                &&  pShwPT2->a[k].n.u1Write
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                                &&  !pPage->fDirty
# endif
                                &&  ((pShwPT2->a[k].u & X86_PTE_PAE_PG_MASK) == HCPhysPT))
                            {
                                if (fFirstMsg)
                                {
                                    pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Check pool page %RGp\n", pPage->GCPhys);
                                    fFirstMsg = false;
                                }
                                pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Mismatch: r/w: GCPhys=%RGp idx=%d shw %RX64 %RX64\n", pTempPage->GCPhys, k, pShwPT->a[k].u, pShwPT2->a[k].u);
                            }
                        }
                    }
                }
            }
        }
    }
    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_DEBUGGER */