VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPool.cpp@14672

Last change on this file since 14672 was 14301, checked in by vboxsync, 16 years ago

Synced some (inactive) new paging code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 29.9 KB
/* $Id: PGMPool.cpp 14301 2008-11-18 13:31:42Z vboxsync $ */
/** @file
 * PGM Shadow Page Pool.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/** @page pg_pgm_pool   PGM Shadow Page Pool
 *
 * Motivations:
 *      -# Relationship between shadow page tables and physical guest pages. This
 *         should allow us to skip most of the global flushes now following access
 *         handler changes. The main expense is flushing shadow pages.
 *      -# Limit the pool size if necessary (default is kind of limitless).
 *      -# Allocate shadow pages from RC. We used to only do this in SyncCR3.
 *      -# Required for 64-bit guests.
 *      -# Combining the PD cache and page pool in order to simplify caching.
 *
 *
 * @section sec_pgm_pool_outline    Design Outline
 *
 * The shadow page pool tracks pages used for shadowing paging structures (i.e.
 * page tables, page directory, page directory pointer table and page map
 * level-4). Each page in the pool has a unique identifier. This identifier is
 * used to link a guest physical page to a shadow PT. The identifier is a
 * non-zero value and has a relatively low max value - say 14 bits. This makes it
 * possible to fit it into the upper bits of the aHCPhys entries in the
 * ram range.
 *
 * By restricting host physical memory to the first 48 bits (which is the
 * announced physical memory range of the K8L chip (scheduled for 2008)), we
 * can safely use the upper 16 bits for shadow page ID and reference counting.
 *
 * Update: The 48 bit assumption will be lifted with the new physical memory
 * management (PGMPAGE), so we won't have any trouble when someone stuffs 2TB
 * into a box in some years.
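 *
 * To illustrate the packing (a minimal sketch; the EXAMPLE_* names and exact
 * field layout are assumptions for illustration, not the actual PGM
 * definitions):
 * @code
 *    #define EXAMPLE_HCPHYS_MASK UINT64_C(0x0000ffffffffffff) // low 48 bits
 *    #define EXAMPLE_IDX_SHIFT   48
 *    #define EXAMPLE_IDX_MASK    UINT64_C(0x3fff)             // 14-bit page id
 *
 *    static inline uint64_t ExamplePackHCPhys(uint64_t HCPhys, uint16_t idxShw)
 *    {
 *        return (HCPhys & EXAMPLE_HCPHYS_MASK)
 *             | ((uint64_t)(idxShw & EXAMPLE_IDX_MASK) << EXAMPLE_IDX_SHIFT);
 *    }
 *
 *    static inline uint16_t ExampleUnpackIdx(uint64_t u64Entry)
 *    {
 *        return (uint16_t)((u64Entry >> EXAMPLE_IDX_SHIFT) & EXAMPLE_IDX_MASK);
 *    }
 * @endcode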
 *
 * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT
 * or PD. This is solved by creating a list of physical cross reference extents
 * whenever this happens. Each node in the list (extent) can contain 3 page
 * pool indexes. The list itself is chained using indexes into the paPhysExt
 * array.
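 *
 * Roughly, an extent node looks like this (a sketch only; the real
 * PGMPOOLPHYSEXT layout is defined in PGMInternal.h):
 * @code
 *    typedef struct EXAMPLEPHYSEXT
 *    {
 *        uint16_t aidx[3]; // up to three shadow page pool indexes
 *        uint16_t iNext;   // next extent in the paPhysExt array, or NIL
 *    } EXAMPLEPHYSEXT;
 * @endcode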
 *
 *
 * @section sec_pgm_pool_life       Life Cycle of a Shadow Page
 *
 *      -# The SyncPT function requests a page from the pool.
 *         The request includes the kind of page it is (PT/PD, PAE/legacy), the
 *         address of the page it's shadowing, and more.
 *      -# The pool responds to the request by allocating a new page.
 *         When the cache is enabled, it will first check if it's in the cache.
 *         Should the pool be exhausted, one of two things can be done
 *         (see the allocation sketch after this list):
 *              -# Flush the whole pool and current CR3.
 *              -# Use the cache to find a page which can be flushed (~age).
 *      -# The SyncPT function will sync one or more pages and insert them into the
 *         shadow PD.
 *      -# The SyncPage function may sync more pages at later \#PFs.
 *      -# The page is freed / flushed in SyncCR3 (perhaps) and some other cases.
 *         When caching is enabled, the page isn't flushed but remains in the cache.
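 *
 * The decision order above, sketched as C (hypothetical names throughout; the
 * real pgmPoolAlloc takes more parameters and returns proper VBox status
 * codes):
 * @code
 *    static int examplePoolAlloc(EXAMPLEPOOL *pPool, RTGCPHYS GCPhys,
 *                                uint8_t enmKind, EXAMPLEPAGE **ppPage)
 *    {
 *        // 1. Try the cache first when it's enabled.
 *        if (pPool->fCacheEnabled && exampleCacheLookup(pPool, GCPhys, enmKind, ppPage))
 *            return VINF_SUCCESS;
 *
 *        // 2. Pool exhausted? Either evict a page by age or flush everything.
 *        if (pPool->iFreeHead == EXAMPLE_NIL_IDX)
 *        {
 *            if (pPool->fCacheEnabled)
 *                exampleCacheFreeOneByAge(pPool);
 *            else
 *                return exampleFlushPoolAndCR3(pPool);
 *        }
 *
 *        // 3. Take a page off the free list and initialize it.
 *        *ppPage = exampleTakeFromFreeList(pPool);
 *        (*ppPage)->GCPhys  = GCPhys;
 *        (*ppPage)->enmKind = enmKind;
 *        return VINF_SUCCESS;
 *    }
 * @endcode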
 *
 *
 * @section sec_pgm_pool_monitoring Monitoring
 *
 * We always monitor PAGE_SIZE chunks of memory. When we've got multiple shadow
 * pages for the same PAGE_SIZE of guest memory (PAE and mixed PD/PT) the pages
 * sharing the monitor get linked using the iMonitoredNext/Prev. The head page
 * is passed as pvUser to the access handlers.
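 *
 * A write to a monitored chunk conceptually walks that chain from the head
 * page, e.g. (a sketch; the real walking is done by
 * pgmPoolMonitorChainChanging / pgmPoolMonitorChainFlush, and
 * exampleUpdateShadowEntry is made up):
 * @code
 *    for (uint16_t idx = pPageHead->idx; idx != NIL_PGMPOOL_IDX; )
 *    {
 *        PPGMPOOLPAGE pPage = &pPool->aPages[idx];
 *        exampleUpdateShadowEntry(pPage, offWrite); // hypothetical helper
 *        idx = pPage->iMonitoredNext;
 *    }
 * @endcode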
 *
 *
 * @section sec_pgm_pool_impl       Implementation
 *
 * The pool will take pages from the MM page pool. The tracking data
 * (attributes, bitmaps and so on) are allocated from the hypervisor heap. The
 * pool content can be accessed both by using the page id and the physical
 * address (HC). The former is managed by means of an array, the latter by an
 * offset based AVL tree.
 *
 * Flushing of a pool page means that we iterate the content (we know what kind
 * it is) and update the link information in the ram range.
 *
 * ...
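 *
 * Side by side, the two lookups (the array indexing is how aPages is used
 * throughout this file; RTAvloHCPhysGet is the IPRT accessor matching the
 * RTAvloHCPhysInsert call in PGMR3PoolGrow below):
 * @code
 *    // By page id: direct array indexing.
 *    PPGMPOOLPAGE pPageById   = &pPool->aPages[idx];
 *    // By HC physical address: offset-based AVL tree keyed on Core.Key.
 *    PPGMPOOLPAGE pPageByPhys = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys);
 * @endcode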
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM_POOL
#include <VBox/pgm.h>
#include <VBox/mm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>

#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
#ifdef PGMPOOL_WITH_MONITORING
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#endif /* PGMPOOL_WITH_MONITORING */


/**
 * Initializes the pool.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
int pgmR3PoolInit(PVM pVM)
{
    /*
     * Query Pool config.
     */
    PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM/Pool");

    /** @cfgm{/PGM/Pool/MaxPages, uint16_t, #pages, 16, 0x3fff, 1024}
     * The max size of the shadow page pool in pages. The pool will grow dynamically
     * up to this limit.
     */
    uint16_t cMaxPages;
    int rc = CFGMR3QueryU16Def(pCfg, "MaxPages", &cMaxPages, 4*_1M >> PAGE_SHIFT);
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
                          ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
    cMaxPages = RT_ALIGN(cMaxPages, 16);

    /** @cfgm{/PGM/Pool/MaxUsers, uint16_t, #users, MaxPages, 32K, MaxPages*2}
     * The max number of shadow page user tracking records. Each shadow page has
     * zero or more other shadow pages (or CR3s) that reference it, or use it if
     * you like. The structures describing these relationships are allocated from
     * a fixed sized pool. This configuration variable defines the pool size.
     */
    uint16_t cMaxUsers;
    rc = CFGMR3QueryU16Def(pCfg, "MaxUsers", &cMaxUsers, cMaxPages * 2);
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgReturn(cMaxUsers >= cMaxPages && cMaxUsers <= _32K,
                          ("cMaxUsers=%u (%#x)\n", cMaxUsers, cMaxUsers), VERR_INVALID_PARAMETER);

    /** @cfgm{/PGM/Pool/MaxPhysExts, uint16_t, #extents, 16, MaxPages * 2, MAX(MaxPages*2,0x3fff)}
     * The max number of extents for tracking aliased guest pages.
     */
    uint16_t cMaxPhysExts;
    rc = CFGMR3QueryU16Def(pCfg, "MaxPhysExts", &cMaxPhysExts, RT_MAX(cMaxPages * 2, PGMPOOL_IDX_LAST));
    AssertLogRelRCReturn(rc, rc);
    AssertLogRelMsgReturn(cMaxPhysExts >= 16 && cMaxPhysExts <= PGMPOOL_IDX_LAST,
                          ("cMaxPhysExts=%u (%#x)\n", cMaxPhysExts, cMaxPhysExts), VERR_INVALID_PARAMETER);

    /** @cfgm{/PGM/Pool/CacheEnabled, bool, true}
     * Enables or disables caching of shadow pages. Caching means that we will try
     * to reuse shadow pages instead of recreating them every time SyncCR3, SyncPT
     * or SyncPage requests one. When reusing a shadow page, we can save time
     * reconstructing it and its children.
     */
    bool fCacheEnabled;
    rc = CFGMR3QueryBoolDef(pCfg, "CacheEnabled", &fCacheEnabled, true);
    AssertLogRelRCReturn(rc, rc);

    Log(("pgmR3PoolInit: cMaxPages=%#RX16 cMaxUsers=%#RX16 cMaxPhysExts=%#RX16 fCacheEnabled=%RTbool\n",
         cMaxPages, cMaxUsers, cMaxPhysExts, fCacheEnabled));

    /*
     * Allocate the data structures.
     */
    uint32_t cb = RT_OFFSETOF(PGMPOOL, aPages[cMaxPages]);
#ifdef PGMPOOL_WITH_USER_TRACKING
    cb += cMaxUsers * sizeof(PGMPOOLUSER);
#endif
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    cb += cMaxPhysExts * sizeof(PGMPOOLPHYSEXT);
#endif
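    /* The whole pool lives in one hyper-heap block: the PGMPOOL header with
       its trailing aPages array, then the user records, then the physical
       cross reference extents (see the pointer setup below). */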
    PPGMPOOL pPool;
    rc = MMR3HyperAllocOnceNoRel(pVM, cb, 0, MM_TAG_PGM_POOL, (void **)&pPool);
    if (RT_FAILURE(rc))
        return rc;
    pVM->pgm.s.pPoolR3 = pPool;
    pVM->pgm.s.pPoolR0 = MMHyperR3ToR0(pVM, pPool);
    pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pPool);

    /*
     * Initialize it.
     */
    pPool->pVMR3 = pVM;
    pPool->pVMR0 = pVM->pVMR0;
    pPool->pVMRC = pVM->pVMRC;
    pPool->cMaxPages = cMaxPages;
    pPool->cCurPages = PGMPOOL_IDX_FIRST;
#ifdef PGMPOOL_WITH_USER_TRACKING
    pPool->iUserFreeHead = 0;
    pPool->cMaxUsers = cMaxUsers;
    PPGMPOOLUSER paUsers = (PPGMPOOLUSER)&pPool->aPages[pPool->cMaxPages];
    pPool->paUsersR3 = paUsers;
    pPool->paUsersR0 = MMHyperR3ToR0(pVM, paUsers);
    pPool->paUsersRC = MMHyperR3ToRC(pVM, paUsers);
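    /* Chain all user records into one free list and mark them all unused. */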
    for (unsigned i = 0; i < cMaxUsers; i++)
    {
        paUsers[i].iNext = i + 1;
        paUsers[i].iUser = NIL_PGMPOOL_IDX;
        paUsers[i].iUserTable = 0xfffffffe;
    }
    paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
#endif
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    pPool->iPhysExtFreeHead = 0;
    pPool->cMaxPhysExts = cMaxPhysExts;
    PPGMPOOLPHYSEXT paPhysExts = (PPGMPOOLPHYSEXT)&paUsers[cMaxUsers];
    pPool->paPhysExtsR3 = paPhysExts;
    pPool->paPhysExtsR0 = MMHyperR3ToR0(pVM, paPhysExts);
    pPool->paPhysExtsRC = MMHyperR3ToRC(pVM, paPhysExts);
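    /* Likewise, chain the physical cross reference extents into a free list. */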
    for (unsigned i = 0; i < cMaxPhysExts; i++)
    {
        paPhysExts[i].iNext = i + 1;
        paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
        paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
        paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
    }
    paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
#endif
#ifdef PGMPOOL_WITH_CACHE
    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
        pPool->aiHash[i] = NIL_PGMPOOL_IDX;
    pPool->iAgeHead = NIL_PGMPOOL_IDX;
    pPool->iAgeTail = NIL_PGMPOOL_IDX;
    pPool->fCacheEnabled = fCacheEnabled;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    pPool->pfnAccessHandlerR3 = pgmR3PoolAccessHandler;
    pPool->pszAccessHandler = "Guest Paging Access Handler";
#endif
    pPool->HCPhysTree = 0;

    /* The NIL entry. */
    Assert(NIL_PGMPOOL_IDX == 0);
    pPool->aPages[NIL_PGMPOOL_IDX].enmKind = PGMPOOLKIND_INVALID;

    /* The Shadow 32-bit PD. (32-bit guest paging) */
    pPool->aPages[PGMPOOL_IDX_PD].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_PD].GCPhys = NIL_RTGCPHYS;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    pPool->aPages[PGMPOOL_IDX_PD].pvPageR3 = 0;
    pPool->aPages[PGMPOOL_IDX_PD].enmKind = PGMPOOLKIND_32BIT_PD;
#else
    pPool->aPages[PGMPOOL_IDX_PD].pvPageR3 = pVM->pgm.s.pShw32BitPdR3;
    pPool->aPages[PGMPOOL_IDX_PD].enmKind = PGMPOOLKIND_ROOT_32BIT_PD;
#endif
    pPool->aPages[PGMPOOL_IDX_PD].idx = PGMPOOL_IDX_PD;

#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* The Shadow PAE PDs. This is actually 4 pages! (32-bit guest paging) */
    pPool->aPages[PGMPOOL_IDX_PAE_PD].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_PAE_PD].GCPhys = NIL_RTGCPHYS;
    pPool->aPages[PGMPOOL_IDX_PAE_PD].pvPageR3 = pVM->pgm.s.apShwPaePDsR3[0];
    pPool->aPages[PGMPOOL_IDX_PAE_PD].enmKind = PGMPOOLKIND_ROOT_PAE_PD;
    pPool->aPages[PGMPOOL_IDX_PAE_PD].idx = PGMPOOL_IDX_PAE_PD;

    /* The Shadow PAE PDs for PAE guest mode. */
    for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    {
        pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].Core.Key = NIL_RTHCPHYS;
        pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].GCPhys = NIL_RTGCPHYS;
        pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].pvPageR3 = pVM->pgm.s.apShwPaePDsR3[i];
        pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
        pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].idx = PGMPOOL_IDX_PAE_PD_0 + i;
    }
#endif

    /* The Shadow PDPT. */
    pPool->aPages[PGMPOOL_IDX_PDPT].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_PDPT].GCPhys = NIL_RTGCPHYS;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3 = 0;
    pPool->aPages[PGMPOOL_IDX_PDPT].enmKind = PGMPOOLKIND_PAE_PDPT;
#else
    pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3 = pVM->pgm.s.pShwPaePdptR3;
    pPool->aPages[PGMPOOL_IDX_PDPT].enmKind = PGMPOOLKIND_ROOT_PDPT;
#endif
    pPool->aPages[PGMPOOL_IDX_PDPT].idx = PGMPOOL_IDX_PDPT;

    /* The Shadow AMD64 CR3. */
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].GCPhys = NIL_RTGCPHYS;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3 = 0;
#else
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3 = pVM->pgm.s.pShwPaePdptR3; /* not used - isn't it wrong as well? */
#endif
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].enmKind = PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4;
    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].idx = PGMPOOL_IDX_AMD64_CR3;

    /* The Nested Paging CR3. */
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].Core.Key = NIL_RTHCPHYS;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].GCPhys = NIL_RTGCPHYS;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].pvPageR3 = pVM->pgm.s.pShwNestedRootR3;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].enmKind = PGMPOOLKIND_ROOT_NESTED;
    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].idx = PGMPOOL_IDX_NESTED_ROOT;

    /*
     * Set common stuff.
     */
    for (unsigned iPage = 1; iPage < PGMPOOL_IDX_FIRST; iPage++)
    {
        pPool->aPages[iPage].iNext = NIL_PGMPOOL_IDX;
#ifdef PGMPOOL_WITH_USER_TRACKING
        pPool->aPages[iPage].iUserHead = NIL_PGMPOOL_USER_INDEX;
#endif
#ifdef PGMPOOL_WITH_MONITORING
        pPool->aPages[iPage].iModifiedNext = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iModifiedPrev = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iMonitoredNext = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iMonitoredPrev = NIL_PGMPOOL_IDX;
#endif
#ifdef PGMPOOL_WITH_CACHE
        pPool->aPages[iPage].iAgeNext = NIL_PGMPOOL_IDX;
        pPool->aPages[iPage].iAgePrev = NIL_PGMPOOL_IDX;
#endif
        Assert(VALID_PTR(pPool->aPages[iPage].pvPageR3));
        Assert(pPool->aPages[iPage].idx == iPage);
        Assert(pPool->aPages[iPage].GCPhys == NIL_RTGCPHYS);
        Assert(!pPool->aPages[iPage].fSeenNonGlobal);
        Assert(!pPool->aPages[iPage].fMonitored);
        Assert(!pPool->aPages[iPage].fCached);
        Assert(!pPool->aPages[iPage].fZeroed);
        Assert(!pPool->aPages[iPage].fReusedFlushPending);
    }

#ifdef VBOX_WITH_STATISTICS
    /*
     * Register statistics.
     */
    STAM_REG(pVM, &pPool->cCurPages, STAMTYPE_U16, "/PGM/Pool/cCurPages", STAMUNIT_PAGES, "Current pool size.");
    STAM_REG(pVM, &pPool->cMaxPages, STAMTYPE_U16, "/PGM/Pool/cMaxPages", STAMUNIT_PAGES, "Max pool size.");
    STAM_REG(pVM, &pPool->cUsedPages, STAMTYPE_U16, "/PGM/Pool/cUsedPages", STAMUNIT_PAGES, "The number of pages currently in use.");
    STAM_REG(pVM, &pPool->cUsedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/cUsedPagesHigh", STAMUNIT_PAGES, "The high watermark for cUsedPages.");
    STAM_REG(pVM, &pPool->StatAlloc, STAMTYPE_PROFILE_ADV, "/PGM/Pool/Alloc", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolAlloc.");
    STAM_REG(pVM, &pPool->StatClearAll, STAMTYPE_PROFILE, "/PGM/Pool/ClearAll", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolClearAll.");
    STAM_REG(pVM, &pPool->StatFlushAllInt, STAMTYPE_PROFILE, "/PGM/Pool/FlushAllInt", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushAllInt.");
    STAM_REG(pVM, &pPool->StatFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushPage.");
    STAM_REG(pVM, &pPool->StatFree, STAMTYPE_PROFILE, "/PGM/Pool/Free", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFree.");
    STAM_REG(pVM, &pPool->StatZeroPage, STAMTYPE_PROFILE, "/PGM/Pool/ZeroPage", STAMUNIT_TICKS_PER_CALL, "Profiling time spent zeroing pages. Overlaps with Alloc.");
# ifdef PGMPOOL_WITH_USER_TRACKING
    STAM_REG(pVM, &pPool->cMaxUsers, STAMTYPE_U16, "/PGM/Pool/Track/cMaxUsers", STAMUNIT_COUNT, "Max user tracking records.");
    STAM_REG(pVM, &pPool->cPresent, STAMTYPE_U32, "/PGM/Pool/Track/cPresent", STAMUNIT_COUNT, "Number of present page table entries.");
    STAM_REG(pVM, &pPool->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Pool/Track/Deref", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackDeref.");
    STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPT, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPT", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackFlushGCPhysPT.");
    STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTs, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTs", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackFlushGCPhysPTs.");
    STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTsSlow, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTsSlow", STAMUNIT_OCCURENCES, "Profiling of pgmPoolTrackFlushGCPhysPTsSlow.");
    STAM_REG(pVM, &pPool->StatTrackFreeUpOneUser, STAMTYPE_COUNTER, "/PGM/Pool/Track/FreeUpOneUser", STAMUNIT_OCCURENCES, "The number of times we were out of user tracking records.");
# endif
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    STAM_REG(pVM, &pPool->StatTrackDerefGCPhys, STAMTYPE_PROFILE, "/PGM/Pool/Track/DrefGCPhys", STAMUNIT_OCCURENCES, "Profiling deref activity related to tracking GC physical pages.");
    STAM_REG(pVM, &pPool->StatTrackLinearRamSearches, STAMTYPE_COUNTER, "/PGM/Pool/Track/LinearRamSearches", STAMUNIT_OCCURENCES, "The number of times we had to do linear ram searches.");
    STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures, STAMTYPE_COUNTER, "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES, "The number of failing pgmPoolTrackPhysExtAlloc calls.");
# endif
# ifdef PGMPOOL_WITH_MONITORING
    STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction.");
    STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork().");
    STAM_REG(pVM, &pPool->StatMonitorRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 accesses we've handled (except REP STOSD).");
    STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction.");
    STAM_REG(pVM, &pPool->StatMonitorRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing.");
    STAM_REG(pVM, &pPool->StatMonitorRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle.");
    STAM_REG(pVM, &pPool->StatMonitorRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
    STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorR3EmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction.");
    STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler.");
    STAM_REG(pVM, &pPool->StatMonitorR3Fork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork().");
    STAM_REG(pVM, &pPool->StatMonitorR3Handled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 accesses we've handled (except REP STOSD).");
    STAM_REG(pVM, &pPool->StatMonitorR3RepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle.");
    STAM_REG(pVM, &pPool->StatMonitorR3RepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
    STAM_REG(pVM, &pPool->StatMonitorR3Async, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Async", STAMUNIT_OCCURENCES, "Times we're called in an async thread and need to flush.");
    STAM_REG(pVM, &pPool->cModifiedPages, STAMTYPE_U16, "/PGM/Pool/Monitor/cModifiedPages", STAMUNIT_PAGES, "The current cModifiedPages value.");
    STAM_REG(pVM, &pPool->cModifiedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES, "The high watermark for cModifiedPages.");
# endif
# ifdef PGMPOOL_WITH_CACHE
    STAM_REG(pVM, &pPool->StatCacheHits, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Hits", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls satisfied by the cache.");
    STAM_REG(pVM, &pPool->StatCacheMisses, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Misses", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls not satisfied by the cache.");
    STAM_REG(pVM, &pPool->StatCacheKindMismatches, STAMTYPE_COUNTER, "/PGM/Pool/Cache/KindMismatches", STAMUNIT_OCCURENCES, "The number of shadow page kind mismatches. (Better be low, preferably 0!)");
    STAM_REG(pVM, &pPool->StatCacheFreeUpOne, STAMTYPE_COUNTER, "/PGM/Pool/Cache/FreeUpOne", STAMUNIT_OCCURENCES, "The number of times the cache was asked to free up a page.");
    STAM_REG(pVM, &pPool->StatCacheCacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Cacheable", STAMUNIT_OCCURENCES, "The number of cacheable allocations.");
    STAM_REG(pVM, &pPool->StatCacheUncacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Uncacheable", STAMUNIT_OCCURENCES, "The number of uncacheable allocations.");
# endif
#endif /* VBOX_WITH_STATISTICS */

    return VINF_SUCCESS;
}


/**
 * Relocate the page pool data.
 *
 * @param   pVM     The VM handle.
 */
void pgmR3PoolRelocate(PVM pVM)
{
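    /* The raw-mode context mapping of the hypervisor heap can move when the
       hypervisor area is relocated, so all RC pointers must be recalculated. */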
    pVM->pgm.s.pPoolRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3);
    pVM->pgm.s.pPoolR3->pVMRC = pVM->pVMRC;
#ifdef PGMPOOL_WITH_USER_TRACKING
    pVM->pgm.s.pPoolR3->paUsersRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3->paUsersR3);
#endif
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    pVM->pgm.s.pPoolR3->paPhysExtsRC = MMHyperR3ToRC(pVM, pVM->pgm.s.pPoolR3->paPhysExtsR3);
#endif
#ifdef PGMPOOL_WITH_MONITORING
    int rc = PDMR3LdrGetSymbolRC(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerRC);
    AssertReleaseRC(rc);
    /* init order hack. */
    if (!pVM->pgm.s.pPoolR3->pfnAccessHandlerR0)
    {
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, "pgmPoolAccessHandler", &pVM->pgm.s.pPoolR3->pfnAccessHandlerR0);
        AssertReleaseRC(rc);
    }
#endif
}


/**
 * Reset notification.
 *
 * This will flush the pool.
 *
 * @param   pVM     The VM handle.
 */
void pgmR3PoolReset(PVM pVM)
{
    pgmPoolFlushAll(pVM);
}


/**
 * Grows the shadow page pool.
 *
 * I.e. adds more pages to it, assuming it hasn't reached cMaxPages yet.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) PGMR3PoolGrow(PVM pVM)
{
    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
    AssertReturn(pPool->cCurPages < pPool->cMaxPages, VERR_INTERNAL_ERROR);

    /*
     * How much to grow it by?
     */
    uint32_t cPages = pPool->cMaxPages - pPool->cCurPages;
    cPages = RT_MIN(PGMPOOL_CFG_MAX_GROW, cPages);
    LogFlow(("PGMR3PoolGrow: Growing the pool by %d (%#x) pages.\n", cPages, cPages));

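    /*
     * Allocate the new pages, pushing each onto the free list and inserting
     * it into the HC physical address AVL tree as we go.
     */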
    for (unsigned i = pPool->cCurPages; cPages-- > 0; i++)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];

        pPage->pvPageR3 = MMR3PageAlloc(pVM);
        if (!pPage->pvPageR3)
        {
            Log(("We're out of memory!! i=%d\n", i));
            return i ? VINF_SUCCESS : VERR_NO_PAGE_MEMORY;
        }
        pPage->Core.Key = MMPage2Phys(pVM, pPage->pvPageR3);
        LogFlow(("PGMR3PoolGrow: insert page %RHp\n", pPage->Core.Key));
        pPage->GCPhys = NIL_RTGCPHYS;
        pPage->enmKind = PGMPOOLKIND_FREE;
        pPage->idx = pPage - &pPool->aPages[0];
        pPage->iNext = pPool->iFreeHead;
#ifdef PGMPOOL_WITH_USER_TRACKING
        pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
#endif
#ifdef PGMPOOL_WITH_MONITORING
        pPage->iModifiedNext = NIL_PGMPOOL_IDX;
        pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
        pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
        pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
#endif
#ifdef PGMPOOL_WITH_CACHE
        pPage->iAgeNext = NIL_PGMPOOL_IDX;
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
#endif
        /* commit it */
        bool fRc = RTAvloHCPhysInsert(&pPool->HCPhysTree, &pPage->Core); Assert(fRc); NOREF(fRc);
        pPool->iFreeHead = i;
        pPool->cCurPages = i + 1;
    }

    Assert(pPool->cCurPages <= pPool->cMaxPages);
    return VINF_SUCCESS;
}


#ifdef PGMPOOL_WITH_MONITORING

/**
 * Worker used by pgmR3PoolAccessHandler when it's invoked by an async thread.
 *
 * @param   pPool   The pool.
 * @param   pPage   The page.
 */
static DECLCALLBACK(void) pgmR3PoolFlushReusedPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /* for the present this should be safe enough I think... */
    pgmLock(pPool->pVMR3);
    if (    pPage->fReusedFlushPending
        &&  pPage->enmKind != PGMPOOLKIND_FREE)
        pgmPoolFlushPage(pPool, pPage);
    pgmUnlock(pPool->pVMR3);
}


/**
 * \#PF Handler callback for PT write accesses.
 *
 * The handler cannot raise any faults, it's mainly for monitoring write access
 * to certain pages.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3PoolAccessHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    STAM_PROFILE_START(&pVM->pgm.s.pPoolR3->StatMonitorR3, a);
    PPGMPOOL pPool = pVM->pgm.s.pPoolR3;
    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
    LogFlow(("pgmR3PoolAccessHandler: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
             GCPhys, pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));

    /*
     * We don't have to be very sophisticated about this since there are relatively few calls here.
     * However, we must try our best to detect any non-cpu accesses (disk / networking).
     *
     * Just to make life more interesting, we'll have to deal with the async threads too.
     * We cannot flush a page if we're in an async thread because of REM notifications.
     */
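    /* Three cases follow: non-EMT callers (async I/O threads) defer the actual
       flush to EMT and only update the shadow entries; small writes on the EMT
       are handled in place; everything else flushes the monitored chain. */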
    if (!VM_IS_EMT(pVM))
    {
        Log(("pgmR3PoolAccessHandler: async thread, requesting EMT to flush the page: %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
             pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));
        STAM_COUNTER_INC(&pPool->StatMonitorR3Async);
        if (!pPage->fReusedFlushPending)
        {
            int rc = VMR3ReqCallEx(pPool->pVMR3, VMREQDEST_ANY, NULL, 0, VMREQFLAGS_NO_WAIT | VMREQFLAGS_VOID, (PFNRT)pgmR3PoolFlushReusedPage, 2, pPool, pPage);
            AssertRCReturn(rc, rc);
            pPage->fReusedFlushPending = true;
            pPage->cModifications += 0x1000;
        }
        pgmPoolMonitorChainChanging(pPool, pPage, GCPhys, pvPhys, NULL);
        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
        while (cbBuf > 4)
        {
            cbBuf -= 4;
            pvPhys = (uint8_t *)pvPhys + 4;
            GCPhys += 4;
            pgmPoolMonitorChainChanging(pPool, pPage, GCPhys, pvPhys, NULL);
        }
        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
    }
    else if (    (pPage->fCR3Mix || pPage->cModifications < 96) /* it's cheaper here. */
             &&  cbBuf <= 4)
    {
        /* Clear the shadow entry. */
        if (!pPage->cModifications++)
            pgmPoolMonitorModifiedInsert(pPool, pPage);
        /** @todo r=bird: making unsafe assumption about not crossing entries here! */
        pgmPoolMonitorChainChanging(pPool, pPage, GCPhys, pvPhys, NULL);
        STAM_PROFILE_STOP(&pPool->StatMonitorR3, a);
    }
    else
    {
        pgmPoolMonitorChainFlush(pPool, pPage); /* ASSUME that VERR_PGM_POOL_CLEARED can be ignored here and that FFs will deal with it in due time. */
        STAM_PROFILE_STOP_EX(&pPool->StatMonitorR3, &pPool->StatMonitorR3FlushPage, a);
    }

    return VINF_PGM_HANDLER_DO_DEFAULT;
}

#endif /* PGMPOOL_WITH_MONITORING */
