VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp @ 27062

Last change on this file since 27062 was 27038, checked in by vboxsync, 15 years ago

Try to reuse a 2mb large page if it was previously disabled for monitoring purposes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 191.8 KB
1/* $Id: PGMAllPool.cpp 27038 2010-03-04 14:48:14Z vboxsync $ */
2/** @file
3 * PGM Shadow Page Pool.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM_POOL
27#include <VBox/pgm.h>
28#include <VBox/mm.h>
29#include <VBox/em.h>
30#include <VBox/cpum.h>
31#ifdef IN_RC
32# include <VBox/patm.h>
33#endif
34#include "../PGMInternal.h"
35#include <VBox/vm.h>
36#include "../PGMInline.h"
37#include <VBox/disopcode.h>
38#include <VBox/hwacc_vmx.h>
39
40#include <VBox/log.h>
41#include <VBox/err.h>
42#include <iprt/asm.h>
43#include <iprt/string.h>
44
45
46/*******************************************************************************
47* Internal Functions *
48*******************************************************************************/
49RT_C_DECLS_BEGIN
50static void pgmPoolFlushAllInt(PPGMPOOL pPool);
51DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind);
52DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind);
53static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
54static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
55static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
56#ifndef IN_RING3
57DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
58#endif
59#ifdef LOG_ENABLED
60static const char *pgmPoolPoolKindToStr(uint8_t enmKind);
61#endif
62#if defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)
63static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT);
64#endif
65
66int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
67PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
68void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
69void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
70
71RT_C_DECLS_END
72
73
74/**
75 * Checks if the specified page pool kind is for a 4MB or 2MB guest page.
76 *
77 * @returns true if it's the shadow of a 4MB or 2MB guest page, otherwise false.
78 * @param enmKind The page kind.
79 */
80DECLINLINE(bool) pgmPoolIsBigPage(PGMPOOLKIND enmKind)
81{
82 switch (enmKind)
83 {
84 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
85 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
86 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
87 return true;
88 default:
89 return false;
90 }
91}
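/*
 * Editor's note: a minimal usage sketch (not from the original source),
 * assuming a pool page pPage is in scope. Shadow PTs of large guest pages
 * need separate handling because all their entries back one guest page.
 */
#if 0 /* illustrative only */
if (pgmPoolIsBigPage((PGMPOOLKIND)pPage->enmKind))
    Log(("pool page %d shadows a 2MB/4MB guest page\n", pPage->idx));
else
    Log(("pool page %d shadows 4KB guest mappings\n", pPage->idx));
#endif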
92
93/** @def PGMPOOL_PAGE_2_LOCKED_PTR
94 * Maps a pool page into the current context and locks it (RC only).
95 *
96 * @returns Pointer to the page mapping.
97 * @param pVM The VM handle.
98 * @param pPage The pool page.
99 *
100 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
101 * small page window slots employed by that function. Be careful.
102 * @remark There is no need to assert on the result.
103 */
104#if defined(IN_RC)
105DECLINLINE(void *) PGMPOOL_PAGE_2_LOCKED_PTR(PVM pVM, PPGMPOOLPAGE pPage)
106{
107 void *pv = pgmPoolMapPageInlined(&pVM->pgm.s, pPage);
108
109 /* Make sure the dynamic mapping will not be reused. */
110 if (pv)
111 PGMDynLockHCPage(pVM, (uint8_t *)pv);
112
113 return pv;
114}
115#else
116# define PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage) PGMPOOL_PAGE_2_PTR(pVM, pPage)
117#endif
118
119/** @def PGMPOOL_UNLOCK_PTR
120 * Unlocks a previously locked dynamic cache mapping (RC only).
121 *
122 * @returns nothing.
123 * @param pVM The VM handle.
124 * @param pvPage The pool page mapping to unlock.
125 *
126 * @remark In RC this releases the dynamic mapping lock taken by
127 * PGMPOOL_PAGE_2_LOCKED_PTR().
128 * @remark There is no need to assert on the result.
129 */
130#if defined(IN_RC)
131DECLINLINE(void) PGMPOOL_UNLOCK_PTR(PVM pVM, void *pvPage)
132{
133 if (pvPage)
134 PGMDynUnlockHCPage(pVM, (uint8_t *)pvPage);
135}
136#else
137# define PGMPOOL_UNLOCK_PTR(pVM, pPage) do {} while (0)
138#endif
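/*
 * Editor's note: a hedged sketch of the intended lock/unlock pairing. In
 * RC the pointer comes from a small dynamic mapping window, so it is
 * locked while in use and released as soon as possible afterwards.
 */
#if 0 /* illustrative only */
void *pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
/* ... access the shadow table through pv ... */
PGMPOOL_UNLOCK_PTR(pVM, pv);
#endif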
139
140
141/**
142 * Flushes a chain of pages sharing the same access monitor.
143 *
144 * @returns VBox status code suitable for scheduling.
145 * @param pPool The pool.
146 * @param pPage A page in the chain.
147 */
148int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
149{
150 LogFlow(("pgmPoolMonitorChainFlush: Flush page %RGp type=%d\n", pPage->GCPhys, pPage->enmKind));
151
152 /*
153 * Find the list head.
154 */
155 uint16_t idx = pPage->idx;
156 if (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
157 {
158 while (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
159 {
160 idx = pPage->iMonitoredPrev;
161 Assert(idx != pPage->idx);
162 pPage = &pPool->aPages[idx];
163 }
164 }
165
166 /*
167 * Iterate the list flushing each shadow page.
168 */
169 int rc = VINF_SUCCESS;
170 for (;;)
171 {
172 idx = pPage->iMonitoredNext;
173 Assert(idx != pPage->idx);
174 if (pPage->idx >= PGMPOOL_IDX_FIRST)
175 {
176 int rc2 = pgmPoolFlushPage(pPool, pPage);
177 AssertRC(rc2);
178 }
179 /* next */
180 if (idx == NIL_PGMPOOL_IDX)
181 break;
182 pPage = &pPool->aPages[idx];
183 }
184 return rc;
185}
186
187
188/**
189 * Wrapper for reading the guest entry being modified, using the current context.
190 *
191 * @returns VBox status code suitable for scheduling.
192 * @param pVM VM Handle.
193 * @param pvDst Destination address.
194 * @param pvSrc Source guest virtual address.
195 * @param GCPhysSrc The source guest physical address.
196 * @param cb Size of data to read.
197 */
198DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVM pVM, void *pvDst, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvSrc, RTGCPHYS GCPhysSrc, size_t cb)
199{
200#if defined(IN_RING3)
201 memcpy(pvDst, (RTHCPTR)((uintptr_t)pvSrc & ~(RTHCUINTPTR)(cb - 1)), cb);
202 return VINF_SUCCESS;
203#else
204 /** @todo in RC we could attempt to use the virtual address, although this can cause many faults (PAE Windows XP guest). */
205 return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc & ~(RTGCPHYS)(cb - 1), cb);
206#endif
207}
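/*
 * Editor's note: the masking above aligns the read down to a cb boundary,
 * which assumes cb is a power of two (4-byte legacy or 8-byte PAE entry).
 * A standalone restatement of the arithmetic, with a hypothetical helper name:
 */
#if 0 /* illustrative only */
static RTGCPHYS pgmPoolDemoAlignToEntry(RTGCPHYS GCPhys, size_t cbEntry)
{
    /* cbEntry = 8: 0x1234 & ~(8 - 1) = 0x1230, i.e. the start of the
       8-byte entry containing the faulting address. */
    return GCPhys & ~(RTGCPHYS)(cbEntry - 1);
}
#endif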
208
209/**
210 * Process shadow entries before they are changed by the guest.
211 *
212 * For PT entries we will clear them. For PD entries, we'll simply check
213 * for mapping conflicts and set the SyncCR3 FF if found.
214 *
215 * @param pVCpu VMCPU handle.
216 * @param pPool The pool.
217 * @param pPage The head page.
218 * @param GCPhysFault The guest physical fault address.
219 * @param pvAddress In R0 and GC this is the guest context fault address (flat).
220 * In R3 this is the host context 'fault' address.
221 * @param cbWrite Write size; might be zero if the caller knows we're not crossing entry boundaries.
222 */
223void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite)
224{
225 AssertMsg(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX, ("%#x (idx=%#x)\n", pPage->iMonitoredPrev, pPage->idx));
226 const unsigned off = GCPhysFault & PAGE_OFFSET_MASK;
227 PVM pVM = pPool->CTX_SUFF(pVM);
228
229 LogFlow(("pgmPoolMonitorChainChanging: %RGv phys=%RGp cbWrite=%d\n", (RTGCPTR)(CTXTYPE(RTGCPTR, uintptr_t, RTGCPTR))pvAddress, GCPhysFault, cbWrite));
230
231 for (;;)
232 {
233 union
234 {
235 void *pv;
236 PX86PT pPT;
237 PX86PTPAE pPTPae;
238 PX86PD pPD;
239 PX86PDPAE pPDPae;
240 PX86PDPT pPDPT;
241 PX86PML4 pPML4;
242 } uShw;
243
244 LogFlow(("pgmPoolMonitorChainChanging: page idx=%d phys=%RGp (next=%d) kind=%s cbWrite=%d\n", pPage->idx, pPage->GCPhys, pPage->iMonitoredNext, pgmPoolPoolKindToStr(pPage->enmKind), cbWrite));
245
246 uShw.pv = NULL;
247 switch (pPage->enmKind)
248 {
249 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
250 {
251 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
252 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
253 const unsigned iShw = off / sizeof(X86PTE);
254 LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
255 if (uShw.pPT->a[iShw].n.u1Present)
256 {
257 X86PTE GstPte;
258
259 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
260 AssertRC(rc);
261 Log4(("pgmPoolMonitorChainChanging 32_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PG_MASK));
262 pgmPoolTracDerefGCPhysHint(pPool, pPage,
263 uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK,
264 GstPte.u & X86_PTE_PG_MASK);
265 ASMAtomicWriteSize(&uShw.pPT->a[iShw], 0);
266 }
267 break;
268 }
269
270 /* page/2 sized */
271 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
272 {
273 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
274 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
275 if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
276 {
277 const unsigned iShw = (off / sizeof(X86PTE)) & (X86_PG_PAE_ENTRIES - 1);
278 LogFlow(("PGMPOOLKIND_PAE_PT_FOR_32BIT_PT iShw=%x\n", iShw));
279 if (uShw.pPTPae->a[iShw].n.u1Present)
280 {
281 X86PTE GstPte;
282 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
283 AssertRC(rc);
284
285 Log4(("pgmPoolMonitorChainChanging pae_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PG_MASK));
286 pgmPoolTracDerefGCPhysHint(pPool, pPage,
287 uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK,
288 GstPte.u & X86_PTE_PG_MASK);
289 ASMAtomicWriteSize(&uShw.pPTPae->a[iShw], 0);
290 }
291 }
292 break;
293 }
294
295 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
296 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
297 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
298 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
299 {
300 unsigned iGst = off / sizeof(X86PDE);
301 unsigned iShwPdpt = iGst / 256;
302 unsigned iShw = (iGst % 256) * 2;
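/* Editor's note: a 32-bit guest PD has 1024 entries covering 4GB, while
   each of the four shadow PAE PDs covers 1GB with 512 entries. So
   iGst / 256 picks the PAE PD (kinds PD0..PD3), and every 4MB-capable
   guest PDE expands to two 2MB-capable shadow PDEs, hence the '* 2'. */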
303 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
304
305 LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
306 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
307 if (iShwPdpt == pPage->enmKind - (unsigned)PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD)
308 {
309 for (unsigned i = 0; i < 2; i++)
310 {
311# ifndef IN_RING0
312 if ((uShw.pPDPae->a[iShw + i].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
313 {
314 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
315 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
316 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw+i));
317 break;
318 }
319 else
320# endif /* !IN_RING0 */
321 if (uShw.pPDPae->a[iShw+i].n.u1Present)
322 {
323 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw+i, uShw.pPDPae->a[iShw+i].u));
324 pgmPoolFree(pVM,
325 uShw.pPDPae->a[iShw+i].u & X86_PDE_PAE_PG_MASK,
326 pPage->idx,
327 iShw + i);
328 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw+i], 0);
329 }
330
331 /* paranoia / a bit assumptive. */
332 if ( (off & 3)
333 && (off & 3) + cbWrite > 4)
334 {
335 const unsigned iShw2 = iShw + 2 + i;
336 if (iShw2 < RT_ELEMENTS(uShw.pPDPae->a))
337 {
338# ifndef IN_RING0
339 if ((uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
340 {
341 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
342 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
343 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));
344 break;
345 }
346 else
347# endif /* !IN_RING0 */
348 if (uShw.pPDPae->a[iShw2].n.u1Present)
349 {
350 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
351 pgmPoolFree(pVM,
352 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
353 pPage->idx,
354 iShw2);
355 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw2].u, 0);
356 }
357 }
358 }
359 }
360 }
361 break;
362 }
363
364 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
365 {
366 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
367 const unsigned iShw = off / sizeof(X86PTEPAE);
368 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
369 if (uShw.pPTPae->a[iShw].n.u1Present)
370 {
371 X86PTEPAE GstPte;
372 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
373 AssertRC(rc);
374
375 Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PAE_PG_MASK));
376 pgmPoolTracDerefGCPhysHint(pPool, pPage,
377 uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK,
378 GstPte.u & X86_PTE_PAE_PG_MASK);
379 ASMAtomicWriteSize(&uShw.pPTPae->a[iShw].u, 0);
380 }
381
382 /* paranoia / a bit assumptive. */
383 if ( (off & 7)
384 && (off & 7) + cbWrite > sizeof(X86PTEPAE))
385 {
386 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTEPAE);
387 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPTPae->a));
388
389 if (uShw.pPTPae->a[iShw2].n.u1Present)
390 {
391 X86PTEPAE GstPte;
392# ifdef IN_RING3
393 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, (RTHCPTR)((RTHCUINTPTR)pvAddress + sizeof(GstPte)), GCPhysFault + sizeof(GstPte), sizeof(GstPte));
394# else
395 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress + sizeof(GstPte), GCPhysFault + sizeof(GstPte), sizeof(GstPte));
396# endif
397 AssertRC(rc);
398 Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PAE_PG_MASK));
399 pgmPoolTracDerefGCPhysHint(pPool, pPage,
400 uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK,
401 GstPte.u & X86_PTE_PAE_PG_MASK);
402 ASMAtomicWriteSize(&uShw.pPTPae->a[iShw2].u, 0);
403 }
404 }
405 break;
406 }
407
408 case PGMPOOLKIND_32BIT_PD:
409 {
410 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
411 const unsigned iShw = off / sizeof(X86PTE); // ASSUMING 32-bit guest paging!
412
413 LogFlow(("pgmPoolMonitorChainChanging: PGMPOOLKIND_32BIT_PD %x\n", iShw));
414 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
415# ifndef IN_RING0
416 if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING)
417 {
418 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
419 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
420 STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
421 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
422 break;
423 }
424# endif /* !IN_RING0 */
425# ifndef IN_RING0
426 else
427# endif /* !IN_RING0 */
428 {
429 if (uShw.pPD->a[iShw].n.u1Present)
430 {
431 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
432 pgmPoolFree(pVM,
433 uShw.pPD->a[iShw].u & X86_PDE_PAE_PG_MASK,
434 pPage->idx,
435 iShw);
436 ASMAtomicWriteSize(&uShw.pPD->a[iShw].u, 0);
437 }
438 }
439 /* paranoia / a bit assumptive. */
440 if ( (off & 3)
441 && (off & 3) + cbWrite > sizeof(X86PTE))
442 {
443 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTE);
444 if ( iShw2 != iShw
445 && iShw2 < RT_ELEMENTS(uShw.pPD->a))
446 {
447# ifndef IN_RING0
448 if (uShw.pPD->a[iShw2].u & PGM_PDFLAGS_MAPPING)
449 {
450 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
451 STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
452 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
453 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
454 break;
455 }
456# endif /* !IN_RING0 */
457# ifndef IN_RING0
458 else
459# endif /* !IN_RING0 */
460 {
461 if (uShw.pPD->a[iShw2].n.u1Present)
462 {
463 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPD->a[iShw2].u));
464 pgmPoolFree(pVM,
465 uShw.pPD->a[iShw2].u & X86_PDE_PAE_PG_MASK,
466 pPage->idx,
467 iShw2);
468 ASMAtomicWriteSize(&uShw.pPD->a[iShw2].u, 0);
469 }
470 }
471 }
472 }
473#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
474 if ( uShw.pPD->a[iShw].n.u1Present
475 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
476 {
477 LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
478# ifdef IN_RC /* TLB load - we're pushing things a bit... */
479 ASMProbeReadByte(pvAddress);
480# endif
481 pgmPoolFree(pVM, uShw.pPD->a[iShw].u & X86_PDE_PG_MASK, pPage->idx, iShw);
482 ASMAtomicWriteSize(&uShw.pPD->a[iShw].u, 0);
483 }
484#endif
485 break;
486 }
487
488 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
489 {
490 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
491 const unsigned iShw = off / sizeof(X86PDEPAE);
492 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
493#ifndef IN_RING0
494 if (uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING)
495 {
496 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
497 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
498 STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
499 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
500 break;
501 }
502#endif /* !IN_RING0 */
503 /*
504 * Causes trouble when the guest uses a PDE to refer to the whole page table level
505 * structure. (Invalidate here; faults later on when it tries to change the page
506 * table entries -> recheck; probably only applies to the RC case.)
507 */
508# ifndef IN_RING0
509 else
510# endif /* !IN_RING0 */
511 {
512 if (uShw.pPDPae->a[iShw].n.u1Present)
513 {
514 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
515 pgmPoolFree(pVM,
516 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
517 pPage->idx,
518 iShw);
519 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw].u, 0);
520 }
521 }
522 /* paranoia / a bit assumptive. */
523 if ( (off & 7)
524 && (off & 7) + cbWrite > sizeof(X86PDEPAE))
525 {
526 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
527 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
528
529#ifndef IN_RING0
530 if ( iShw2 != iShw
531 && uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING)
532 {
533 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
534 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
535 STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
536 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
537 break;
538 }
539#endif /* !IN_RING0 */
540# ifndef IN_RING0
541 else
542# endif /* !IN_RING0 */
543 if (uShw.pPDPae->a[iShw2].n.u1Present)
544 {
545 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
546 pgmPoolFree(pVM,
547 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
548 pPage->idx,
549 iShw2);
550 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw2].u, 0);
551 }
552 }
553 break;
554 }
555
556 case PGMPOOLKIND_PAE_PDPT:
557 {
558 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPDPT));
559 /*
560 * Hopefully this doesn't happen very often:
561 * - touching unused parts of the page
562 * - messing with the bits of pd pointers without changing the physical address
563 */
564 /* PDPT roots are not page aligned; 32 byte only! */
565 const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
566
567 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
568 const unsigned iShw = offPdpt / sizeof(X86PDPE);
569 if (iShw < X86_PG_PAE_PDPE_ENTRIES) /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
570 {
571# ifndef IN_RING0
572 if (uShw.pPDPT->a[iShw].u & PGM_PLXFLAGS_MAPPING)
573 {
574 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
575 STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
576 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
577 LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));
578 break;
579 }
580# endif /* !IN_RING0 */
581# ifndef IN_RING0
582 else
583# endif /* !IN_RING0 */
584 if (uShw.pPDPT->a[iShw].n.u1Present)
585 {
586 LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
587 pgmPoolFree(pVM,
588 uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK,
589 pPage->idx,
590 iShw);
591 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw].u, 0);
592 }
593
594 /* paranoia / a bit assumptive. */
595 if ( (offPdpt & 7)
596 && (offPdpt & 7) + cbWrite > sizeof(X86PDPE))
597 {
598 const unsigned iShw2 = (offPdpt + cbWrite - 1) / sizeof(X86PDPE);
599 if ( iShw2 != iShw
600 && iShw2 < X86_PG_PAE_PDPE_ENTRIES)
601 {
602# ifndef IN_RING0
603 if (uShw.pPDPT->a[iShw2].u & PGM_PLXFLAGS_MAPPING)
604 {
605 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
606 STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
607 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
608 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
609 break;
610 }
611# endif /* !IN_RING0 */
612# ifndef IN_RING0
613 else
614# endif /* !IN_RING0 */
615 if (uShw.pPDPT->a[iShw2].n.u1Present)
616 {
617 LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
618 pgmPoolFree(pVM,
619 uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK,
620 pPage->idx,
621 iShw2);
622 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw2].u, 0);
623 }
624 }
625 }
626 }
627 break;
628 }
629
630#ifndef IN_RC
631 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
632 {
633 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
634 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
635 const unsigned iShw = off / sizeof(X86PDEPAE);
636 Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
637 if (uShw.pPDPae->a[iShw].n.u1Present)
638 {
639 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
640 pgmPoolFree(pVM,
641 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
642 pPage->idx,
643 iShw);
644 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw].u, 0);
645 }
646 /* paranoia / a bit assumptive. */
647 if ( (off & 7)
648 && (off & 7) + cbWrite > sizeof(X86PDEPAE))
649 {
650 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
651 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
652
653 Assert(!(uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING));
654 if (uShw.pPDPae->a[iShw2].n.u1Present)
655 {
656 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
657 pgmPoolFree(pVM,
658 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
659 pPage->idx,
660 iShw2);
661 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw2].u, 0);
662 }
663 }
664 break;
665 }
666
667 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
668 {
669 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPDPT));
670 /*
671 * Hopefully this doesn't happen very often:
672 * - messing with the bits of pd pointers without changing the physical address
673 */
674 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
675 const unsigned iShw = off / sizeof(X86PDPE);
676 if (uShw.pPDPT->a[iShw].n.u1Present)
677 {
678 LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
679 pgmPoolFree(pVM, uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK, pPage->idx, iShw);
680 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw].u, 0);
681 }
682 /* paranoia / a bit assumptive. */
683 if ( (off & 7)
684 && (off & 7) + cbWrite > sizeof(X86PDPE))
685 {
686 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDPE);
687 if (uShw.pPDPT->a[iShw2].n.u1Present)
688 {
689 LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
690 pgmPoolFree(pVM, uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK, pPage->idx, iShw2);
691 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw2].u, 0);
692 }
693 }
694 break;
695 }
696
697 case PGMPOOLKIND_64BIT_PML4:
698 {
699 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPML4));
700 /*
701 * Hopefully this doesn't happen very often:
702 * - messing with the bits of pd pointers without changing the physical address
703 */
704 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
705 const unsigned iShw = off / sizeof(X86PDPE);
706 if (uShw.pPML4->a[iShw].n.u1Present)
707 {
708 LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPML4->a[iShw].u));
709 pgmPoolFree(pVM, uShw.pPML4->a[iShw].u & X86_PML4E_PG_MASK, pPage->idx, iShw);
710 ASMAtomicWriteSize(&uShw.pPML4->a[iShw].u, 0);
711 }
712 /* paranoia / a bit assumptive. */
713 if ( (off & 7)
714 && (off & 7) + cbWrite > sizeof(X86PDPE))
715 {
716 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PML4E);
717 if (uShw.pPML4->a[iShw2].n.u1Present)
718 {
719 LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPML4->a[iShw2].u));
720 pgmPoolFree(pVM, uShw.pPML4->a[iShw2].u & X86_PML4E_PG_MASK, pPage->idx, iShw2);
721 ASMAtomicWriteSize(&uShw.pPML4->a[iShw2].u, 0);
722 }
723 }
724 break;
725 }
726#endif /* !IN_RC */
727
728 default:
729 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
730 }
731 PGMPOOL_UNLOCK_PTR(pVM, uShw.pv);
732
733 /* next */
734 if (pPage->iMonitoredNext == NIL_PGMPOOL_IDX)
735 return;
736 pPage = &pPool->aPages[pPage->iMonitoredNext];
737 }
738}
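/*
 * Editor's note: a self-contained restatement of the recurring "paranoia"
 * test above, for 8-byte (PAE) entries. It flags unaligned writes that
 * spill into the following entry so both entries get processed.
 */
#if 0 /* illustrative only */
static bool pgmPoolDemoWriteCrossesEntry(unsigned off, unsigned cbWrite)
{
    return (off & 7) && (off & 7) + cbWrite > 8;
}
#endif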
739
740# ifndef IN_RING3
741/**
742 * Checks if an access could be a fork operation in progress.
743 *
744 * Meaning that the guest is setting up the parent process for copy-on-write.
745 *
746 * @returns true if it's likely that we're forking, otherwise false.
747 * @param pPool The pool.
748 * @param pDis The disassembled instruction.
749 * @param offFault The access offset.
750 */
751DECLINLINE(bool) pgmPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault)
752{
753 /*
754 * i386 Linux uses btr to clear X86_PTE_RW.
755 * The functions involved are (2.6.16 source inspection):
756 * clear_bit
757 * ptep_set_wrprotect
758 * copy_one_pte
759 * copy_pte_range
760 * copy_pmd_range
761 * copy_pud_range
762 * copy_page_range
763 * dup_mmap
764 * dup_mm
765 * copy_mm
766 * copy_process
767 * do_fork
768 */
769 if ( pDis->pCurInstr->opcode == OP_BTR
770 && !(offFault & 4)
771 /** @todo Validate that the bit index is X86_PTE_RW. */
772 )
773 {
774 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,Fork));
775 return true;
776 }
777 return false;
778}
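/*
 * Editor's note (one reading of the !(offFault & 4) test): a PAE PTE is
 * 8 bytes and X86_PTE_RW is bit 1, i.e. in the low dword, so a btr that
 * clears R/W faults on an offset with bit 2 clear. A write landing in the
 * high dword (offFault & 4) is unlikely to be this copy-on-write setup.
 */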
779
780
781/**
782 * Determine whether the page is likely to have been reused.
783 *
784 * @returns true if we consider the page as being reused for a different purpose.
785 * @returns false if we consider it to still be a paging page.
786 * @param pVM VM Handle.
787 * @param pVCpu VMCPU Handle.
788 * @param pRegFrame Trap register frame.
789 * @param pDis The disassembly info for the faulting instruction.
790 * @param pvFault The fault address.
791 *
792 * @remark The REP prefix check is left to the caller because of STOSD/W.
793 */
794DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault)
795{
796#ifndef IN_RC
797 /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */
798 if ( HWACCMHasPendingIrq(pVM)
799 && (pRegFrame->rsp - pvFault) < 32)
800 {
801 /* Fault caused by stack writes while trying to inject an interrupt event. */
802 Log(("pgmPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp));
803 return true;
804 }
805#else
806 NOREF(pVM); NOREF(pvFault);
807#endif
808
809 LogFlow(("Reused instr %RGv %d at %RGv param1.flags=%x param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->opcode, pvFault, pDis->param1.flags, pDis->param1.base.reg_gen));
810
811 /* Non-supervisor mode write means it's used for something else. */
812 if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
813 return true;
814
815 switch (pDis->pCurInstr->opcode)
816 {
817 /* call implies the actual push of the return address faulted */
818 case OP_CALL:
819 Log4(("pgmPoolMonitorIsReused: CALL\n"));
820 return true;
821 case OP_PUSH:
822 Log4(("pgmPoolMonitorIsReused: PUSH\n"));
823 return true;
824 case OP_PUSHF:
825 Log4(("pgmPoolMonitorIsReused: PUSHF\n"));
826 return true;
827 case OP_PUSHA:
828 Log4(("pgmPoolMonitorIsReused: PUSHA\n"));
829 return true;
830 case OP_FXSAVE:
831 Log4(("pgmPoolMonitorIsReused: FXSAVE\n"));
832 return true;
833 case OP_MOVNTI: /* solaris - block_zero_no_xmm */
834 Log4(("pgmPoolMonitorIsReused: MOVNTI\n"));
835 return true;
836 case OP_MOVNTDQ: /* solaris - hwblkclr & hwblkpagecopy */
837 Log4(("pgmPoolMonitorIsReused: MOVNTDQ\n"));
838 return true;
839 case OP_MOVSWD:
840 case OP_STOSWD:
841 if ( pDis->prefix == (PREFIX_REP|PREFIX_REX)
842 && pRegFrame->rcx >= 0x40
843 )
844 {
845 Assert(pDis->mode == CPUMODE_64BIT);
846
847 Log(("pgmPoolMonitorIsReused: OP_STOSQ\n"));
848 return true;
849 }
850 return false;
851 }
852 if ( ( (pDis->param1.flags & USE_REG_GEN32)
853 || (pDis->param1.flags & USE_REG_GEN64))
854 && (pDis->param1.base.reg_gen == USE_REG_ESP))
855 {
856 Log4(("pgmPoolMonitorIsReused: ESP\n"));
857 return true;
858 }
859
860 return false;
861}
862
863/**
864 * Flushes the page being accessed.
865 *
866 * @returns VBox status code suitable for scheduling.
867 * @param pVM The VM handle.
868 * @param pVCpu The VMCPU handle.
869 * @param pPool The pool.
870 * @param pPage The pool page (head).
871 * @param pDis The disassembly of the write instruction.
872 * @param pRegFrame The trap register frame.
873 * @param GCPhysFault The fault address as guest physical address.
874 * @param pvFault The fault address.
875 */
876static int pgmPoolAccessHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
877 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
878{
879 /*
880 * First, do the flushing.
881 */
882 int rc = pgmPoolMonitorChainFlush(pPool, pPage);
883
884 /*
885 * Emulate the instruction (XP/W2K problem, requires PC/CR2/SP detection). Must do this in raw mode (!); XP boot will fail otherwise.
886 */
887 uint32_t cbWritten;
888 int rc2 = EMInterpretInstructionCPUEx(pVM, pVCpu, pDis, pRegFrame, pvFault, &cbWritten, EMCODETYPE_ALL);
889 if (RT_SUCCESS(rc2))
890 pRegFrame->rip += pDis->opsize;
891 else if (rc2 == VERR_EM_INTERPRETER)
892 {
893#ifdef IN_RC
894 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
895 {
896 LogFlow(("pgmPoolAccessHandlerPTWorker: Interpretation failed for patch code %04x:%RGv, ignoring.\n",
897 pRegFrame->cs, (RTGCPTR)pRegFrame->eip));
898 rc = VINF_SUCCESS;
899 STAM_COUNTER_INC(&pPool->StatMonitorRZIntrFailPatch2);
900 }
901 else
902#endif
903 {
904 rc = VINF_EM_RAW_EMULATE_INSTR;
905 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
906 }
907 }
908 else
909 rc = rc2;
910
911 LogFlow(("pgmPoolAccessHandlerPT: returns %Rrc (flushed)\n", rc));
912 return rc;
913}
914
915/**
916 * Handles the STOSD write accesses.
917 *
918 * @returns VBox status code suitable for scheduling.
919 * @param pVM The VM handle.
920 * @param pPool The pool.
921 * @param pPage The pool page (head).
922 * @param pDis The disassembly of the write instruction.
923 * @param pRegFrame The trap register frame.
924 * @param GCPhysFault The fault address as guest physical address.
925 * @param pvFault The fault address.
926 */
927DECLINLINE(int) pgmPoolAccessHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
928 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
929{
930 unsigned uIncrement = pDis->param1.size;
931
932 Assert(pDis->mode == CPUMODE_32BIT || pDis->mode == CPUMODE_64BIT);
933 Assert(pRegFrame->rcx <= 0x20);
934
935#ifdef VBOX_STRICT
936 if (pDis->opmode == CPUMODE_32BIT)
937 Assert(uIncrement == 4);
938 else
939 Assert(uIncrement == 8);
940#endif
941
942 Log3(("pgmPoolAccessHandlerSTOSD\n"));
943
944 /*
945 * Increment the modification counter and insert it into the list
946 * of modified pages the first time.
947 */
948 if (!pPage->cModifications++)
949 pgmPoolMonitorModifiedInsert(pPool, pPage);
950
951 /*
952 * Execute REP STOSD.
953 *
954 * This ASSUMES that we're not invoked by Trap0e in an out-of-sync
955 * write situation, meaning that it's safe to write here.
956 */
957 PVMCPU pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
958 RTGCUINTPTR pu32 = (RTGCUINTPTR)pvFault;
959 while (pRegFrame->rcx)
960 {
961#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
962 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
963 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
964 PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
965#else
966 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
967#endif
968#ifdef IN_RC
969 *(uint32_t *)(uintptr_t)pu32 = pRegFrame->eax;
970#else
971 PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement);
972#endif
973 pu32 += uIncrement;
974 GCPhysFault += uIncrement;
975 pRegFrame->rdi += uIncrement;
976 pRegFrame->rcx--;
977 }
978 pRegFrame->rip += pDis->opsize;
979
980 LogFlow(("pgmPoolAccessHandlerSTOSD: returns\n"));
981 return VINF_SUCCESS;
982}
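/*
 * Editor's note: a self-contained sketch of the REP STOSD semantics the
 * handler re-implements for the DF=0 case: store EAX/RAX, advance the
 * destination, decrement the counter. The helper name is hypothetical.
 */
#if 0 /* illustrative only */
static void pgmPoolDemoRepStosd(uint32_t *pu32Dst, uint32_t u32Val, uint32_t cDwords)
{
    while (cDwords--)        /* rcx */
        *pu32Dst++ = u32Val; /* [rdi] = eax; rdi += 4 */
}
#endif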
983
984
985/**
986 * Handles the simple write accesses.
987 *
988 * @returns VBox status code suitable for scheduling.
989 * @param pVM The VM handle.
990 * @param pVCpu The VMCPU handle.
991 * @param pPool The pool.
992 * @param pPage The pool page (head).
993 * @param pDis The disassembly of the write instruction.
994 * @param pRegFrame The trap register frame.
995 * @param GCPhysFault The fault address as guest physical address.
996 * @param pvFault The fault address.
997 * @param pfReused Reused state (out)
998 */
999DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
1000 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused)
1001{
1002 Log3(("pgmPoolAccessHandlerSimple\n"));
1003 /*
1004 * Increment the modification counter and insert it into the list
1005 * of modified pages the first time.
1006 */
1007 if (!pPage->cModifications++)
1008 pgmPoolMonitorModifiedInsert(pPool, pPage);
1009
1010 /*
1011 * Process the monitored page chain for this write. ASSUMES that pvFault is readable.
1012 */
1013#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1014 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
1015 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
1016 PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
1017#else
1018 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
1019#endif
1020
1021 /*
1022 * Interpret the instruction.
1023 */
1024 uint32_t cb;
1025 int rc = EMInterpretInstructionCPUEx(pVM, pVCpu, pDis, pRegFrame, pvFault, &cb, EMCODETYPE_ALL);
1026 if (RT_SUCCESS(rc))
1027 pRegFrame->rip += pDis->opsize;
1028 else if (rc == VERR_EM_INTERPRETER)
1029 {
1030 LogFlow(("pgmPoolAccessHandlerPTWorker: Interpretation failed for %04x:%RGv - opcode=%d\n",
1031 pRegFrame->cs, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode));
1032 rc = VINF_EM_RAW_EMULATE_INSTR;
1033 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
1034 }
1035
1036#if 0 /* experimental code */
1037 if (rc == VINF_SUCCESS)
1038 {
1039 switch (pPage->enmKind)
1040 {
1041 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1042 {
1043 X86PTEPAE GstPte;
1044 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvFault, GCPhysFault, sizeof(GstPte));
1045 AssertRC(rc);
1046
1047 /* Check the new value written by the guest. If present and with a bogus physical address, then
1048 * it's fairly safe to assume the guest is reusing the PT.
1049 */
1050 if (GstPte.n.u1Present)
1051 {
1052 RTHCPHYS HCPhys = -1;
1053 int rc = PGMPhysGCPhys2HCPhys(pVM, GstPte.u & X86_PTE_PAE_PG_MASK, &HCPhys);
1054 if (rc != VINF_SUCCESS)
1055 {
1056 *pfReused = true;
1057 STAM_COUNTER_INC(&pPool->StatForceFlushReused);
1058 }
1059 }
1060 break;
1061 }
1062 }
1063 }
1064#endif
1065
1066 LogFlow(("pgmPoolAccessHandlerSimple: returns %Rrc cb=%d\n", rc, cb));
1067 return rc;
1068}
1069
1070/**
1071 * \#PF Handler callback for PT write accesses.
1072 *
1073 * @returns VBox status code (appropriate for GC return).
1074 * @param pVM VM Handle.
1075 * @param uErrorCode CPU Error code.
1076 * @param pRegFrame Trap register frame.
1077 * NULL on DMA and other non CPU access.
1078 * @param pvFault The fault address (cr2).
1079 * @param GCPhysFault The GC physical address corresponding to pvFault.
1080 * @param pvUser User argument.
1081 */
1082DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1083{
1084 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
1085 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1086 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
1087 PVMCPU pVCpu = VMMGetCpu(pVM);
1088 unsigned cMaxModifications;
1089 bool fForcedFlush = false;
1090
1091 LogFlow(("pgmPoolAccessHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
1092
1093 pgmLock(pVM);
1094 if (PHYS_PAGE_ADDRESS(GCPhysFault) != PHYS_PAGE_ADDRESS(pPage->GCPhys))
1095 {
1096 /* Pool page changed while we were waiting for the lock; ignore. */
1097 Log(("CPU%d: pgmPoolAccessHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
1098 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
1099 pgmUnlock(pVM);
1100 return VINF_SUCCESS;
1101 }
1102#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1103 if (pPage->fDirty)
1104 {
1105 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH));
1106 pgmUnlock(pVM);
1107 return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. */
1108 }
1109#endif
1110
1111#if 0 /* test code defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) */
1112 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1113 {
1114 void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
1115 void *pvGst;
1116 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1117 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
1118 }
1119#endif
1120
1121 /*
1122 * Disassemble the faulting instruction.
1123 */
1124 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
1125 int rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, NULL);
1126 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1127 {
1128 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("Unexpected rc %d\n", rc));
1129 pgmUnlock(pVM);
1130 return rc;
1131 }
1132
1133 Assert(pPage->enmKind != PGMPOOLKIND_FREE);
1134
1135 /*
1136 * We should ALWAYS have the list head as user parameter. This
1137 * is because we use that page to record the changes.
1138 */
1139 Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1140
1141#ifdef IN_RING0
1142 /* Maximum nr of modifications depends on the page type. */
1143 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1144 cMaxModifications = 4;
1145 else
1146 cMaxModifications = 24;
1147#else
1148 cMaxModifications = 48;
1149#endif
1150
1151 /*
1152 * Incremental page table updates should weigh more than random ones.
1153 * (Only applies when started from offset 0)
1154 */
1155 pVCpu->pgm.s.cPoolAccessHandler++;
1156 if ( pPage->pvLastAccessHandlerRip >= pRegFrame->rip - 0x40 /* observed loops in Windows 7 x64 */
1157 && pPage->pvLastAccessHandlerRip < pRegFrame->rip + 0x40
1158 && pvFault == (pPage->pvLastAccessHandlerFault + pDis->param1.size)
1159 && pVCpu->pgm.s.cPoolAccessHandler == (pPage->cLastAccessHandlerCount + 1))
1160 {
1161 Log(("Possible page reuse cMods=%d -> %d (locked=%d type=%s)\n", pPage->cModifications, pPage->cModifications * 2, pgmPoolIsPageLocked(&pVM->pgm.s, pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
1162 pPage->cModifications = pPage->cModifications * 2;
1163 pPage->pvLastAccessHandlerFault = pvFault;
1164 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler;
1165 if (pPage->cModifications >= cMaxModifications)
1166 {
1167 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushReinit));
1168 fForcedFlush = true;
1169 }
1170 }
1171
1172 if (pPage->cModifications >= cMaxModifications)
1173 Log(("Mod overflow %RGv cMods=%d (locked=%d type=%s)\n", pvFault, pPage->cModifications, pgmPoolIsPageLocked(&pVM->pgm.s, pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
1174
1175 /*
1176 * Check if it's worth dealing with.
1177 */
1178 bool fReused = false;
1179 bool fNotReusedNotForking = false;
1180 if ( ( pPage->cModifications < cMaxModifications /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */
1181 || pgmPoolIsPageLocked(&pVM->pgm.s, pPage)
1182 )
1183 && !(fReused = pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault))
1184 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
1185 {
1186 /*
1187 * Simple instructions, no REP prefix.
1188 */
1189 if (!(pDis->prefix & (PREFIX_REP | PREFIX_REPNE)))
1190 {
1191 rc = pgmPoolAccessHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused);
1192 if (fReused)
1193 goto flushPage;
1194
1195 /* A mov instruction to change the first page table entry will be remembered so we can detect
1196 * full page table changes early on. This will reduce the amount of unnecessary traps we'll take.
1197 */
1198 if ( rc == VINF_SUCCESS
1199 && pDis->pCurInstr->opcode == OP_MOV
1200 && (pvFault & PAGE_OFFSET_MASK) == 0)
1201 {
1202 pPage->pvLastAccessHandlerFault = pvFault;
1203 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler;
1204 pPage->pvLastAccessHandlerRip = pRegFrame->rip;
1205 /* Make sure we don't kick out a page too quickly. */
1206 if (pPage->cModifications > 8)
1207 pPage->cModifications = 2;
1208 }
1209 else
1210 if (pPage->pvLastAccessHandlerFault == pvFault)
1211 {
1212 /* ignore the 2nd write to this page table entry. */
1213 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler;
1214 }
1215 else
1216 {
1217 pPage->pvLastAccessHandlerFault = 0;
1218 pPage->pvLastAccessHandlerRip = 0;
1219 }
1220
1221 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
1222 pgmUnlock(pVM);
1223 return rc;
1224 }
1225
1226 /*
1227 * Windows is frequently doing small memset() operations (netio test 4k+).
1228 * We have to deal with these or we'll kill the cache and performance.
1229 */
1230 if ( pDis->pCurInstr->opcode == OP_STOSWD
1231 && !pRegFrame->eflags.Bits.u1DF
1232 && pDis->opmode == pDis->mode
1233 && pDis->addrmode == pDis->mode)
1234 {
1235 bool fValidStosd = false;
1236
1237 if ( pDis->mode == CPUMODE_32BIT
1238 && pDis->prefix == PREFIX_REP
1239 && pRegFrame->ecx <= 0x20
1240 && pRegFrame->ecx * 4 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
1241 && !((uintptr_t)pvFault & 3)
1242 && (pRegFrame->eax == 0 || pRegFrame->eax == 0x80) /* the two values observed. */
1243 )
1244 {
1245 fValidStosd = true;
1246 pRegFrame->rcx &= 0xffffffff; /* paranoia */
1247 }
1248 else
1249 if ( pDis->mode == CPUMODE_64BIT
1250 && pDis->prefix == (PREFIX_REP | PREFIX_REX)
1251 && pRegFrame->rcx <= 0x20
1252 && pRegFrame->rcx * 8 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
1253 && !((uintptr_t)pvFault & 7)
1254 && (pRegFrame->rax == 0 || pRegFrame->rax == 0x80) /* the two values observed. */
1255 )
1256 {
1257 fValidStosd = true;
1258 }
1259
1260 if (fValidStosd)
1261 {
1262 rc = pgmPoolAccessHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
1263 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,RepStosd), a);
1264 pgmUnlock(pVM);
1265 return rc;
1266 }
1267 }
1268
1269 /* REP prefix, don't bother. */
1270 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,RepPrefix));
1271 Log4(("pgmPoolAccessHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n",
1272 pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode, pDis->prefix));
1273 fNotReusedNotForking = true;
1274 }
1275
1276#if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) && defined(IN_RING0)
1277 /* E.g. Windows 7 x64 initializes page tables and touches some pages in the table during the process. This
1278 * leads to pgm pool thrashing and an excessive amount of write faults due to page monitoring.
1279 */
1280 if ( pPage->cModifications >= cMaxModifications
1281 && !fForcedFlush
1282 && pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
1283 && ( fNotReusedNotForking
1284 || ( !pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)
1285 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
1286 )
1287 )
1288 {
1289 Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
1290 Assert(pPage->fDirty == false);
1291
1292 /* Flush any monitored duplicates as we will disable write protection. */
1293 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX
1294 || pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
1295 {
1296 PPGMPOOLPAGE pPageHead = pPage;
1297
1298 /* Find the monitor head. */
1299 while (pPageHead->iMonitoredPrev != NIL_PGMPOOL_IDX)
1300 pPageHead = &pPool->aPages[pPageHead->iMonitoredPrev];
1301
1302 while (pPageHead)
1303 {
1304 unsigned idxNext = pPageHead->iMonitoredNext;
1305
1306 if (pPageHead != pPage)
1307 {
1308 STAM_COUNTER_INC(&pPool->StatDirtyPageDupFlush);
1309 Log(("Flush duplicate page idx=%d GCPhys=%RGp type=%s\n", pPageHead->idx, pPageHead->GCPhys, pgmPoolPoolKindToStr(pPageHead->enmKind)));
1310 int rc2 = pgmPoolFlushPage(pPool, pPageHead);
1311 AssertRC(rc2);
1312 }
1313
1314 if (idxNext == NIL_PGMPOOL_IDX)
1315 break;
1316
1317 pPageHead = &pPool->aPages[idxNext];
1318 }
1319 }
1320
1321 /* The flushing above might fail for locked pages, so double check. */
1322 if ( pPage->iMonitoredNext == NIL_PGMPOOL_IDX
1323 && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX)
1324 {
1325 pgmPoolAddDirtyPage(pVM, pPool, pPage);
1326
1327 /* Temporarily allow write access to the page table again. */
1328 rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys, pPage->GCPhys);
1329 if (rc == VINF_SUCCESS)
1330 {
1331 rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
1332 AssertMsg(rc == VINF_SUCCESS
1333 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
1334 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1335 || rc == VERR_PAGE_NOT_PRESENT,
1336 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc));
1337
1338 pPage->pvDirtyFault = pvFault;
1339
1340 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
1341 pgmUnlock(pVM);
1342 return rc;
1343 }
1344 }
1345 }
1346#endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
1347
1348 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushModOverflow));
1349flushPage:
1350 /*
1351 * Not worth it, so flush it.
1352 *
1353 * If we considered it to be reused, don't go back to ring-3
1354 * to emulate failed instructions since we usually cannot
1355 * interpret them. This may be a bit risky, in which case
1356 * the reuse detection must be fixed.
1357 */
1358 rc = pgmPoolAccessHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
1359 if ( rc == VINF_EM_RAW_EMULATE_INSTR
1360 && fReused)
1361 {
1362 /* Make sure that the current instruction still has shadow page backing, otherwise we'll end up in a loop. */
1363 if (PGMShwGetPage(pVCpu, pRegFrame->rip, NULL, NULL) == VINF_SUCCESS)
1364 rc = VINF_SUCCESS; /* safe to restart the instruction. */
1365 }
1366 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a);
1367 pgmUnlock(pVM);
1368 return rc;
1369}
1370
1371# endif /* !IN_RING3 */
1372
1373# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1374
1375# ifdef VBOX_STRICT
1376/**
1377 * Check references to guest physical memory in a PAE / PAE page table.
1378 *
1379 * @param pPool The pool.
1380 * @param pPage The page.
1381 * @param pShwPT The shadow page table (mapping of the page).
1382 * @param pGstPT The guest page table.
1383 */
1384static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
1385{
1386 unsigned cErrors = 0;
1387 int LastRc = -1; /* initialized to shut up gcc */
1388 unsigned LastPTE = ~0U; /* initialized to shut up gcc */
1389 RTHCPHYS LastHCPhys = NIL_RTHCPHYS; /* initialized to shut up gcc */
1390
1391#ifdef VBOX_STRICT
1392 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1393 AssertMsg(!pShwPT->a[i].n.u1Present, ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
1394#endif
1395 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1396 {
1397 if (pShwPT->a[i].n.u1Present)
1398 {
1399 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1400 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
1401 if ( rc != VINF_SUCCESS
1402 || (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) != HCPhys)
1403 {
1404 Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
1405 LastPTE = i;
1406 LastRc = rc;
1407 LastHCPhys = HCPhys;
1408 cErrors++;
1409
1410 RTHCPHYS HCPhysPT = NIL_RTHCPHYS;
1411 rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pPage->GCPhys, &HCPhysPT);
1412 AssertRC(rc);
1413
1414 for (unsigned iPage = 0; iPage < pPool->cCurPages; iPage++)
1415 {
1416 PPGMPOOLPAGE pTempPage = &pPool->aPages[iPage];
1417
1418 if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1419 {
1420 PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
1421
1422 for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
1423 {
1424 if ( pShwPT2->a[j].n.u1Present
1425 && pShwPT2->a[j].n.u1Write
1426 && ((pShwPT2->a[j].u & X86_PTE_PAE_PG_MASK) == HCPhysPT))
1427 {
1428 Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, pShwPT->a[j].u, pShwPT2->a[j].u));
1429 }
1430 }
1431 }
1432 }
1433 }
1434 }
1435 }
1436 AssertMsg(!cErrors, ("cErrors=%d: last rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", cErrors, LastRc, LastPTE, pGstPT->a[LastPTE].u, pShwPT->a[LastPTE].u, LastHCPhys));
1437}
1438# endif /* VBOX_STRICT */
1439
1440/**
1441 * Clear references to guest physical memory in a PAE / PAE page table.
1442 *
1443 * @returns Number of changed PTEs.
1444 * @param pPool The pool.
1445 * @param pPage The page.
1446 * @param pShwPT The shadow page table (mapping of the page).
1447 * @param pGstPT The guest page table.
1448 * @param pOldGstPT The old cached guest page table.
1449 * @param fAllowRemoval Bail out as soon as we encounter an invalid PTE
1450 * @param pfFlush Flush reused page table (out)
1451 */
1452DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT, PCX86PTPAE pOldGstPT, bool fAllowRemoval, bool *pfFlush)
1453{
1454 unsigned cChanged = 0;
1455
1456#ifdef VBOX_STRICT
1457 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1458 AssertMsg(!pShwPT->a[i].n.u1Present, ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, pShwPT->a[i].u, pPage->iFirstPresent));
1459#endif
1460 *pfFlush = false;
1461
1462 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1463 {
1464 /* Check the new value written by the guest. If present and with a bogus physical address, then
1465 * it's fairly safe to assume the guest is reusing the PT.
1466 */
1467 if ( fAllowRemoval
1468 && pGstPT->a[i].n.u1Present)
1469 {
1470 if (!PGMPhysIsGCPhysValid(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
1471 {
1472 *pfFlush = true;
1473 return ++cChanged;
1474 }
1475 }
1476 if (pShwPT->a[i].n.u1Present)
1477 {
1478 /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
1479 if ((pGstPT->a[i].u & X86_PTE_PAE_PG_MASK) == (pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
1480 {
1481#ifdef VBOX_STRICT
1482 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1483 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
1484 AssertMsg(rc == VINF_SUCCESS && (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) == HCPhys, ("rc=%d guest %RX64 old %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pOldGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
1485#endif
1486 uint64_t uHostAttr = pShwPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
1487 bool fHostRW = !!(pShwPT->a[i].u & X86_PTE_RW);
1488 uint64_t uGuestAttr = pGstPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
1489 bool fGuestRW = !!(pGstPT->a[i].u & X86_PTE_RW);
1490
1491 if ( uHostAttr == uGuestAttr
1492 && fHostRW <= fGuestRW)
1493 continue;
1494 }
1495 cChanged++;
1496 /* Something was changed, so flush it. */
1497 Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX64 hint=%RX64\n",
1498 i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
1499 pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK);
1500 ASMAtomicWriteSize(&pShwPT->a[i].u, 0);
1501 }
1502 }
1503 return cChanged;
1504}
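/*
 * Editor's note: the attribute check above deliberately tolerates a
 * shadow PTE that is more restrictive than the guest's (fHostRW <=
 * fGuestRW), since write monitoring legitimately strips X86_PTE_RW from
 * the shadow copy even when the guest entry remains writable.
 */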
1505
1506
1507/**
1508 * Flushes a dirty page.
1509 *
1510 * @param pVM VM Handle.
1511 * @param pPool The pool.
1512 * @param idxSlot Dirty array slot index.
1513 * @param fAllowRemoval Allow a reused page table to be removed.
1514 */
1515static void pgmPoolFlushDirtyPage(PVM pVM, PPGMPOOL pPool, unsigned idxSlot, bool fAllowRemoval = false)
1516{
1517 PPGMPOOLPAGE pPage;
1518 unsigned idxPage;
1519
1520 Assert(idxSlot < RT_ELEMENTS(pPool->aIdxDirtyPages));
1521 if (pPool->aIdxDirtyPages[idxSlot] == NIL_PGMPOOL_IDX)
1522 return;
1523
1524 idxPage = pPool->aIdxDirtyPages[idxSlot];
1525 AssertRelease(idxPage != NIL_PGMPOOL_IDX);
1526 pPage = &pPool->aPages[idxPage];
1527 Assert(pPage->idx == idxPage);
1528 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1529
1530 AssertMsg(pPage->fDirty, ("Page %RGp (slot=%d) not marked dirty!", pPage->GCPhys, idxSlot));
1531 Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));
1532
1533 /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
1534 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys);
1535 Assert(rc == VINF_SUCCESS);
1536 pPage->fDirty = false;
1537
1538#ifdef VBOX_STRICT
1539 uint64_t fFlags = 0;
1540 RTHCPHYS HCPhys;
1541 rc = PGMShwGetPage(VMMGetCpu(pVM), pPage->pvDirtyFault, &fFlags, &HCPhys);
1542 AssertMsg( ( rc == VINF_SUCCESS
1543 && (!(fFlags & X86_PTE_RW) || HCPhys != pPage->Core.Key))
1544 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
1545 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1546 || rc == VERR_PAGE_NOT_PRESENT,
1547 ("PGMShwGetPage -> GCPtr=%RGv rc=%d flags=%RX64\n", pPage->pvDirtyFault, rc, fFlags));
1548#endif
1549
1550 /* Flush those PTEs that have changed. */
1551 STAM_PROFILE_START(&pPool->StatTrackDeref,a);
1552 void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
1553 void *pvGst;
1554 bool fFlush;
1555 rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1556 unsigned cChanges = pgmPoolTrackFlushPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst, (PCX86PTPAE)&pPool->aDirtyPages[idxSlot][0], fAllowRemoval, &fFlush);
1557 STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
1558 /* Note: we might want to consider keeping the dirty page active in case there were many changes. */
1559
1560 /* This page is likely to be modified again, so reduce the nr of modifications just a bit here. */
1561 Assert(pPage->cModifications);
1562 if (cChanges < 4)
1563 pPage->cModifications = 1; /* must use > 0 here */
1564 else
1565 pPage->cModifications = RT_MAX(1, pPage->cModifications / 2);
1566
1567 STAM_COUNTER_INC(&pPool->StatResetDirtyPages);
1568 if (pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages))
1569 pPool->idxFreeDirtyPage = idxSlot;
1570
1571 pPool->cDirtyPages--;
1572 pPool->aIdxDirtyPages[idxSlot] = NIL_PGMPOOL_IDX;
1573 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
1574 if (fFlush)
1575 {
1576 Assert(fAllowRemoval);
1577 Log(("Flush reused page table!\n"));
1578 pgmPoolFlushPage(pPool, pPage);
1579 STAM_COUNTER_INC(&pPool->StatForceFlushReused);
1580 }
1581 else
1582 Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
1583}
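/* A worked example of the damping above (illustrative numbers only): a page
 * with cModifications = 8 where 4 or more PTEs changed keeps half its history
 * (8 / 2 = 4), while one with fewer than 4 changes drops straight to 1. Both
 * paths leave the counter > 0; a zero counter would cause the page to be
 * re-inserted into the modified list:
 *
 *      cChanges < 4:   cModifications = 1
 *      cChanges >= 4:  cModifications = RT_MAX(1, cModifications / 2)
 */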
1584
1585# ifndef IN_RING3
1586/**
1587 * Adds a new dirty page.
1588 *
1589 * @param pVM VM Handle.
1590 * @param pPool The pool.
1591 * @param pPage The page.
1592 */
1593void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1594{
1595 unsigned idxFree;
1596
1597 Assert(PGMIsLocked(pVM));
1598 AssertCompile(RT_ELEMENTS(pPool->aIdxDirtyPages) == 8 || RT_ELEMENTS(pPool->aIdxDirtyPages) == 16);
1599 Assert(!pPage->fDirty);
1600
1601 idxFree = pPool->idxFreeDirtyPage;
1602 Assert(idxFree < RT_ELEMENTS(pPool->aIdxDirtyPages));
1603 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1604
1605 if (pPool->cDirtyPages >= RT_ELEMENTS(pPool->aIdxDirtyPages))
1606 {
1607 STAM_COUNTER_INC(&pPool->StatDirtyPageOverFlowFlush);
1608 pgmPoolFlushDirtyPage(pVM, pPool, idxFree, true /* allow removal of reused page tables */);
1609 }
1610 Assert(pPool->cDirtyPages < RT_ELEMENTS(pPool->aIdxDirtyPages));
1611 AssertMsg(pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX, ("idxFree=%d cDirtyPages=%d\n", idxFree, pPool->cDirtyPages));
1612
1613 Log(("Add dirty page %RGp (slot=%d)\n", pPage->GCPhys, idxFree));
1614
1615 /* Make a copy of the guest page table as we require valid GCPhys addresses when removing
1616 * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!)
1617 */
1618 void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
1619 void *pvGst;
1620 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1621 memcpy(&pPool->aDirtyPages[idxFree][0], pvGst, PAGE_SIZE);
1622#ifdef VBOX_STRICT
1623 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
1624#endif
1625
1626 STAM_COUNTER_INC(&pPool->StatDirtyPage);
1627 pPage->fDirty = true;
1628 pPage->idxDirty = idxFree;
1629 pPool->aIdxDirtyPages[idxFree] = pPage->idx;
1630 pPool->cDirtyPages++;
1631
1632 pPool->idxFreeDirtyPage = (pPool->idxFreeDirtyPage + 1) & (RT_ELEMENTS(pPool->aIdxDirtyPages) - 1);
1633 if ( pPool->cDirtyPages < RT_ELEMENTS(pPool->aIdxDirtyPages)
1634 && pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
1635 {
1636 unsigned i;
1637 for (i = 1; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1638 {
1639 idxFree = (pPool->idxFreeDirtyPage + i) & (RT_ELEMENTS(pPool->aIdxDirtyPages) - 1);
1640 if (pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX)
1641 {
1642 pPool->idxFreeDirtyPage = idxFree;
1643 break;
1644 }
1645 }
1646 Assert(i != RT_ELEMENTS(pPool->aIdxDirtyPages));
1647 }
1648
1649 Assert(pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages) || pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] == NIL_PGMPOOL_IDX);
1650 return;
1651}
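/* The free slot arithmetic above depends on the AssertCompile'd power-of-two
 * array size. A minimal sketch of the wrap-around with 16 slots (hypothetical
 * helper, not part of the pool API):
 *
 *  static unsigned pgmPoolNextDirtySlot(unsigned idxSlot)
 *  {
 *      return (idxSlot + 1) & (16 - 1);    // 15 wraps to 0
 *  }
 */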
1652# endif /* !IN_RING3 */
1653
1654/**
1655 * Checks if the specified page is dirty (not write monitored).
1656 *
1657 * @returns true if dirty, false if not.
1658 * @param pVM VM Handle.
1659 * @param GCPhys Guest physical address.
1660 */
1661bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys)
1662{
1663 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1664 Assert(PGMIsLocked(pVM));
1665 if (!pPool->cDirtyPages)
1666 return false;
1667
1668 GCPhys = GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
1669
1670 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1671 {
1672 if (pPool->aIdxDirtyPages[i] != NIL_PGMPOOL_IDX)
1673 {
1674 PPGMPOOLPAGE pPage;
1675 unsigned idxPage = pPool->aIdxDirtyPages[i];
1676
1677 pPage = &pPool->aPages[idxPage];
1678 if (pPage->GCPhys == GCPhys)
1679 return true;
1680 }
1681 }
1682 return false;
1683}
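/* Callers need not page align the address; the masking above takes care of
 * that. A minimal usage sketch (hypothetical caller and variable):
 *
 *  if (pgmPoolIsDirtyPage(pVM, GCPhysFault))
 *      // writes to this page table are currently not monitored
 */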
1684
1685/**
1686 * Resets all dirty pages by reinstating page monitoring.
1687 *
1688 * @param pVM VM Handle.
1689 */
1690void pgmPoolResetDirtyPages(PVM pVM)
1691{
1692 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1693 Assert(PGMIsLocked(pVM));
1694 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
1695
1696 if (!pPool->cDirtyPages)
1697 return;
1698
1699 Log(("pgmPoolResetDirtyPages\n"));
1700 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1701 pgmPoolFlushDirtyPage(pVM, pPool, i, true /* allow removal of reused page tables */);
1702
1703 pPool->idxFreeDirtyPage = 0;
1704 if ( pPool->cDirtyPages != RT_ELEMENTS(pPool->aIdxDirtyPages)
1705 && pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
1706 {
1707 unsigned i;
1708 for (i = 1; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1709 {
1710 if (pPool->aIdxDirtyPages[i] == NIL_PGMPOOL_IDX)
1711 {
1712 pPool->idxFreeDirtyPage = i;
1713 break;
1714 }
1715 }
1716 AssertMsg(i != RT_ELEMENTS(pPool->aIdxDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
1717 }
1718
1719 Assert(pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] == NIL_PGMPOOL_IDX || pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages));
1720 return;
1721}
1722
1723/**
1724 * Flushes the given page table from the dirty set, if present, reinstating page monitoring.
1725 *
1726 * @param pVM VM Handle.
1727 * @param GCPhysPT Physical address of the page table.
1728 */
1729void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT)
1730{
1731 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1732 Assert(PGMIsLocked(pVM));
1733 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
1734 unsigned idxDirtyPage = RT_ELEMENTS(pPool->aIdxDirtyPages);
1735
1736 if (!pPool->cDirtyPages)
1737 return;
1738
1739 GCPhysPT = GCPhysPT & ~(RTGCPHYS)(PAGE_SIZE - 1);
1740
1741 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1742 {
1743 if (pPool->aIdxDirtyPages[i] != NIL_PGMPOOL_IDX)
1744 {
1745 unsigned idxPage = pPool->aIdxDirtyPages[i];
1746
1747 PPGMPOOLPAGE pPage = &pPool->aPages[idxPage];
1748 if (pPage->GCPhys == GCPhysPT)
1749 {
1750 idxDirtyPage = i;
1751 break;
1752 }
1753 }
1754 }
1755
1756 if (idxDirtyPage != RT_ELEMENTS(pPool->aIdxDirtyPages))
1757 {
1758 pgmPoolFlushDirtyPage(pVM, pPool, idxDirtyPage, true /* allow removal of reused page tables */);
1759 if ( pPool->cDirtyPages != RT_ELEMENTS(pPool->aIdxDirtyPages)
1760 && pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
1761 {
1762 unsigned i;
1763 for (i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1764 {
1765 if (pPool->aIdxDirtyPages[i] == NIL_PGMPOOL_IDX)
1766 {
1767 pPool->idxFreeDirtyPage = i;
1768 break;
1769 }
1770 }
1771 AssertMsg(i != RT_ELEMENTS(pPool->aIdxDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
1772 }
1773 }
1774}
1775
1776# endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
1777
1778/**
1779 * Inserts a page into the GCPhys hash table.
1780 *
1781 * @param pPool The pool.
1782 * @param pPage The page.
1783 */
1784DECLINLINE(void) pgmPoolHashInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1785{
1786 Log3(("pgmPoolHashInsert: %RGp\n", pPage->GCPhys));
1787 Assert(pPage->GCPhys != NIL_RTGCPHYS); Assert(pPage->iNext == NIL_PGMPOOL_IDX);
1788 uint16_t iHash = PGMPOOL_HASH(pPage->GCPhys);
1789 pPage->iNext = pPool->aiHash[iHash];
1790 pPool->aiHash[iHash] = pPage->idx;
1791}
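/* Lookups walk the bucket chain through iNext, starting at the hashed head;
 * a minimal sketch of such a walk (pgmPoolCacheAlloc and
 * pgmPoolMonitorGetPageByGCPhys below do exactly this):
 *
 *  unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
 *  while (i != NIL_PGMPOOL_IDX)
 *  {
 *      PPGMPOOLPAGE pPage = &pPool->aPages[i];
 *      if (pPage->GCPhys == GCPhys)
 *          return pPage;
 *      i = pPage->iNext;
 *  }
 *  return NULL;
 */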
1792
1793
1794/**
1795 * Removes a page from the GCPhys hash table.
1796 *
1797 * @param pPool The pool.
1798 * @param pPage The page.
1799 */
1800DECLINLINE(void) pgmPoolHashRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1801{
1802 Log3(("pgmPoolHashRemove: %RGp\n", pPage->GCPhys));
1803 uint16_t iHash = PGMPOOL_HASH(pPage->GCPhys);
1804 if (pPool->aiHash[iHash] == pPage->idx)
1805 pPool->aiHash[iHash] = pPage->iNext;
1806 else
1807 {
1808 uint16_t iPrev = pPool->aiHash[iHash];
1809 for (;;)
1810 {
1811 const int16_t i = pPool->aPages[iPrev].iNext;
1812 if (i == pPage->idx)
1813 {
1814 pPool->aPages[iPrev].iNext = pPage->iNext;
1815 break;
1816 }
1817 if (i == NIL_PGMPOOL_IDX)
1818 {
1819 AssertReleaseMsgFailed(("GCPhys=%RGp idx=%#x\n", pPage->GCPhys, pPage->idx));
1820 break;
1821 }
1822 iPrev = i;
1823 }
1824 }
1825 pPage->iNext = NIL_PGMPOOL_IDX;
1826}
1827
1828
1829/**
1830 * Frees up one cache page.
1831 *
1832 * @returns VBox status code.
1833 * @retval VINF_SUCCESS on success.
1834 * @param pPool The pool.
1835 * @param iUser The user index.
1836 */
1837static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser)
1838{
1839#ifndef IN_RC
1840 const PVM pVM = pPool->CTX_SUFF(pVM);
1841#endif
1842 Assert(pPool->iAgeHead != pPool->iAgeTail); /* We shouldn't be here if there are fewer than 2 cached entries! */
1843 STAM_COUNTER_INC(&pPool->StatCacheFreeUpOne);
1844
1845 /*
1846 * Select one page from the tail of the age list.
1847 */
1848 PPGMPOOLPAGE pPage;
1849 for (unsigned iLoop = 0; ; iLoop++)
1850 {
1851 uint16_t iToFree = pPool->iAgeTail;
1852 if (iToFree == iUser)
1853 iToFree = pPool->aPages[iToFree].iAgePrev;
1854/* This is the alternative to the SyncCR3 pgmPoolCacheUsed calls.
1855 if (pPool->aPages[iToFree].iUserHead != NIL_PGMPOOL_USER_INDEX)
1856 {
1857 uint16_t i = pPool->aPages[iToFree].iAgePrev;
1858 for (unsigned j = 0; j < 10 && i != NIL_PGMPOOL_USER_INDEX; j++, i = pPool->aPages[i].iAgePrev)
1859 {
1860 if (pPool->aPages[iToFree].iUserHead == NIL_PGMPOOL_USER_INDEX)
1861 continue;
1862 iToFree = i;
1863 break;
1864 }
1865 }
1866*/
1867 Assert(iToFree != iUser);
1868 AssertRelease(iToFree != NIL_PGMPOOL_IDX);
1869 pPage = &pPool->aPages[iToFree];
1870
1871 /*
1872 * Reject any attempts at flushing the currently active shadow CR3 mapping.
1873 * Call pgmPoolCacheUsed to move the page to the head of the age list.
1874 */
1875 if (!pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage))
1876 break;
1877 LogFlow(("pgmPoolCacheFreeOne: refuse CR3 mapping\n"));
1878 pgmPoolCacheUsed(pPool, pPage);
1879 AssertLogRelReturn(iLoop < 8192, VERR_INTERNAL_ERROR);
1880 }
1881
1882 /*
1883 * Found a usable page, flush it and return.
1884 */
1885 int rc = pgmPoolFlushPage(pPool, pPage);
1886 /* This flush was initiated by us and not the guest, so explicitly flush the TLB. */
1887 /** @todo find out why this is necessary; pgmPoolFlushPage should trigger a flush if one is really needed. */
1888 if (rc == VINF_SUCCESS)
1889 PGM_INVL_ALL_VCPU_TLBS(pVM);
1890 return rc;
1891}
1892
1893
1894/**
1895 * Checks if a kind mismatch is really a page being reused
1896 * or if it's just a normal remapping.
1897 *
1898 * @returns true if reused and the cached page (enmKind1) should be flushed
1899 * @returns false if not reused.
1900 * @param enmKind1 The kind of the cached page.
1901 * @param enmKind2 The kind of the requested page.
1902 */
1903static bool pgmPoolCacheReusedByKind(PGMPOOLKIND enmKind1, PGMPOOLKIND enmKind2)
1904{
1905 switch (enmKind1)
1906 {
1907 /*
1908 * Never reuse them. There is no remapping in non-paging mode.
1909 */
1910 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
1911 case PGMPOOLKIND_32BIT_PD_PHYS:
1912 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
1913 case PGMPOOLKIND_PAE_PD_PHYS:
1914 case PGMPOOLKIND_PAE_PDPT_PHYS:
1915 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
1916 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
1917 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
1918 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1919 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1920 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT: /* never reuse them for other types */
1921 return false;
1922
1923 /*
1924 * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
1925 */
1926 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
1927 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
1928 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
1929 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
1930 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
1931 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
1932 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
1933 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
1934 case PGMPOOLKIND_32BIT_PD:
1935 case PGMPOOLKIND_PAE_PDPT:
1936 switch (enmKind2)
1937 {
1938 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
1939 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1940 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
1941 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1942 case PGMPOOLKIND_64BIT_PML4:
1943 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
1944 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
1945 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
1946 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
1947 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
1948 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1949 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1950 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
1951 return true;
1952 default:
1953 return false;
1954 }
1955
1956 /*
1957 * It's perfectly fine to reuse these, except for 32-bit and non-paging stuff.
1958 */
1959 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
1960 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1961 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
1962 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1963 case PGMPOOLKIND_64BIT_PML4:
1964 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
1965 switch (enmKind2)
1966 {
1967 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
1968 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
1969 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
1970 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
1971 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
1972 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
1973 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
1974 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
1975 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
1976 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
1977 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
1978 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
1979 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1980 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1981 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
1982 return true;
1983 default:
1984 return false;
1985 }
1986
1987 /*
1988 * These cannot be flushed, and it's common to reuse the PDs as PTs.
1989 */
1990 case PGMPOOLKIND_ROOT_NESTED:
1991 return false;
1992
1993 default:
1994 AssertFatalMsgFailed(("enmKind1=%d\n", enmKind1));
1995 }
1996}
1997
1998
1999/**
2000 * Attempts to satisfy a pgmPoolAlloc request from the cache.
2001 *
2002 * @returns VBox status code.
2003 * @retval VINF_PGM_CACHED_PAGE on success.
2004 * @retval VERR_FILE_NOT_FOUND if not found.
2005 * @param pPool The pool.
2006 * @param GCPhys The GC physical address of the page we're gonna shadow.
2007 * @param enmKind The kind of mapping.
2008 * @param enmAccess Access type for the mapping (only relevant for big pages)
2009 * @param iUser The shadow page pool index of the user table.
2010 * @param iUserTable The index into the user table (shadowed).
2011 * @param ppPage Where to store the pointer to the page.
2012 */
2013static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
2014{
2015#ifndef IN_RC
2016 const PVM pVM = pPool->CTX_SUFF(pVM);
2017#endif
2018 /*
2019 * Look up the GCPhys in the hash.
2020 */
2021 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
2022 Log3(("pgmPoolCacheAlloc: %RGp kind %s iUser=%x iUserTable=%x SLOT=%d\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable, i));
2023 if (i != NIL_PGMPOOL_IDX)
2024 {
2025 do
2026 {
2027 PPGMPOOLPAGE pPage = &pPool->aPages[i];
2028 Log4(("pgmPoolCacheAlloc: slot %d found page %RGp\n", i, pPage->GCPhys));
2029 if (pPage->GCPhys == GCPhys)
2030 {
2031 if ( (PGMPOOLKIND)pPage->enmKind == enmKind
2032 && (PGMPOOLACCESS)pPage->enmAccess == enmAccess)
2033 {
2034 /* Put it at the head of the age list to make sure pgmPoolTrackAddUser
2035 * doesn't flush it in case there are no more free use records.
2036 */
2037 pgmPoolCacheUsed(pPool, pPage);
2038
2039 int rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable);
2040 if (RT_SUCCESS(rc))
2041 {
2042 Assert((PGMPOOLKIND)pPage->enmKind == enmKind);
2043 *ppPage = pPage;
2044 if (pPage->cModifications)
2045 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
2046 STAM_COUNTER_INC(&pPool->StatCacheHits);
2047 return VINF_PGM_CACHED_PAGE;
2048 }
2049 return rc;
2050 }
2051
2052 if ((PGMPOOLKIND)pPage->enmKind != enmKind)
2053 {
2054 /*
2055 * The kind is different. In some cases we should now flush the page
2056 * as it has been reused, but in most cases this is normal remapping
2057 * of PDs as PT or big pages using the GCPhys field in a slightly
2058 * different way than the other kinds.
2059 */
2060 if (pgmPoolCacheReusedByKind((PGMPOOLKIND)pPage->enmKind, enmKind))
2061 {
2062 STAM_COUNTER_INC(&pPool->StatCacheKindMismatches);
2063 pgmPoolFlushPage(pPool, pPage);
2064 break;
2065 }
2066 }
2067 }
2068
2069 /* next */
2070 i = pPage->iNext;
2071 } while (i != NIL_PGMPOOL_IDX);
2072 }
2073
2074 Log3(("pgmPoolCacheAlloc: Missed GCPhys=%RGp enmKind=%s\n", GCPhys, pgmPoolPoolKindToStr(enmKind)));
2075 STAM_COUNTER_INC(&pPool->StatCacheMisses);
2076 return VERR_FILE_NOT_FOUND;
2077}
2078
2079
2080/**
2081 * Inserts a page into the cache.
2082 *
2083 * @param pPool The pool.
2084 * @param pPage The cached page.
2085 * @param fCanBeCached Set if the page is fit for caching from the caller's point of view.
2086 */
2087static void pgmPoolCacheInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCanBeCached)
2088{
2089 /*
2090 * Insert into the GCPhys hash if the page is fit for that.
2091 */
2092 Assert(!pPage->fCached);
2093 if (fCanBeCached)
2094 {
2095 pPage->fCached = true;
2096 pgmPoolHashInsert(pPool, pPage);
2097 Log3(("pgmPoolCacheInsert: Caching %p:{.Core=%RHp, .idx=%d, .enmKind=%s, GCPhys=%RGp}\n",
2098 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
2099 STAM_COUNTER_INC(&pPool->StatCacheCacheable);
2100 }
2101 else
2102 {
2103 Log3(("pgmPoolCacheInsert: Not caching %p:{.Core=%RHp, .idx=%d, .enmKind=%s, GCPhys=%RGp}\n",
2104 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
2105 STAM_COUNTER_INC(&pPool->StatCacheUncacheable);
2106 }
2107
2108 /*
2109 * Insert at the head of the age list.
2110 */
2111 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2112 pPage->iAgeNext = pPool->iAgeHead;
2113 if (pPool->iAgeHead != NIL_PGMPOOL_IDX)
2114 pPool->aPages[pPool->iAgeHead].iAgePrev = pPage->idx;
2115 else
2116 pPool->iAgeTail = pPage->idx;
2117 pPool->iAgeHead = pPage->idx;
2118}
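/* The resulting age list invariant: iAgeHead is the most recently used page
 * and iAgeTail the least recently used one, which is why pgmPoolCacheFreeOne
 * above evicts from iAgeTail. For an empty list both ends point at the new
 * page afterwards:
 *
 *  before: iAgeHead = NIL_PGMPOOL_IDX, iAgeTail = NIL_PGMPOOL_IDX
 *  after:  iAgeHead = pPage->idx,      iAgeTail = pPage->idx
 */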
2119
2120
2121/**
2122 * Flushes a cached page.
2123 *
2124 * @param pPool The pool.
2125 * @param pPage The cached page.
2126 */
2127static void pgmPoolCacheFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2128{
2129 Log3(("pgmPoolCacheFlushPage: %RGp\n", pPage->GCPhys));
2130
2131 /*
2132 * Remove the page from the hash.
2133 */
2134 if (pPage->fCached)
2135 {
2136 pPage->fCached = false;
2137 pgmPoolHashRemove(pPool, pPage);
2138 }
2139 else
2140 Assert(pPage->iNext == NIL_PGMPOOL_IDX);
2141
2142 /*
2143 * Remove it from the age list.
2144 */
2145 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
2146 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
2147 else
2148 pPool->iAgeTail = pPage->iAgePrev;
2149 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
2150 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
2151 else
2152 pPool->iAgeHead = pPage->iAgeNext;
2153 pPage->iAgeNext = NIL_PGMPOOL_IDX;
2154 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2155}
2156
2157
2158/**
2159 * Looks for pages sharing the monitor.
2160 *
2161 * @returns Pointer to the head page.
2162 * @returns NULL if not found.
2163 * @param pPool The pool.
2164 * @param pNewPage The page which is going to be monitored.
2165 */
2166static PPGMPOOLPAGE pgmPoolMonitorGetPageByGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pNewPage)
2167{
2168 /*
2169 * Look up the GCPhys in the hash.
2170 */
2171 RTGCPHYS GCPhys = pNewPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
2172 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
2173 if (i == NIL_PGMPOOL_IDX)
2174 return NULL;
2175 do
2176 {
2177 PPGMPOOLPAGE pPage = &pPool->aPages[i];
2178 if ( pPage->GCPhys - GCPhys < PAGE_SIZE
2179 && pPage != pNewPage)
2180 {
2181 switch (pPage->enmKind)
2182 {
2183 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2184 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2185 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2186 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2187 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2188 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2189 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2190 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2191 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2192 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2193 case PGMPOOLKIND_64BIT_PML4:
2194 case PGMPOOLKIND_32BIT_PD:
2195 case PGMPOOLKIND_PAE_PDPT:
2196 {
2197 /* find the head */
2198 while (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
2199 {
2200 Assert(pPage->iMonitoredPrev != pPage->idx);
2201 pPage = &pPool->aPages[pPage->iMonitoredPrev];
2202 }
2203 return pPage;
2204 }
2205
2206 /* ignore, no monitoring. */
2207 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2208 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2209 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2210 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2211 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2212 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2213 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2214 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2215 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2216 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2217 case PGMPOOLKIND_ROOT_NESTED:
2218 case PGMPOOLKIND_PAE_PD_PHYS:
2219 case PGMPOOLKIND_PAE_PDPT_PHYS:
2220 case PGMPOOLKIND_32BIT_PD_PHYS:
2221 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
2222 break;
2223 default:
2224 AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
2225 }
2226 }
2227
2228 /* next */
2229 i = pPage->iNext;
2230 } while (i != NIL_PGMPOOL_IDX);
2231 return NULL;
2232}
2233
2234
2235/**
2236 * Enables write monitoring of a guest page.
2237 *
2238 * @returns VBox status code.
2239 * @retval VINF_SUCCESS on success.
2240 * @param pPool The pool.
2241 * @param pPage The cached page.
2242 */
2243static int pgmPoolMonitorInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2244{
2245 LogFlow(("pgmPoolMonitorInsert %RGp\n", pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1)));
2246
2247 /*
2248 * Filter out the relevant kinds.
2249 */
2250 switch (pPage->enmKind)
2251 {
2252 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2253 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2254 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2255 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2256 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2257 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2258 case PGMPOOLKIND_64BIT_PML4:
2259 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2260 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2261 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2262 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2263 case PGMPOOLKIND_32BIT_PD:
2264 case PGMPOOLKIND_PAE_PDPT:
2265 break;
2266
2267 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2268 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2269 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2270 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2271 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2272 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2273 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2274 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2275 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2276 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2277 case PGMPOOLKIND_ROOT_NESTED:
2278 /* Nothing to monitor here. */
2279 return VINF_SUCCESS;
2280
2281 case PGMPOOLKIND_32BIT_PD_PHYS:
2282 case PGMPOOLKIND_PAE_PDPT_PHYS:
2283 case PGMPOOLKIND_PAE_PD_PHYS:
2284 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
2285 /* Nothing to monitor here. */
2286 return VINF_SUCCESS;
2287 default:
2288 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
2289 }
2290
2291 /*
2292 * Install handler.
2293 */
2294 int rc;
2295 PPGMPOOLPAGE pPageHead = pgmPoolMonitorGetPageByGCPhys(pPool, pPage);
2296 if (pPageHead)
2297 {
2298 Assert(pPageHead != pPage); Assert(pPageHead->iMonitoredNext != pPage->idx);
2299 Assert(pPageHead->iMonitoredPrev != pPage->idx);
2300
2301#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2302 if (pPageHead->fDirty)
2303 pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPageHead->idxDirty, false /* do not remove */);
2304#endif
2305
2306 pPage->iMonitoredPrev = pPageHead->idx;
2307 pPage->iMonitoredNext = pPageHead->iMonitoredNext;
2308 if (pPageHead->iMonitoredNext != NIL_PGMPOOL_IDX)
2309 pPool->aPages[pPageHead->iMonitoredNext].iMonitoredPrev = pPage->idx;
2310 pPageHead->iMonitoredNext = pPage->idx;
2311 rc = VINF_SUCCESS;
2312 }
2313 else
2314 {
2315 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
2316 PVM pVM = pPool->CTX_SUFF(pVM);
2317 const RTGCPHYS GCPhysPage = pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
2318 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2319 GCPhysPage, GCPhysPage + (PAGE_SIZE - 1),
2320 pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
2321 pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
2322 pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
2323 pPool->pszAccessHandler);
2324 /** @todo we should probably deal with out-of-memory conditions here, but for now increasing
2325 * the heap size should suffice. */
2326 AssertFatalMsgRC(rc, ("PGMHandlerPhysicalRegisterEx %RGp failed with %Rrc\n", GCPhysPage, rc));
2327 Assert(!(VMMGetCpu(pVM)->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3));
2328 }
2329 pPage->fMonitored = true;
2330 return rc;
2331}
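/* Sketch of the monitor chain after linking a new page N behind an existing
 * head H (the physical handler stays registered for H only):
 *
 *  H.iMonitoredNext = N.idx
 *  N.iMonitoredPrev = H.idx
 *  N.iMonitoredNext = H's previous iMonitoredNext (fixed up if not NIL)
 */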
2332
2333
2334/**
2335 * Disables write monitoring of a guest page.
2336 *
2337 * @returns VBox status code.
2338 * @retval VINF_SUCCESS on success.
2339 * @param pPool The pool.
2340 * @param pPage The cached page.
2341 */
2342static int pgmPoolMonitorFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2343{
2344 /*
2345 * Filter out the relevant kinds.
2346 */
2347 switch (pPage->enmKind)
2348 {
2349 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2350 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2351 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2352 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2353 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2354 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2355 case PGMPOOLKIND_64BIT_PML4:
2356 case PGMPOOLKIND_32BIT_PD:
2357 case PGMPOOLKIND_PAE_PDPT:
2358 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2359 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2360 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2361 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2362 break;
2363
2364 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2365 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2366 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2367 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2368 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2369 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2370 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2371 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2372 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2373 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2374 case PGMPOOLKIND_ROOT_NESTED:
2375 case PGMPOOLKIND_PAE_PD_PHYS:
2376 case PGMPOOLKIND_PAE_PDPT_PHYS:
2377 case PGMPOOLKIND_32BIT_PD_PHYS:
2378 /* Nothing to monitor here. */
2379 return VINF_SUCCESS;
2380
2381 default:
2382 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
2383 }
2384
2385 /*
2386 * Remove the page from the monitored list or uninstall it if last.
2387 */
2388 const PVM pVM = pPool->CTX_SUFF(pVM);
2389 int rc;
2390 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX
2391 || pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
2392 {
2393 if (pPage->iMonitoredPrev == NIL_PGMPOOL_IDX)
2394 {
2395 PPGMPOOLPAGE pNewHead = &pPool->aPages[pPage->iMonitoredNext];
2396 pNewHead->iMonitoredPrev = NIL_PGMPOOL_IDX;
2397 rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
2398 pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pNewHead),
2399 pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pNewHead),
2400 pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pNewHead),
2401 pPool->pszAccessHandler);
2402 AssertFatalRCSuccess(rc);
2403 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
2404 }
2405 else
2406 {
2407 pPool->aPages[pPage->iMonitoredPrev].iMonitoredNext = pPage->iMonitoredNext;
2408 if (pPage->iMonitoredNext != NIL_PGMPOOL_IDX)
2409 {
2410 pPool->aPages[pPage->iMonitoredNext].iMonitoredPrev = pPage->iMonitoredPrev;
2411 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
2412 }
2413 pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
2414 rc = VINF_SUCCESS;
2415 }
2416 }
2417 else
2418 {
2419 rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1));
2420 AssertFatalRC(rc);
2421#ifdef VBOX_STRICT
2422 PVMCPU pVCpu = VMMGetCpu(pVM);
2423#endif
2424 AssertMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3),
2425 ("%#x %#x\n", pVCpu->pgm.s.fSyncFlags, pVM->fGlobalForcedActions));
2426 }
2427 pPage->fMonitored = false;
2428
2429 /*
2430 * Remove it from the list of modified pages (if in it).
2431 */
2432 pgmPoolMonitorModifiedRemove(pPool, pPage);
2433
2434 return rc;
2435}
2436
2437
2438/**
2439 * Inserts the page into the list of modified pages.
2440 *
2441 * @param pPool The pool.
2442 * @param pPage The page.
2443 */
2444void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2445{
2446 Log3(("pgmPoolMonitorModifiedInsert: idx=%d\n", pPage->idx));
2447 AssertMsg( pPage->iModifiedNext == NIL_PGMPOOL_IDX
2448 && pPage->iModifiedPrev == NIL_PGMPOOL_IDX
2449 && pPool->iModifiedHead != pPage->idx,
2450 ("Next=%d Prev=%d idx=%d cModifications=%d Head=%d cModifiedPages=%d\n",
2451 pPage->iModifiedNext, pPage->iModifiedPrev, pPage->idx, pPage->cModifications,
2452 pPool->iModifiedHead, pPool->cModifiedPages));
2453
2454 pPage->iModifiedNext = pPool->iModifiedHead;
2455 if (pPool->iModifiedHead != NIL_PGMPOOL_IDX)
2456 pPool->aPages[pPool->iModifiedHead].iModifiedPrev = pPage->idx;
2457 pPool->iModifiedHead = pPage->idx;
2458 pPool->cModifiedPages++;
2459#ifdef VBOX_WITH_STATISTICS
2460 if (pPool->cModifiedPages > pPool->cModifiedPagesHigh)
2461 pPool->cModifiedPagesHigh = pPool->cModifiedPages;
2462#endif
2463}
2464
2465
2466/**
2467 * Removes the page from the list of modified pages and resets the
2468 * modification counter.
2469 *
2470 * @param pPool The pool.
2471 * @param pPage The page which is believed to be in the list of modified pages.
2472 */
2473static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2474{
2475 Log3(("pgmPoolMonitorModifiedRemove: idx=%d cModifications=%d\n", pPage->idx, pPage->cModifications));
2476 if (pPool->iModifiedHead == pPage->idx)
2477 {
2478 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
2479 pPool->iModifiedHead = pPage->iModifiedNext;
2480 if (pPage->iModifiedNext != NIL_PGMPOOL_IDX)
2481 {
2482 pPool->aPages[pPage->iModifiedNext].iModifiedPrev = NIL_PGMPOOL_IDX;
2483 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2484 }
2485 pPool->cModifiedPages--;
2486 }
2487 else if (pPage->iModifiedPrev != NIL_PGMPOOL_IDX)
2488 {
2489 pPool->aPages[pPage->iModifiedPrev].iModifiedNext = pPage->iModifiedNext;
2490 if (pPage->iModifiedNext != NIL_PGMPOOL_IDX)
2491 {
2492 pPool->aPages[pPage->iModifiedNext].iModifiedPrev = pPage->iModifiedPrev;
2493 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2494 }
2495 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
2496 pPool->cModifiedPages--;
2497 }
2498 else
2499 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
2500 pPage->cModifications = 0;
2501}
2502
2503
2504/**
2505 * Zaps the list of modified pages, resetting their modification counters in the process.
2506 *
2507 * @param pVM The VM handle.
2508 */
2509static void pgmPoolMonitorModifiedClearAll(PVM pVM)
2510{
2511 pgmLock(pVM);
2512 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2513 LogFlow(("pgmPoolMonitorModifiedClearAll: cModifiedPages=%d\n", pPool->cModifiedPages));
2514
2515 unsigned cPages = 0; NOREF(cPages);
2516
2517#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2518 pgmPoolResetDirtyPages(pVM);
2519#endif
2520
2521 uint16_t idx = pPool->iModifiedHead;
2522 pPool->iModifiedHead = NIL_PGMPOOL_IDX;
2523 while (idx != NIL_PGMPOOL_IDX)
2524 {
2525 PPGMPOOLPAGE pPage = &pPool->aPages[idx];
2526 idx = pPage->iModifiedNext;
2527 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2528 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
2529 pPage->cModifications = 0;
2530 Assert(++cPages);
2531 }
2532 AssertMsg(cPages == pPool->cModifiedPages, ("%d != %d\n", cPages, pPool->cModifiedPages));
2533 pPool->cModifiedPages = 0;
2534 pgmUnlock(pVM);
2535}
2536
2537
2538/**
2539 * Handles SyncCR3 pool tasks.
2540 *
2541 * @returns VBox status code.
2542 * @retval VINF_SUCCESS on success.
2543 * @retval VINF_PGM_SYNC_CR3 if it needs to be deferred to ring 3 (GC only).
2544 * @param pVCpu The VMCPU handle.
2545 * @remark Should only be used when monitoring is available, thus placed in
2546 * the PGMPOOL_WITH_MONITORING #ifdef.
2547 */
2548int pgmPoolSyncCR3(PVMCPU pVCpu)
2549{
2550 PVM pVM = pVCpu->CTX_SUFF(pVM);
2551 LogFlow(("pgmPoolSyncCR3\n"));
2552
2553 /*
2554 * When monitoring shadowed pages, we reset the modification counters on CR3 sync.
2555 * Occasionally we will have to clear all the shadow page tables because we wanted
2556 * to monitor a page which was mapped by too many shadowed page tables. This operation
2557 * is sometimes referred to as a 'lightweight flush'.
2558 */
2559# ifdef IN_RING3 /* Don't flush in ring-0 or raw mode, it's taking too long. */
2560 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2561 pgmR3PoolClearAll(pVM);
2562# else /* !IN_RING3 */
2563 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2564 {
2565 LogFlow(("SyncCR3: PGM_SYNC_CLEAR_PGM_POOL is set -> VINF_PGM_SYNC_CR3\n"));
2566 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
2567
2568 /* Make sure all other VCPUs return to ring 3. */
2569 if (pVM->cCpus > 1)
2570 {
2571 VM_FF_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING);
2572 PGM_INVL_ALL_VCPU_TLBS(pVM);
2573 }
2574 return VINF_PGM_SYNC_CR3;
2575 }
2576# endif /* !IN_RING3 */
2577 else
2578 pgmPoolMonitorModifiedClearAll(pVM);
2579
2580 return VINF_SUCCESS;
2581}
2582
2583
2584/**
2585 * Frees up at least one user entry.
2586 *
2587 * @returns VBox status code.
2588 * @retval VINF_SUCCESS if successfully added.
2589 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2590 * @param pPool The pool.
2591 * @param iUser The user index.
2592 */
2593static int pgmPoolTrackFreeOneUser(PPGMPOOL pPool, uint16_t iUser)
2594{
2595 STAM_COUNTER_INC(&pPool->StatTrackFreeUpOneUser);
2596 /*
2597 * Just free cached pages in a braindead fashion.
2598 */
2599 /** @todo walk the age list backwards and free the first with usage. */
2600 int rc = VINF_SUCCESS;
2601 do
2602 {
2603 int rc2 = pgmPoolCacheFreeOne(pPool, iUser);
2604 if (RT_FAILURE(rc2) && rc == VINF_SUCCESS)
2605 rc = rc2;
2606 } while (pPool->iUserFreeHead == NIL_PGMPOOL_USER_INDEX);
2607 return rc;
2608}
2609
2610
2611/**
2612 * Inserts a page into the cache.
2613 *
2614 * This will create a user node for the page, insert it into the GCPhys
2615 * hash, and insert it into the age list.
2616 *
2617 * @returns VBox status code.
2618 * @retval VINF_SUCCESS if successfully added.
2619 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2620 * @param pPool The pool.
2621 * @param pPage The cached page.
2622 * @param GCPhys The GC physical address of the page we're gonna shadow.
2623 * @param iUser The user index.
2624 * @param iUserTable The user table index.
2625 */
2626DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhys, uint16_t iUser, uint32_t iUserTable)
2627{
2628 int rc = VINF_SUCCESS;
2629 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2630
2631 LogFlow(("pgmPoolTrackInsert GCPhys=%RGp iUser %x iUserTable %x\n", GCPhys, iUser, iUserTable));
2632
2633#ifdef VBOX_STRICT
2634 /*
2635 * Check that the entry doesn't already exist.
2636 */
2637 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
2638 {
2639 uint16_t i = pPage->iUserHead;
2640 do
2641 {
2642 Assert(i < pPool->cMaxUsers);
2643 AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
2644 i = paUsers[i].iNext;
2645 } while (i != NIL_PGMPOOL_USER_INDEX);
2646 }
2647#endif
2648
2649 /*
2650 * Find a free user node.
2651 */
2652 uint16_t i = pPool->iUserFreeHead;
2653 if (i == NIL_PGMPOOL_USER_INDEX)
2654 {
2655 rc = pgmPoolTrackFreeOneUser(pPool, iUser);
2656 if (RT_FAILURE(rc))
2657 return rc;
2658 i = pPool->iUserFreeHead;
2659 }
2660
2661 /*
2662 * Unlink the user node from the free list,
2663 * initialize and insert it into the user list.
2664 */
2665 pPool->iUserFreeHead = paUsers[i].iNext;
2666 paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX;
2667 paUsers[i].iUser = iUser;
2668 paUsers[i].iUserTable = iUserTable;
2669 pPage->iUserHead = i;
2670
2671 /*
2672 * Insert into cache and enable monitoring of the guest page if enabled.
2673 *
2674 * Until we implement caching of all levels, including the CR3 one, we'll
2675 * have to make sure we don't try to monitor & cache any recursive reuse of
2676 * a monitored CR3 page. Because all Windows versions do this, we'll
2677 * have to be able to do combined access monitoring: CR3 + PT and
2678 * PD + PT (guest PAE).
2679 *
2680 * Update:
2681 * We're now cooperating with the CR3 monitor if an uncacheable page is found.
2682 */
2683 const bool fCanBeMonitored = true;
2684 pgmPoolCacheInsert(pPool, pPage, fCanBeMonitored); /* This can be expanded. */
2685 if (fCanBeMonitored)
2686 {
2687 rc = pgmPoolMonitorInsert(pPool, pPage);
2688 AssertRC(rc);
2689 }
2690 return rc;
2691}
2692
2693
2694/**
2695 * Adds a user reference to a page.
2696 *
2697 * This will move the page to the head of the age list.
2698 *
2699 * @returns VBox status code.
2700 * @retval VINF_SUCCESS if successfully added.
2701 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2702 * @param pPool The pool.
2703 * @param pPage The cached page.
2704 * @param iUser The user index.
2705 * @param iUserTable The user table.
2706 */
2707static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
2708{
2709 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2710
2711 Log3(("pgmPoolTrackAddUser GCPhys = %RGp iUser %x iUserTable %x\n", pPage->GCPhys, iUser, iUserTable));
2712
2713# ifdef VBOX_STRICT
2714 /*
2715 * Check that the entry doesn't already exist. We only allow multiple users of top-level paging structures (SHW_POOL_ROOT_IDX).
2716 */
2717 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
2718 {
2719 uint16_t i = pPage->iUserHead;
2720 do
2721 {
2722 Assert(i < pPool->cMaxUsers);
2723 AssertMsg(iUser == PGMPOOL_IDX_PD || iUser == PGMPOOL_IDX_PDPT || iUser == PGMPOOL_IDX_NESTED_ROOT || iUser == PGMPOOL_IDX_AMD64_CR3 ||
2724 paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
2725 i = paUsers[i].iNext;
2726 } while (i != NIL_PGMPOOL_USER_INDEX);
2727 }
2728# endif
2729
2730 /*
2731 * Allocate a user node.
2732 */
2733 uint16_t i = pPool->iUserFreeHead;
2734 if (i == NIL_PGMPOOL_USER_INDEX)
2735 {
2736 int rc = pgmPoolTrackFreeOneUser(pPool, iUser);
2737 if (RT_FAILURE(rc))
2738 return rc;
2739 i = pPool->iUserFreeHead;
2740 }
2741 pPool->iUserFreeHead = paUsers[i].iNext;
2742
2743 /*
2744 * Initialize the user node and insert it.
2745 */
2746 paUsers[i].iNext = pPage->iUserHead;
2747 paUsers[i].iUser = iUser;
2748 paUsers[i].iUserTable = iUserTable;
2749 pPage->iUserHead = i;
2750
2751# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2752 if (pPage->fDirty)
2753 pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPage->idxDirty, false /* do not remove */);
2754# endif
2755
2756 /*
2757 * Tell the cache to update its replacement stats for this page.
2758 */
2759 pgmPoolCacheUsed(pPool, pPage);
2760 return VINF_SUCCESS;
2761}
2762
2763
2764/**
2765 * Frees a user record associated with a page.
2766 *
2767 * This does not clear the entry in the user table, it simply returns the
2768 * user record to the chain of free records.
2769 *
2770 * @param pPool The pool.
2771 * @param pPage The shadow page.
2772 * @param iUser The shadow page pool index of the user table.
2773 * @param iUserTable The index into the user table (shadowed).
2774 */
2775static void pgmPoolTrackFreeUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
2776{
2777 /*
2778 * Unlink and free the specified user entry.
2779 */
2780 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2781
2782 Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable));
2783 /* Special: For PAE and 32-bit paging, there is usually no more than one user. */
2784 uint16_t i = pPage->iUserHead;
2785 if ( i != NIL_PGMPOOL_USER_INDEX
2786 && paUsers[i].iUser == iUser
2787 && paUsers[i].iUserTable == iUserTable)
2788 {
2789 pPage->iUserHead = paUsers[i].iNext;
2790
2791 paUsers[i].iUser = NIL_PGMPOOL_IDX;
2792 paUsers[i].iNext = pPool->iUserFreeHead;
2793 pPool->iUserFreeHead = i;
2794 return;
2795 }
2796
2797 /* General: Linear search. */
2798 uint16_t iPrev = NIL_PGMPOOL_USER_INDEX;
2799 while (i != NIL_PGMPOOL_USER_INDEX)
2800 {
2801 if ( paUsers[i].iUser == iUser
2802 && paUsers[i].iUserTable == iUserTable)
2803 {
2804 if (iPrev != NIL_PGMPOOL_USER_INDEX)
2805 paUsers[iPrev].iNext = paUsers[i].iNext;
2806 else
2807 pPage->iUserHead = paUsers[i].iNext;
2808
2809 paUsers[i].iUser = NIL_PGMPOOL_IDX;
2810 paUsers[i].iNext = pPool->iUserFreeHead;
2811 pPool->iUserFreeHead = i;
2812 return;
2813 }
2814 iPrev = i;
2815 i = paUsers[i].iNext;
2816 }
2817
2818 /* Fatal: didn't find it */
2819 AssertFatalMsgFailed(("Didn't find the user entry! iUser=%#x iUserTable=%#x GCPhys=%RGp\n",
2820 iUser, iUserTable, pPage->GCPhys));
2821}
2822
2823
2824/**
2825 * Gets the entry size of a shadow table.
2826 *
2827 * @param enmKind The kind of page.
2828 *
2829 * @returns The size of the entry in bytes. That is, 4 or 8.
2830 * @returns If the kind is not for a table, a fatal assertion is
2831 * raised.
2832 */
2833DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind)
2834{
2835 switch (enmKind)
2836 {
2837 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2838 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2839 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2840 case PGMPOOLKIND_32BIT_PD:
2841 case PGMPOOLKIND_32BIT_PD_PHYS:
2842 return 4;
2843
2844 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2845 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2846 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2847 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2848 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2849 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2850 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2851 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2852 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2853 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2854 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2855 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2856 case PGMPOOLKIND_64BIT_PML4:
2857 case PGMPOOLKIND_PAE_PDPT:
2858 case PGMPOOLKIND_ROOT_NESTED:
2859 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2860 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2861 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2862 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2863 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2864 case PGMPOOLKIND_PAE_PD_PHYS:
2865 case PGMPOOLKIND_PAE_PDPT_PHYS:
2866 return 8;
2867
2868 default:
2869 AssertFatalMsgFailed(("enmKind=%d\n", enmKind));
2870 }
2871}
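/* As every shadow table occupies exactly one page, the entry size determines
 * the entry count; a minimal sketch (hypothetical helper, not part of the
 * pool API):
 *
 *  DECLINLINE(unsigned) pgmPoolTrackGetShadowEntryCount(PGMPOOLKIND enmKind)
 *  {
 *      return PAGE_SIZE / pgmPoolTrackGetShadowEntrySize(enmKind); // 1024 or 512
 *  }
 */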
2872
2873
2874/**
2875 * Gets the entry size of a guest table.
2876 *
2877 * @param enmKind The kind of page.
2878 *
2879 * @returns The size of the entry in bytes. That is, 0, 4 or 8.
2880 * @returns If the kind is not for a table, an assertion is raised and 0 is
2881 * returned.
2882 */
2883DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind)
2884{
2885 switch (enmKind)
2886 {
2887 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2888 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2889 case PGMPOOLKIND_32BIT_PD:
2890 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2891 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2892 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2893 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2894 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2895 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2896 return 4;
2897
2898 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2899 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2900 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2901 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2902 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2903 case PGMPOOLKIND_64BIT_PML4:
2904 case PGMPOOLKIND_PAE_PDPT:
2905 return 8;
2906
2907 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2908 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2909 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2910 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2911 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2912 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2913 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2914 case PGMPOOLKIND_ROOT_NESTED:
2915 case PGMPOOLKIND_PAE_PD_PHYS:
2916 case PGMPOOLKIND_PAE_PDPT_PHYS:
2917 case PGMPOOLKIND_32BIT_PD_PHYS:
2918 /** @todo can we return 0? (nobody is calling this...) */
2919 AssertFailed();
2920 return 0;
2921
2922 default:
2923 AssertFatalMsgFailed(("enmKind=%d\n", enmKind));
2924 }
2925}
2926
2927
2928/**
2929 * Scans one shadow page table for mappings of a physical page.
2930 *
2931 * @returns true if any PTEs were kept (only updated), false if all relevant PTEs were removed.
2932 * @param pVM The VM handle.
2933 * @param pPhysPage The guest page in question.
2934 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
2935 * @param iShw The shadow page table.
2936 * @param cRefs The number of references made in that PT.
2937 */
2938static bool pgmPoolTrackFlushGCPhysPTInt(PVM pVM, PCPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iShw, uint16_t cRefs)
2939{
2940 LogFlow(("pgmPoolTrackFlushGCPhysPT: pPhysPage=%RHp iShw=%d cRefs=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iShw, cRefs));
2941 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2942 bool bRet = false;
2943
2944 /*
2945 * Assert sanity.
2946 */
2947 Assert(cRefs == 1);
2948 AssertFatalMsg(iShw < pPool->cCurPages && iShw != NIL_PGMPOOL_IDX, ("iShw=%d\n", iShw));
2949 PPGMPOOLPAGE pPage = &pPool->aPages[iShw];
2950
2951 /*
2952 * Then, clear the actual mappings to the page in the shadow PT.
2953 */
2954 switch (pPage->enmKind)
2955 {
2956 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2957 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2958 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2959 {
2960 const uint32_t u32 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
2961 PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
2962 uint32_t u32AndMask, u32OrMask;
2963
2964 u32AndMask = 0;
2965 u32OrMask = 0;
2966
2967 if (!fFlushPTEs)
2968 {
2969 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
2970 {
2971 case PGM_PAGE_HNDL_PHYS_STATE_NONE: /* No handler installed. */
2972 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
2973 u32OrMask = X86_PTE_RW;
2974 u32AndMask = UINT32_MAX;
2975 bRet = true;
2976 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
2977 break;
2978
2979 case PGM_PAGE_HNDL_PHYS_STATE_WRITE: /* Write access is monitored. */
2980 u32OrMask = 0;
2981 u32AndMask = ~X86_PTE_RW;
2982 bRet = true;
2983 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
2984 break;
2985 default:
2986 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
2987 break;
2988 }
2989 }
2990 else
2991 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
2992
2993 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
2994 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
2995 {
2996 X86PTE Pte;
2997
2998 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX32 cRefs=%#x\n", i, pPT->a[i], cRefs));
2999 Pte.u = (pPT->a[i].u & u32AndMask) | u32OrMask;
3000 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
3001 Pte.n.u1Write = 0; /* need to disallow writes when dirty bit tracking is still active. */
3002
3003 ASMAtomicWriteSize(&pPT->a[i].u, Pte.u);
3004 cRefs--;
3005 if (!cRefs)
3006 return bRet;
3007 }
3008#ifdef LOG_ENABLED
3009 Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3010 for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
3011 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3012 {
3013 Log(("i=%d cRefs=%d\n", i, cRefs--));
3014 }
3015#endif
3016 AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3017 break;
3018 }
3019
3020 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3021 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3022 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3023 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3024 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3025 {
3026 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
3027 PX86PTPAE pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3028 uint64_t u64AndMask, u64OrMask;
3029
3030 u64OrMask = 0;
3031 u64AndMask = 0;
3032 if (!fFlushPTEs)
3033 {
3034 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
3035 {
3036 case PGM_PAGE_HNDL_PHYS_STATE_NONE: /* No handler installed. */
3037 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
3038 u64OrMask = X86_PTE_RW;
3039 u64AndMask = UINT64_MAX;
3040 bRet = true;
3041 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3042 break;
3043
3044 case PGM_PAGE_HNDL_PHYS_STATE_WRITE: /* Write access is monitored. */
3045 u64OrMask = 0;
3046 u64AndMask = ~((uint64_t)X86_PTE_RW);
3047 bRet = true;
3048 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3049 break;
3050
3051 default:
3052 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3053 break;
3054 }
3055 }
3056 else
3057 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3058
3059 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3060 if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
3061 {
3062 X86PTEPAE Pte;
3063
3064 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64 cRefs=%#x\n", i, pPT->a[i], cRefs));
3065 Pte.u = (pPT->a[i].u & u64AndMask) | u64OrMask;
3066 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
3067 Pte.n.u1Write = 0; /* need to disallow writes when dirty bit tracking is still active. */
3068
3069 ASMAtomicWriteSize(&pPT->a[i].u, Pte.u);
3070 cRefs--;
3071 if (!cRefs)
3072 return bRet;
3073 }
3074#ifdef LOG_ENABLED
3075 Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3076 for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
3077 if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
3078 {
3079 Log(("i=%d cRefs=%d\n", i, cRefs--));
3080 }
3081#endif
3082 AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d u64=%RX64\n", cRefs, pPage->iFirstPresent, pPage->cPresent, u64));
3083 break;
3084 }
3085
3086 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
3087 {
3088 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
3089 PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3090 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3091 if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
3092 {
3093 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64 cRefs=%#x\n", i, pPT->a[i], cRefs));
3094 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3095 pPT->a[i].u = 0;
3096 cRefs--;
3097 if (!cRefs)
3098 return bRet;
3099 }
3100#ifdef LOG_ENABLED
3101 Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3102 for (unsigned i = 0; i < RT_ELEMENTS(pPT->a); i++)
3103 if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
3104 {
3105 Log(("i=%d cRefs=%d\n", i, cRefs--));
3106 }
3107#endif
3108 AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3109 break;
3110 }
3111
3112#ifdef PGM_WITH_LARGE_PAGES
3113 /* Large page case only. */
3114 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3115 {
3116 Assert(HWACCMIsNestedPagingActive(pVM));
3117 Assert(cRefs == 1);
3118
3119 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
3120 PEPTPD pPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3121 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPD->a); i++)
3122 if ((pPD->a[i].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3123 {
3124 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64 cRefs=%#x\n", i, pPD->a[i], cRefs));
3125 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3126 pPD->a[i].u = 0;
3127 cRefs--;
3128 if (!cRefs)
3129 return bRet;
3130 }
3131# ifdef LOG_ENABLED
3132 Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3133 for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
3134 if ((pPD->a[i].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3135 {
3136 Log(("i=%d cRefs=%d\n", i, cRefs--));
3137 }
3138# endif
3139 AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3140 break;
3141 }
3142
3143 /* AMD-V nested paging - @todo merge with EPT as we only check the parts that are identical. */
3144 case PGMPOOLKIND_PAE_PD_PHYS:
3145 {
3146 Assert(HWACCMIsNestedPagingActive(pVM));
3147 Assert(cRefs == 1);
3148
3149 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
3150 PX86PD pPD = (PX86PD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3151 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPD->a); i++)
3152 if ((pPD->a[i].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3153 {
3154 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64 cRefs=%#x\n", i, pPD->a[i], cRefs));
3155 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3156 pPD->a[i].u = 0;
3157 cRefs--;
3158 if (!cRefs)
3159 return bRet;
3160 }
3161# ifdef LOG_ENABLED
3162 Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3163 for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
3164 if ((pPD->a[i].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3165 {
3166 Log(("i=%d cRefs=%d\n", i, cRefs--));
3167 }
3168# endif
3169 AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
3170 break;
3171 }
3172#endif /* PGM_WITH_LARGE_PAGES */
3173
3174 default:
3175 AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
3176 }
3177 return bRet;
3178}
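/* A worked example of the AND/OR masks above for a PAE PTE that is kept
 * rather than zapped (values from the handler state switch):
 *
 *  HNDL_PHYS_STATE_NONE/DISABLED: Pte.u = (old & UINT64_MAX) | X86_PTE_RW  -> writable again
 *  HNDL_PHYS_STATE_WRITE:         Pte.u =  old & ~(uint64_t)X86_PTE_RW    -> write protected
 *
 * In both cases the entry stays present, so the function returns true and the
 * caller must keep the page's tracking data; all other states zero the PTE.
 */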
3179
3180
3181/**
3182 * Scans one shadow page table for mappings of a physical page.
3183 *
3184 * @param pVM The VM handle.
3185 * @param pPhysPage The guest page in question.
3186 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3187 * @param iShw The shadow page table.
3188 * @param cRefs The number of references made in that PT.
3189 */
3190static void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iShw, uint16_t cRefs)
3191{
3192 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
3193
3194 Log2(("pgmPoolTrackFlushGCPhysPT: pPhysPage=%RHp iShw=%d cRefs=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iShw, cRefs));
3195 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPT, f);
3196 bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, iShw, cRefs);
3197 if (!fKeptPTEs)
3198 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3199 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPT, f);
3200}
3201
3202
3203/**
3204 * Flushes a list of shadow page tables mapping the same physical page.
3205 *
3206 * @param pVM The VM handle.
3207 * @param pPhysPage The guest page in question.
3208 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3209 * @param iPhysExt The physical cross reference extent list to flush.
3210 */
3211static void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iPhysExt)
3212{
3213 Assert(PGMIsLockOwner(pVM));
3214 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3215 bool fKeepList = false;
3216
3217 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTs, f);
3218 Log2(("pgmPoolTrackFlushGCPhysPTs: pPhysPage=%RHp iPhysExt=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iPhysExt));
3219
3220 const uint16_t iPhysExtStart = iPhysExt;
3221 PPGMPOOLPHYSEXT pPhysExt;
3222 do
3223 {
3224 Assert(iPhysExt < pPool->cMaxPhysExts);
3225 pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3226 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3227 {
3228 if (pPhysExt->aidx[i] != NIL_PGMPOOL_IDX)
3229 {
3230 bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, pPhysExt->aidx[i], 1);
3231 if (!fKeptPTEs)
3232 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3233 else
3234 fKeepList = true;
3235 }
3236 }
3237 /* next */
3238 iPhysExt = pPhysExt->iNext;
3239 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3240
3241 if (!fKeepList)
3242 {
3243 /* insert the list into the free list and clear the ram range entry. */
3244 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3245 pPool->iPhysExtFreeHead = iPhysExtStart;
3246 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3247 }
3248
3249 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTs, f);
3250}
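/* The 16-bit tracking word decoded by pgmPoolTrackUpdateGCPhys below packs a
 * reference count and an index; when the count field is
 * PGMPOOL_TD_CREFS_PHYSEXT the index denotes the head of a physical cross
 * reference extent list like the one walked above, otherwise it is a plain
 * shadow page table index:
 *
 *  cRefs = PGMPOOL_TD_GET_CREFS(u16);
 *  iIdx  = PGMPOOL_TD_GET_IDX(u16);
 */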
3251
3252
3253/**
3254 * Flushes all shadow page table mappings of the given guest page.
3255 *
3256 * This is typically called when the host page backing the guest one has been
3257 * replaced or when the page protection was changed due to an access handler.
3258 *
3259 * @returns VBox status code.
 3260 * @retval VINF_SUCCESS if all references have been successfully cleared.
3261 * @retval VINF_PGM_SYNC_CR3 if we're better off with a CR3 sync and a page
3262 * pool cleaning. FF and sync flags are set.
3263 *
3264 * @param pVM The VM handle.
3265 * @param GCPhysPage GC physical address of the page in question
3266 * @param pPhysPage The guest page in question.
3267 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3268 * @param pfFlushTLBs This is set to @a true if the shadow TLBs should be
3269 * flushed, it is NOT touched if this isn't necessary.
 3270 * The caller MUST initialize this to @a false.
3271 */
3272int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs)
3273{
3274 PVMCPU pVCpu = VMMGetCpu(pVM);
3275 pgmLock(pVM);
3276 int rc = VINF_SUCCESS;
3277
3278#ifdef PGM_WITH_LARGE_PAGES
3279 /* Is this page part of a large page? */
3280 if (PGM_PAGE_GET_PDE_TYPE(pPhysPage) == PGM_PAGE_PDE_TYPE_PDE)
3281 {
3282 PPGMPAGE pPhysBase;
3283 RTGCPHYS GCPhysBase = GCPhysPage & X86_PDE2M_PAE_PG_MASK;
3284
3285 GCPhysPage &= X86_PDE_PAE_PG_MASK;
3286
3287 /* Fetch the large page base. */
3288 if (GCPhysBase != GCPhysPage)
3289 {
3290 pPhysBase = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
3291 AssertFatal(pPhysBase);
3292 }
3293 else
3294 pPhysBase = pPhysPage;
3295
3296 Log(("pgmPoolTrackUpdateGCPhys: update large page PDE for %RGp (%RGp)\n", GCPhysBase, GCPhysPage));
3297
3298 if (PGM_PAGE_GET_PDE_TYPE(pPhysBase) == PGM_PAGE_PDE_TYPE_PDE)
3299 {
3300 /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
3301 PGM_PAGE_SET_PDE_TYPE(pPhysBase, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
3302
 3303 /* Update the base as *only* that one has a reference and there's only one PDE to clear. */
3304 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pPhysBase, fFlushPTEs, pfFlushTLBs);
3305
3306 *pfFlushTLBs = true;
3307 pgmUnlock(pVM);
3308 return rc;
3309 }
3310 }
3311#else
3312 NOREF(GCPhysPage);
3313#endif /* PGM_WITH_LARGE_PAGES */
3314
3315 const uint16_t u16 = PGM_PAGE_GET_TRACKING(pPhysPage);
3316 if (u16)
3317 {
3318 /*
3319 * The zero page is currently screwing up the tracking and we'll
3320 * have to flush the whole shebang. Unless VBOX_WITH_NEW_LAZY_PAGE_ALLOC
3321 * is defined, zero pages won't normally be mapped. Some kind of solution
3322 * will be needed for this problem of course, but it will have to wait...
3323 */
3324 if (PGM_PAGE_IS_ZERO(pPhysPage))
3325 rc = VINF_PGM_GCPHYS_ALIASED;
3326 else
3327 {
3328# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3329 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
3330 pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
3331 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
3332# endif
3333
3334 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
3335 pgmPoolTrackFlushGCPhysPT(pVM,
3336 pPhysPage,
3337 fFlushPTEs,
3338 PGMPOOL_TD_GET_IDX(u16),
3339 PGMPOOL_TD_GET_CREFS(u16));
3340 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
3341 pgmPoolTrackFlushGCPhysPTs(pVM, pPhysPage, fFlushPTEs, PGMPOOL_TD_GET_IDX(u16));
3342 else
3343 rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
3344 *pfFlushTLBs = true;
3345
3346# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3347 PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
3348# endif
3349 }
3350 }
3351
3352 if (rc == VINF_PGM_GCPHYS_ALIASED)
3353 {
3354 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3355 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3356 rc = VINF_PGM_SYNC_CR3;
3357 }
3358 pgmUnlock(pVM);
3359 return rc;
3360}
3361
3362
3363/**
3364 * Scans all shadow page tables for mappings of a physical page.
3365 *
3366 * This may be slow, but it's most likely more efficient than cleaning
3367 * out the entire page pool / cache.
3368 *
3369 * @returns VBox status code.
 3370 * @retval VINF_SUCCESS if all references have been successfully cleared.
3371 * @retval VINF_PGM_GCPHYS_ALIASED if we're better off with a CR3 sync and
3372 * a page pool cleaning.
3373 *
3374 * @param pVM The VM handle.
3375 * @param pPhysPage The guest page in question.
3376 */
3377int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage)
3378{
3379 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3380 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3381 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: cUsedPages=%d cPresent=%d pPhysPage=%R[pgmpage]\n",
3382 pPool->cUsedPages, pPool->cPresent, pPhysPage));
3383
3384#if 1
3385 /*
3386 * There is a limit to what makes sense.
3387 */
3388 if (pPool->cPresent > 1024)
3389 {
3390 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: giving up... (cPresent=%d)\n", pPool->cPresent));
3391 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3392 return VINF_PGM_GCPHYS_ALIASED;
3393 }
3394#endif
3395
3396 /*
 3397 * Iterate all the pages until we've encountered all those in use.
 3398 * This is a simple but not quite optimal solution.
3399 */
3400 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
3401 const uint32_t u32 = u64;
3402 unsigned cLeft = pPool->cUsedPages;
3403 unsigned iPage = pPool->cCurPages;
3404 while (--iPage >= PGMPOOL_IDX_FIRST)
3405 {
3406 PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
3407 if (pPage->GCPhys != NIL_RTGCPHYS)
3408 {
3409 switch (pPage->enmKind)
3410 {
3411 /*
3412 * We only care about shadow page tables.
3413 */
3414 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
3415 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
3416 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
3417 {
3418 unsigned cPresent = pPage->cPresent;
3419 PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3420 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3421 if (pPT->a[i].n.u1Present)
3422 {
3423 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3424 {
3425 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX32\n", iPage, i, pPT->a[i]));
3426 pPT->a[i].u = 0;
3427 }
3428 if (!--cPresent)
3429 break;
3430 }
3431 break;
3432 }
3433
3434 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3435 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3436 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3437 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3438 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3439 {
3440 unsigned cPresent = pPage->cPresent;
3441 PX86PTPAE pPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3442 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3443 if (pPT->a[i].n.u1Present)
3444 {
3445 if ((pPT->a[i].u & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
3446 {
3447 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
3448 pPT->a[i].u = 0;
3449 }
3450 if (!--cPresent)
3451 break;
3452 }
3453 break;
3454 }
3455 }
3456 if (!--cLeft)
3457 break;
3458 }
3459 }
3460
3461 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3462 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3463 return VINF_SUCCESS;
3464}
3465
3466
3467/**
3468 * Clears the user entry in a user table.
3469 *
3470 * This is used to remove all references to a page when flushing it.
3471 */
3472static void pgmPoolTrackClearPageUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PCPGMPOOLUSER pUser)
3473{
3474 Assert(pUser->iUser != NIL_PGMPOOL_IDX);
3475 Assert(pUser->iUser < pPool->cCurPages);
3476 uint32_t iUserTable = pUser->iUserTable;
3477
3478 /*
3479 * Map the user page.
3480 */
3481 PPGMPOOLPAGE pUserPage = &pPool->aPages[pUser->iUser];
3482 union
3483 {
3484 uint64_t *pau64;
3485 uint32_t *pau32;
3486 } u;
3487 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pUserPage);
3488
3489 LogFlow(("pgmPoolTrackClearPageUser: clear %x in %s (%RGp) (flushing %s)\n", iUserTable, pgmPoolPoolKindToStr(pUserPage->enmKind), pUserPage->Core.Key, pgmPoolPoolKindToStr(pPage->enmKind)));
3490
3491 /* Safety precaution in case we change the paging for other modes too in the future. */
3492 Assert(!pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage));
3493
3494#ifdef VBOX_STRICT
3495 /*
3496 * Some sanity checks.
3497 */
3498 switch (pUserPage->enmKind)
3499 {
3500 case PGMPOOLKIND_32BIT_PD:
3501 case PGMPOOLKIND_32BIT_PD_PHYS:
3502 Assert(iUserTable < X86_PG_ENTRIES);
3503 break;
3504 case PGMPOOLKIND_PAE_PDPT:
3505 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3506 case PGMPOOLKIND_PAE_PDPT_PHYS:
3507 Assert(iUserTable < 4);
3508 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3509 break;
3510 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3511 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3512 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3513 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3514 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3515 case PGMPOOLKIND_PAE_PD_PHYS:
3516 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3517 break;
3518 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3519 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3520 Assert(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING));
3521 break;
3522 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3523 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3524 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3525 break;
3526 case PGMPOOLKIND_64BIT_PML4:
3527 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3528 /* GCPhys >> PAGE_SHIFT is the index here */
3529 break;
3530 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3531 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3532 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3533 break;
3534
3535 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3536 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3537 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3538 break;
3539
3540 case PGMPOOLKIND_ROOT_NESTED:
3541 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3542 break;
3543
3544 default:
3545 AssertMsgFailed(("enmKind=%d\n", pUserPage->enmKind));
3546 break;
3547 }
3548#endif /* VBOX_STRICT */
3549
3550 /*
3551 * Clear the entry in the user page.
3552 */
3553 switch (pUserPage->enmKind)
3554 {
3555 /* 32-bit entries */
3556 case PGMPOOLKIND_32BIT_PD:
3557 case PGMPOOLKIND_32BIT_PD_PHYS:
3558 ASMAtomicWriteSize(&u.pau32[iUserTable], 0);
3559 break;
3560
3561 /* 64-bit entries */
3562 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3563 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3564 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3565 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3566 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3567#if defined(IN_RC)
 3568 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
3569 * non-present PDPT will continue to cause page faults.
3570 */
3571 ASMReloadCR3();
3572#endif
3573 /* no break */
3574 case PGMPOOLKIND_PAE_PD_PHYS:
3575 case PGMPOOLKIND_PAE_PDPT_PHYS:
3576 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3577 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3578 case PGMPOOLKIND_64BIT_PML4:
3579 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3580 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3581 case PGMPOOLKIND_PAE_PDPT:
3582 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3583 case PGMPOOLKIND_ROOT_NESTED:
3584 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3585 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3586 ASMAtomicWriteSize(&u.pau64[iUserTable], 0);
3587 break;
3588
3589 default:
3590 AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
3591 }
3592}
3593
3594
3595/**
3596 * Clears all users of a page.
3597 */
3598static void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
3599{
3600 /*
3601 * Free all the user records.
3602 */
3603 LogFlow(("pgmPoolTrackClearPageUsers %RGp\n", pPage->GCPhys));
3604
3605 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
3606 uint16_t i = pPage->iUserHead;
3607 while (i != NIL_PGMPOOL_USER_INDEX)
3608 {
 3609 /* Clear the entry in the user table. */
3610 pgmPoolTrackClearPageUser(pPool, pPage, &paUsers[i]);
3611
3612 /* Free it. */
3613 const uint16_t iNext = paUsers[i].iNext;
3614 paUsers[i].iUser = NIL_PGMPOOL_IDX;
3615 paUsers[i].iNext = pPool->iUserFreeHead;
3616 pPool->iUserFreeHead = i;
3617
3618 /* Next. */
3619 i = iNext;
3620 }
3621 pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
3622}
3623
3624
3625/**
3626 * Allocates a new physical cross reference extent.
3627 *
3628 * @returns Pointer to the allocated extent on success. NULL if we're out of them.
3629 * @param pVM The VM handle.
3630 * @param piPhysExt Where to store the phys ext index.
3631 */
3632PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
3633{
3634 Assert(PGMIsLockOwner(pVM));
3635 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3636 uint16_t iPhysExt = pPool->iPhysExtFreeHead;
3637 if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)
3638 {
3639 STAM_COUNTER_INC(&pPool->StamTrackPhysExtAllocFailures);
3640 return NULL;
3641 }
3642 PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3643 pPool->iPhysExtFreeHead = pPhysExt->iNext;
3644 pPhysExt->iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
3645 *piPhysExt = iPhysExt;
3646 return pPhysExt;
3647}
3648
3649
3650/**
3651 * Frees a physical cross reference extent.
3652 *
3653 * @param pVM The VM handle.
3654 * @param iPhysExt The extent to free.
3655 */
3656void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
3657{
3658 Assert(PGMIsLockOwner(pVM));
3659 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3660 Assert(iPhysExt < pPool->cMaxPhysExts);
3661 PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3662 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3663 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3664 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3665 pPool->iPhysExtFreeHead = iPhysExt;
3666}
3667
3668
3669/**
 3670 * Frees a list of physical cross reference extents.
 3671 *
 3672 * @param pVM The VM handle.
 3673 * @param iPhysExt The head of the extent list to free.
3674 */
3675void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
3676{
3677 Assert(PGMIsLockOwner(pVM));
3678 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3679
3680 const uint16_t iPhysExtStart = iPhysExt;
3681 PPGMPOOLPHYSEXT pPhysExt;
3682 do
3683 {
3684 Assert(iPhysExt < pPool->cMaxPhysExts);
3685 pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3686 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3687 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3688
3689 /* next */
3690 iPhysExt = pPhysExt->iNext;
3691 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3692
3693 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3694 pPool->iPhysExtFreeHead = iPhysExtStart;
3695}
3696
3697
3698/**
3699 * Insert a reference into a list of physical cross reference extents.
3700 *
3701 * @returns The new tracking data for PGMPAGE.
3702 *
3703 * @param pVM The VM handle.
3704 * @param iPhysExt The physical extent index of the list head.
3705 * @param iShwPT The shadow page table index.
3706 *
3707 */
3708static uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT)
3709{
3710 Assert(PGMIsLockOwner(pVM));
3711 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3712 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
3713
3714 /* special common case. */
3715 if (paPhysExts[iPhysExt].aidx[2] == NIL_PGMPOOL_IDX)
3716 {
3717 paPhysExts[iPhysExt].aidx[2] = iShwPT;
3718 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliasedMany);
3719 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{,,%d}\n", iPhysExt, iShwPT));
3720 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
3721 }
3722
3723 /* general treatment. */
3724 const uint16_t iPhysExtStart = iPhysExt;
3725 unsigned cMax = 15;
3726 for (;;)
3727 {
3728 Assert(iPhysExt < pPool->cMaxPhysExts);
3729 for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
3730 if (paPhysExts[iPhysExt].aidx[i] == NIL_PGMPOOL_IDX)
3731 {
3732 paPhysExts[iPhysExt].aidx[i] = iShwPT;
3733 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliasedMany);
3734 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{%d} i=%d cMax=%d\n", iPhysExt, iShwPT, i, cMax));
3735 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtStart);
3736 }
3737 if (!--cMax)
3738 {
3739 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackOverflows);
3740 pgmPoolTrackPhysExtFreeList(pVM, iPhysExtStart);
3741 LogFlow(("pgmPoolTrackPhysExtInsert: overflow (1) iShwPT=%d\n", iShwPT));
3742 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
3743 }
        /* advance */
        iPhysExt = paPhysExts[iPhysExt].iNext;
        if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)
            break;
 3744 }
3745
3746 /* add another extent to the list. */
3747 PPGMPOOLPHYSEXT pNew = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
3748 if (!pNew)
3749 {
3750 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackOverflows);
3751 pgmPoolTrackPhysExtFreeList(pVM, iPhysExtStart);
3752 LogFlow(("pgmPoolTrackPhysExtInsert: pgmPoolTrackPhysExtAlloc failed iShwPT=%d\n", iShwPT));
3753 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
3754 }
3755 pNew->iNext = iPhysExtStart;
3756 pNew->aidx[0] = iShwPT;
3757 LogFlow(("pgmPoolTrackPhysExtInsert: added new extent %d:{%d}->%d\n", iPhysExt, iShwPT, iPhysExtStart));
3758 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
3759}
3760
3761
3762/**
3763 * Add a reference to guest physical page where extents are in use.
3764 *
3765 * @returns The new tracking data for PGMPAGE.
3766 *
3767 * @param pVM The VM handle.
3768 * @param u16 The ram range flags (top 16-bits).
3769 * @param iShwPT The shadow page table index.
3770 */
3771uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT)
3772{
3773 pgmLock(pVM);
3774 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
3775 {
3776 /*
3777 * Convert to extent list.
3778 */
3779 Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
3780 uint16_t iPhysExt;
3781 PPGMPOOLPHYSEXT pPhysExt = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
3782 if (pPhysExt)
3783 {
3784 LogFlow(("pgmPoolTrackPhysExtAddref: new extent: %d:{%d, %d}\n", iPhysExt, PGMPOOL_TD_GET_IDX(u16), iShwPT));
3785 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliased);
3786 pPhysExt->aidx[0] = PGMPOOL_TD_GET_IDX(u16);
3787 pPhysExt->aidx[1] = iShwPT;
3788 u16 = PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
3789 }
3790 else
3791 u16 = PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
3792 }
3793 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
3794 {
3795 /*
3796 * Insert into the extent list.
3797 */
3798 u16 = pgmPoolTrackPhysExtInsert(pVM, PGMPOOL_TD_GET_IDX(u16), iShwPT);
3799 }
3800 else
3801 STAM_COUNTER_INC(&pVM->pgm.s.StatTrackAliasedLots);
3802 pgmUnlock(pVM);
3803 return u16;
3804}
3805
3806
3807/**
3808 * Clear references to guest physical memory.
3809 *
3810 * @param pPool The pool.
3811 * @param pPage The page.
3812 * @param pPhysPage Pointer to the aPages entry in the ram range.
3813 */
3814void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage)
3815{
3816 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
3817 AssertFatalMsg(cRefs == PGMPOOL_TD_CREFS_PHYSEXT, ("cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
3818
3819 uint16_t iPhysExt = PGM_PAGE_GET_TD_IDX(pPhysPage);
3820 if (iPhysExt != PGMPOOL_TD_IDX_OVERFLOWED)
3821 {
3822 PVM pVM = pPool->CTX_SUFF(pVM);
3823 pgmLock(pVM);
3824
3825 uint16_t iPhysExtPrev = NIL_PGMPOOL_PHYSEXT_INDEX;
3826 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
3827 do
3828 {
3829 Assert(iPhysExt < pPool->cMaxPhysExts);
3830
3831 /*
3832 * Look for the shadow page and check if it's all freed.
3833 */
3834 for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
3835 {
3836 if (paPhysExts[iPhysExt].aidx[i] == pPage->idx)
3837 {
3838 paPhysExts[iPhysExt].aidx[i] = NIL_PGMPOOL_IDX;
3839
3840 for (i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
3841 if (paPhysExts[iPhysExt].aidx[i] != NIL_PGMPOOL_IDX)
3842 {
3843 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d\n", pPhysPage, pPage->idx));
3844 pgmUnlock(pVM);
3845 return;
3846 }
3847
3848 /* we can free the node. */
3849 const uint16_t iPhysExtNext = paPhysExts[iPhysExt].iNext;
3850 if ( iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX
3851 && iPhysExtNext == NIL_PGMPOOL_PHYSEXT_INDEX)
3852 {
3853 /* lonely node */
3854 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
3855 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d lonely\n", pPhysPage, pPage->idx));
3856 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3857 }
3858 else if (iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX)
3859 {
3860 /* head */
3861 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d head\n", pPhysPage, pPage->idx));
3862 PGM_PAGE_SET_TRACKING(pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
3863 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
3864 }
3865 else
3866 {
3867 /* in list */
3868 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d\n", pPhysPage, pPage->idx));
3869 paPhysExts[iPhysExtPrev].iNext = iPhysExtNext;
3870 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
3871 }
3872 iPhysExt = iPhysExtNext;
3873 pgmUnlock(pVM);
3874 return;
3875 }
3876 }
3877
3878 /* next */
3879 iPhysExtPrev = iPhysExt;
3880 iPhysExt = paPhysExts[iPhysExt].iNext;
3881 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3882
3883 pgmUnlock(pVM);
3884 AssertFatalMsgFailed(("not-found! cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
3885 }
3886 else /* nothing to do */
3887 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage]\n", pPhysPage));
3888}
3889
3890
3891/**
3892 * Clear references to guest physical memory.
3893 *
 3894 * This is the same as pgmPoolTracDerefGCPhysHint except that the guest physical address
3895 * is assumed to be correct, so the linear search can be skipped and we can assert
3896 * at an earlier point.
3897 *
3898 * @param pPool The pool.
3899 * @param pPage The page.
3900 * @param HCPhys The host physical address corresponding to the guest page.
3901 * @param GCPhys The guest physical address corresponding to HCPhys.
3902 */
3903static void pgmPoolTracDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhys)
3904{
3905 /*
3906 * Walk range list.
3907 */
3908 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
3909 while (pRam)
3910 {
3911 RTGCPHYS off = GCPhys - pRam->GCPhys;
3912 if (off < pRam->cb)
3913 {
3914 /* does it match? */
3915 const unsigned iPage = off >> PAGE_SHIFT;
3916 Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]));
3917#ifdef LOG_ENABLED
 3918 RTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]);
 3919 Log2(("pgmPoolTracDerefGCPhys %RHp vs %RHp\n", HCPhysPage, HCPhys));
3920#endif
3921 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
3922 {
3923 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage]);
3924 return;
3925 }
3926 break;
3927 }
3928 pRam = pRam->CTX_SUFF(pNext);
3929 }
3930 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp\n", HCPhys, GCPhys));
3931}
3932
3933
3934/**
3935 * Clear references to guest physical memory.
3936 *
3937 * @param pPool The pool.
3938 * @param pPage The page.
3939 * @param HCPhys The host physical address corresponding to the guest page.
 3940 * @param GCPhysHint The guest physical address which may correspond to HCPhys.
3941 */
3942void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint)
3943{
3944 Log4(("pgmPoolTracDerefGCPhysHint %RHp %RGp\n", HCPhys, GCPhysHint));
3945
3946 /*
3947 * Walk range list.
3948 */
3949 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
3950 while (pRam)
3951 {
3952 RTGCPHYS off = GCPhysHint - pRam->GCPhys;
3953 if (off < pRam->cb)
3954 {
3955 /* does it match? */
3956 const unsigned iPage = off >> PAGE_SHIFT;
3957 Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]));
3958 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
3959 {
3960 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage]);
3961 return;
3962 }
3963 break;
3964 }
3965 pRam = pRam->CTX_SUFF(pNext);
3966 }
3967
3968 /*
3969 * Damn, the hint didn't work. We'll have to do an expensive linear search.
3970 */
3971 STAM_COUNTER_INC(&pPool->StatTrackLinearRamSearches);
3972 pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
3973 while (pRam)
3974 {
3975 unsigned iPage = pRam->cb >> PAGE_SHIFT;
3976 while (iPage-- > 0)
3977 {
3978 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
3979 {
3980 Log4(("pgmPoolTracDerefGCPhysHint: Linear HCPhys=%RHp GCPhysHint=%RGp GCPhysReal=%RGp\n",
3981 HCPhys, GCPhysHint, pRam->GCPhys + (iPage << PAGE_SHIFT)));
3982 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage]);
3983 return;
3984 }
3985 }
3986 pRam = pRam->CTX_SUFF(pNext);
3987 }
3988
3989 AssertFatalMsgFailed(("HCPhys=%RHp GCPhysHint=%RGp\n", HCPhys, GCPhysHint));
3990}
3991
3992
3993/**
3994 * Clear references to guest physical memory in a 32-bit / 32-bit page table.
3995 *
3996 * @param pPool The pool.
3997 * @param pPage The page.
3998 * @param pShwPT The shadow page table (mapping of the page).
3999 * @param pGstPT The guest page table.
4000 */
4001DECLINLINE(void) pgmPoolTrackDerefPT32Bit32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT, PCX86PT pGstPT)
4002{
4003 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4004 if (pShwPT->a[i].n.u1Present)
4005 {
4006 Log4(("pgmPoolTrackDerefPT32Bit32Bit: i=%d pte=%RX32 hint=%RX32\n",
4007 i, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
4008 pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK);
4009 if (!--pPage->cPresent)
4010 break;
4011 }
4012}
4013
4014
4015/**
4016 * Clear references to guest physical memory in a PAE / 32-bit page table.
4017 *
4018 * @param pPool The pool.
4019 * @param pPage The page.
4020 * @param pShwPT The shadow page table (mapping of the page).
4021 * @param pGstPT The guest page table (just a half one).
4022 */
4023DECLINLINE(void) pgmPoolTrackDerefPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PT pGstPT)
4024{
4025 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4026 if (pShwPT->a[i].n.u1Present)
4027 {
4028 Log4(("pgmPoolTrackDerefPTPae32Bit: i=%d pte=%RX64 hint=%RX32\n",
4029 i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
4030 pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK);
4031 if (!--pPage->cPresent)
4032 break;
4033 }
4034}
4035
4036
4037/**
4038 * Clear references to guest physical memory in a PAE / PAE page table.
4039 *
4040 * @param pPool The pool.
4041 * @param pPage The page.
4042 * @param pShwPT The shadow page table (mapping of the page).
4043 * @param pGstPT The guest page table.
4044 */
4045DECLINLINE(void) pgmPoolTrackDerefPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
4046{
4047 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4048 if (pShwPT->a[i].n.u1Present)
4049 {
 4050 Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX64 hint=%RX64\n",
4051 i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
4052 pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK);
4053 if (!--pPage->cPresent)
4054 break;
4055 }
4056}
4057
4058
4059/**
4060 * Clear references to guest physical memory in a 32-bit / 4MB page table.
4061 *
4062 * @param pPool The pool.
4063 * @param pPage The page.
4064 * @param pShwPT The shadow page table (mapping of the page).
4065 */
4066DECLINLINE(void) pgmPoolTrackDerefPT32Bit4MB(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT)
4067{
4068 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4069 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4070 if (pShwPT->a[i].n.u1Present)
4071 {
4072 Log4(("pgmPoolTrackDerefPT32Bit4MB: i=%d pte=%RX32 GCPhys=%RGp\n",
4073 i, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys));
4074 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys);
4075 if (!--pPage->cPresent)
4076 break;
4077 }
4078}
4079
4080
4081/**
4082 * Clear references to guest physical memory in a PAE / 2/4MB page table.
4083 *
4084 * @param pPool The pool.
4085 * @param pPage The page.
4086 * @param pShwPT The shadow page table (mapping of the page).
4087 */
4088DECLINLINE(void) pgmPoolTrackDerefPTPaeBig(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT)
4089{
4090 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4091 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4092 if (pShwPT->a[i].n.u1Present)
4093 {
4094 Log4(("pgmPoolTrackDerefPTPaeBig: i=%d pte=%RX64 hint=%RGp\n",
4095 i, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, GCPhys));
4096 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PAE_PG_MASK, GCPhys);
4097 if (!--pPage->cPresent)
4098 break;
4099 }
4100}
4101
4102
4103/**
4104 * Clear references to shadowed pages in an EPT page table.
4105 *
4106 * @param pPool The pool.
4107 * @param pPage The page.
 4108 * @param pShwPT The shadow page table (mapping of the page).
4109 */
4110DECLINLINE(void) pgmPoolTrackDerefPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPT pShwPT)
4111{
4112 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4113 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4114 if (pShwPT->a[i].n.u1Present)
4115 {
4116 Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
4117 i, pShwPT->a[i].u & EPT_PTE_PG_MASK, pPage->GCPhys));
4118 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & EPT_PTE_PG_MASK, GCPhys);
4119 if (!--pPage->cPresent)
4120 break;
4121 }
4122}
4123
4124
4125
4126/**
 4127 * Clear references to shadowed pages in a 32-bit page directory.
4128 *
4129 * @param pPool The pool.
4130 * @param pPage The page.
4131 * @param pShwPD The shadow page directory (mapping of the page).
4132 */
4133DECLINLINE(void) pgmPoolTrackDerefPD(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PD pShwPD)
4134{
4135 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4136 {
4137 if ( pShwPD->a[i].n.u1Present
4138 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
4139 )
4140 {
4141 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PG_MASK);
4142 if (pSubPage)
4143 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4144 else
4145 AssertFatalMsgFailed(("%x\n", pShwPD->a[i].u & X86_PDE_PG_MASK));
4146 }
4147 }
4148}
4149
4150/**
 4151 * Clear references to shadowed pages in a PAE (legacy or 64-bit) page directory.
4152 *
4153 * @param pPool The pool.
4154 * @param pPage The page.
4155 * @param pShwPD The shadow page directory (mapping of the page).
4156 */
4157DECLINLINE(void) pgmPoolTrackDerefPDPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPAE pShwPD)
4158{
4159 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4160 {
4161 if ( pShwPD->a[i].n.u1Present
4162 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
4163 )
4164 {
4165 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK);
4166 if (pSubPage)
4167 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4168 else
4169 AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & X86_PDE_PAE_PG_MASK));
4170 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4171 }
4172 }
4173}
4174
4175/**
4176 * Clear references to shadowed pages in a PAE page directory pointer table.
4177 *
4178 * @param pPool The pool.
4179 * @param pPage The page.
4180 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4181 */
4182DECLINLINE(void) pgmPoolTrackDerefPDPTPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
4183{
4184 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
4185 {
4186 if ( pShwPDPT->a[i].n.u1Present
4187 && !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
4188 )
4189 {
4190 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
4191 if (pSubPage)
4192 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4193 else
4194 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
4195 }
4196 }
4197}
4198
4199
4200/**
4201 * Clear references to shadowed pages in a 64-bit page directory pointer table.
4202 *
4203 * @param pPool The pool.
4204 * @param pPage The page.
4205 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4206 */
4207DECLINLINE(void) pgmPoolTrackDerefPDPT64Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
4208{
4209 for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
4210 {
4211 Assert(!(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING));
4212 if (pShwPDPT->a[i].n.u1Present)
4213 {
4214 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
4215 if (pSubPage)
4216 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4217 else
4218 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
4219 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4220 }
4221 }
4222}
4223
4224
4225/**
4226 * Clear references to shadowed pages in a 64-bit level 4 page table.
4227 *
4228 * @param pPool The pool.
4229 * @param pPage The page.
 4230 * @param pShwPML4 The shadow PML4 (mapping of the page).
4231 */
4232DECLINLINE(void) pgmPoolTrackDerefPML464Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PML4 pShwPML4)
4233{
4234 for (unsigned i = 0; i < RT_ELEMENTS(pShwPML4->a); i++)
4235 {
4236 if (pShwPML4->a[i].n.u1Present)
4237 {
4238 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPML4->a[i].u & X86_PDPE_PG_MASK);
4239 if (pSubPage)
4240 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4241 else
4242 AssertFatalMsgFailed(("%RX64\n", pShwPML4->a[i].u & X86_PML4E_PG_MASK));
4243 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4244 }
4245 }
4246}
4247
4248
4249/**
4250 * Clear references to shadowed pages in an EPT page directory.
4251 *
4252 * @param pPool The pool.
4253 * @param pPage The page.
4254 * @param pShwPD The shadow page directory (mapping of the page).
4255 */
4256DECLINLINE(void) pgmPoolTrackDerefPDEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPD pShwPD)
4257{
4258 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4259 {
4260 if (pShwPD->a[i].n.u1Present)
4261 {
4262 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & EPT_PDE_PG_MASK);
4263 if (pSubPage)
4264 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4265 else
4266 AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & EPT_PDE_PG_MASK));
4267 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4268 }
4269 }
4270}
4271
4272
4273/**
4274 * Clear references to shadowed pages in an EPT page directory pointer table.
4275 *
4276 * @param pPool The pool.
4277 * @param pPage The page.
4278 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4279 */
4280DECLINLINE(void) pgmPoolTrackDerefPDPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPDPT pShwPDPT)
4281{
4282 for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
4283 {
4284 if (pShwPDPT->a[i].n.u1Present)
4285 {
4286 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK);
4287 if (pSubPage)
4288 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4289 else
4290 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK));
4291 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4292 }
4293 }
4294}
4295
4296
4297/**
4298 * Clears all references made by this page.
4299 *
4300 * This includes other shadow pages and GC physical addresses.
4301 *
4302 * @param pPool The pool.
4303 * @param pPage The page.
4304 */
4305static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4306{
4307 /*
4308 * Map the shadow page and take action according to the page kind.
4309 */
4310 void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
4311 switch (pPage->enmKind)
4312 {
4313 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
4314 {
4315 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4316 void *pvGst;
4317 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4318 pgmPoolTrackDerefPT32Bit32Bit(pPool, pPage, (PX86PT)pvShw, (PCX86PT)pvGst);
4319 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4320 break;
4321 }
4322
4323 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
4324 {
4325 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4326 void *pvGst;
4327 int rc = PGM_GCPHYS_2_PTR_EX(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4328 pgmPoolTrackDerefPTPae32Bit(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PT)pvGst);
4329 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4330 break;
4331 }
4332
4333 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
4334 {
4335 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4336 void *pvGst;
4337 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4338 pgmPoolTrackDerefPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
4339 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4340 break;
4341 }
4342
4343 case PGMPOOLKIND_32BIT_PT_FOR_PHYS: /* treat it like a 4 MB page */
4344 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
4345 {
4346 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4347 pgmPoolTrackDerefPT32Bit4MB(pPool, pPage, (PX86PT)pvShw);
4348 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4349 break;
4350 }
4351
4352 case PGMPOOLKIND_PAE_PT_FOR_PHYS: /* treat it like a 2 MB page */
4353 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
4354 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
4355 {
4356 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4357 pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PX86PTPAE)pvShw);
4358 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4359 break;
4360 }
4361
4362 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
4363 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
4364 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
4365 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
4366 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
4367 case PGMPOOLKIND_PAE_PD_PHYS:
4368 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
4369 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
4370 pgmPoolTrackDerefPDPae(pPool, pPage, (PX86PDPAE)pvShw);
4371 break;
4372
4373 case PGMPOOLKIND_32BIT_PD_PHYS:
4374 case PGMPOOLKIND_32BIT_PD:
4375 pgmPoolTrackDerefPD(pPool, pPage, (PX86PD)pvShw);
4376 break;
4377
4378 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
4379 case PGMPOOLKIND_PAE_PDPT:
4380 case PGMPOOLKIND_PAE_PDPT_PHYS:
4381 pgmPoolTrackDerefPDPTPae(pPool, pPage, (PX86PDPT)pvShw);
4382 break;
4383
4384 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
4385 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
4386 pgmPoolTrackDerefPDPT64Bit(pPool, pPage, (PX86PDPT)pvShw);
4387 break;
4388
4389 case PGMPOOLKIND_64BIT_PML4:
4390 pgmPoolTrackDerefPML464Bit(pPool, pPage, (PX86PML4)pvShw);
4391 break;
4392
4393 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
4394 pgmPoolTrackDerefPTEPT(pPool, pPage, (PEPTPT)pvShw);
4395 break;
4396
4397 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
4398 pgmPoolTrackDerefPDEPT(pPool, pPage, (PEPTPD)pvShw);
4399 break;
4400
4401 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
4402 pgmPoolTrackDerefPDPTEPT(pPool, pPage, (PEPTPDPT)pvShw);
4403 break;
4404
4405 default:
4406 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
4407 }
4408
 4409 /* paranoia, clear the shadow page. Remove this later (i.e. let Alloc and ClearAll do it). */
4410 STAM_PROFILE_START(&pPool->StatZeroPage, z);
4411 ASMMemZeroPage(pvShw);
4412 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
4413 pPage->fZeroed = true;
4414 PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
4415}
4416
4417/**
4418 * Flushes a pool page.
4419 *
4420 * This moves the page to the free list after removing all user references to it.
4421 *
4422 * @returns VBox status code.
4423 * @retval VINF_SUCCESS on success.
4424 * @param pPool The pool.
 4425 * @param pPage The shadow page.
 4426 * @param fFlush Flush the TLBs when required (should only be false in very specific use cases!!)
4427 */
4428int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush)
4429{
4430 PVM pVM = pPool->CTX_SUFF(pVM);
4431 bool fFlushRequired = false;
4432
4433 int rc = VINF_SUCCESS;
4434 STAM_PROFILE_START(&pPool->StatFlushPage, f);
4435 LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
4436 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
4437
4438 /*
4439 * Quietly reject any attempts at flushing any of the special root pages.
4440 */
4441 if (pPage->idx < PGMPOOL_IDX_FIRST)
4442 {
4443 AssertFailed(); /* can no longer happen */
4444 Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
4445 return VINF_SUCCESS;
4446 }
4447
4448 pgmLock(pVM);
4449
4450 /*
4451 * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
4452 */
4453 if (pgmPoolIsPageLocked(&pVM->pgm.s, pPage))
4454 {
4455 AssertMsg( pPage->enmKind == PGMPOOLKIND_64BIT_PML4
4456 || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
4457 || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
4458 || pPage->enmKind == PGMPOOLKIND_32BIT_PD
4459 || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
4460 || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
4461 || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
4462 || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
4463 || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
4464 ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
4465 Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
4466 pgmUnlock(pVM);
4467 return VINF_SUCCESS;
4468 }
4469
4470#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4471 /* Start a subset so we won't run out of mapping space. */
4472 PVMCPU pVCpu = VMMGetCpu(pVM);
4473 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
4474#endif
4475
4476 /*
4477 * Mark the page as being in need of an ASMMemZeroPage().
4478 */
4479 pPage->fZeroed = false;
4480
4481#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4482 if (pPage->fDirty)
4483 pgmPoolFlushDirtyPage(pVM, pPool, pPage->idxDirty, false /* do not remove */);
4484#endif
4485
 4486 /* If there are any users of this table, then we *must* issue a TLB flush on all VCPUs. */
4487 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
4488 fFlushRequired = true;
4489
4490 /*
4491 * Clear the page.
4492 */
4493 pgmPoolTrackClearPageUsers(pPool, pPage);
4494 STAM_PROFILE_START(&pPool->StatTrackDeref,a);
4495 pgmPoolTrackDeref(pPool, pPage);
4496 STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
4497
4498 /*
4499 * Flush it from the cache.
4500 */
4501 pgmPoolCacheFlushPage(pPool, pPage);
4502
4503#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4504 /* Heavy stuff done. */
4505 PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
4506#endif
4507
4508 /*
 4509 * Deregister the monitoring.
4510 */
4511 if (pPage->fMonitored)
4512 rc = pgmPoolMonitorFlush(pPool, pPage);
4513
4514 /*
4515 * Free the page.
4516 */
4517 Assert(pPage->iNext == NIL_PGMPOOL_IDX);
4518 pPage->iNext = pPool->iFreeHead;
4519 pPool->iFreeHead = pPage->idx;
4520 pPage->enmKind = PGMPOOLKIND_FREE;
4521 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
4522 pPage->GCPhys = NIL_RTGCPHYS;
4523 pPage->fReusedFlushPending = false;
4524
4525 pPool->cUsedPages--;
4526
4527 /* Flush the TLBs of all VCPUs if required. */
4528 if ( fFlushRequired
4529 && fFlush)
4530 {
4531 PGM_INVL_ALL_VCPU_TLBS(pVM);
4532 }
4533
4534 pgmUnlock(pVM);
4535 STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
4536 return rc;
4537}
4538
4539
4540/**
4541 * Frees a usage of a pool page.
4542 *
 4543 * The caller is responsible for updating the user table so that it no longer
4544 * references the shadow page.
4545 *
4546 * @param pPool The pool.
 4547 * @param pPage The shadow page.
4548 * @param iUser The shadow page pool index of the user table.
4549 * @param iUserTable The index into the user table (shadowed).
4550 */
4551void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
4552{
4553 PVM pVM = pPool->CTX_SUFF(pVM);
4554
4555 STAM_PROFILE_START(&pPool->StatFree, a);
4556 LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%#x iUserTable=%#x\n",
4557 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
4558 Assert(pPage->idx >= PGMPOOL_IDX_FIRST);
4559 pgmLock(pVM);
4560 pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
4561 if (!pPage->fCached)
4562 pgmPoolFlushPage(pPool, pPage);
4563 pgmUnlock(pVM);
4564 STAM_PROFILE_STOP(&pPool->StatFree, a);
4565}
4566
4567
4568/**
 4569 * Makes one or more pages free.
4570 *
4571 * @returns VBox status code.
4572 * @retval VINF_SUCCESS on success.
4573 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
4574 *
4575 * @param pPool The pool.
4576 * @param enmKind Page table kind
4577 * @param iUser The user of the page.
4578 */
4579static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
4580{
4581 PVM pVM = pPool->CTX_SUFF(pVM);
4582
4583 LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
4584
4585 /*
4586 * If the pool isn't full grown yet, expand it.
4587 */
4588 if ( pPool->cCurPages < pPool->cMaxPages
4589#if defined(IN_RC)
4590 /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
4591 && enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
4592 && (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
4593#endif
4594 )
4595 {
4596 STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
4597#ifdef IN_RING3
4598 int rc = PGMR3PoolGrow(pVM);
4599#else
4600 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_POOL_GROW, 0);
4601#endif
4602 if (RT_FAILURE(rc))
4603 return rc;
4604 STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
4605 if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
4606 return VINF_SUCCESS;
4607 }
4608
4609 /*
4610 * Free one cached page.
4611 */
4612 return pgmPoolCacheFreeOne(pPool, iUser);
4613}
4614
4615/**
4616 * Allocates a page from the pool.
4617 *
4618 * This page may actually be a cached page and not in need of any processing
 4619 * on the caller's part.
4620 *
4621 * @returns VBox status code.
4622 * @retval VINF_SUCCESS if a NEW page was allocated.
4623 * @retval VINF_PGM_CACHED_PAGE if a CACHED page was returned.
4624 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
4625 * @param pVM The VM handle.
 4626 * @param GCPhys The GC physical address of the page we're going to shadow.
4627 * For 4MB and 2MB PD entries, it's the first address the
4628 * shadow PT is covering.
4629 * @param enmKind The kind of mapping.
4630 * @param enmAccess Access type for the mapping (only relevant for big pages)
4631 * @param iUser The shadow page pool index of the user table.
4632 * @param iUserTable The index into the user table (shadowed).
4633 * @param ppPage Where to store the pointer to the page. NULL is stored here on failure.
4634 * @param fLockPage Lock the page
4635 */
4636int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage)
4637{
4638 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4639 STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
4640 LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%#x iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
4641 *ppPage = NULL;
4642 /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
4643 * (TRPMR3SyncIDT) because of FF priority. Try fix that?
4644 * Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */
4645
4646 pgmLock(pVM);
4647
4648 if (pPool->fCacheEnabled)
4649 {
4650 int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, enmAccess, iUser, iUserTable, ppPage);
4651 if (RT_SUCCESS(rc2))
4652 {
4653 if (fLockPage)
4654 pgmPoolLockPage(pPool, *ppPage);
4655 pgmUnlock(pVM);
4656 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4657 LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
4658 return rc2;
4659 }
4660 }
4661
4662 /*
4663 * Allocate a new one.
4664 */
4665 int rc = VINF_SUCCESS;
4666 uint16_t iNew = pPool->iFreeHead;
4667 if (iNew == NIL_PGMPOOL_IDX)
4668 {
4669 rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
4670 if (RT_FAILURE(rc))
4671 {
4672 pgmUnlock(pVM);
4673 Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
4674 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4675 return rc;
4676 }
4677 iNew = pPool->iFreeHead;
4678 AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
4679 }
4680
4681 /* unlink the free head */
4682 PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
4683 pPool->iFreeHead = pPage->iNext;
4684 pPage->iNext = NIL_PGMPOOL_IDX;
4685
4686 /*
4687 * Initialize it.
4688 */
4689 pPool->cUsedPages++; /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
4690 pPage->enmKind = enmKind;
4691 pPage->enmAccess = enmAccess;
4692 pPage->GCPhys = GCPhys;
4693 pPage->fSeenNonGlobal = false; /* Set this to 'true' to disable this feature. */
4694 pPage->fMonitored = false;
4695 pPage->fCached = false;
4696#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4697 pPage->fDirty = false;
4698#endif
4699 pPage->fReusedFlushPending = false;
4700 pPage->cModifications = 0;
4701 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
4702 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
4703 pPage->cPresent = 0;
4704 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
4705 pPage->pvLastAccessHandlerFault = 0;
4706 pPage->cLastAccessHandlerCount = 0;
4707 pPage->pvLastAccessHandlerRip = 0;
4708
4709 /*
4710 * Insert into the tracking and cache. If this fails, free the page.
4711 */
4712 int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
4713 if (RT_FAILURE(rc3))
4714 {
4715 pPool->cUsedPages--;
4716 pPage->enmKind = PGMPOOLKIND_FREE;
4717 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
4718 pPage->GCPhys = NIL_RTGCPHYS;
4719 pPage->iNext = pPool->iFreeHead;
4720 pPool->iFreeHead = pPage->idx;
4721 pgmUnlock(pVM);
4722 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4723 Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
4724 return rc3;
4725 }
4726
4727 /*
4728 * Commit the allocation, clear the page and return.
4729 */
4730#ifdef VBOX_WITH_STATISTICS
4731 if (pPool->cUsedPages > pPool->cUsedPagesHigh)
4732 pPool->cUsedPagesHigh = pPool->cUsedPages;
4733#endif
4734
4735 if (!pPage->fZeroed)
4736 {
4737 STAM_PROFILE_START(&pPool->StatZeroPage, z);
4738 void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
4739 ASMMemZeroPage(pv);
4740 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
4741 }
4742
4743 *ppPage = pPage;
4744 if (fLockPage)
4745 pgmPoolLockPage(pPool, pPage);
4746 pgmUnlock(pVM);
4747 LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
4748 rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
4749 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4750 return rc;
4751}
4752
4753
4754/**
4755 * Frees a usage of a pool page.
4756 *
4757 * @param pVM The VM handle.
4758 * @param HCPhys The HC physical address of the shadow page.
4759 * @param iUser The shadow page pool index of the user table.
4760 * @param iUserTable The index into the user table (shadowed).
4761 */
4762void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
4763{
4764 LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
4765 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4766 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
4767}
4768
4769/**
 4770 * Internal worker for finding an 'in-use' shadow page given its physical address.
4771 *
4772 * @returns Pointer to the shadow page structure.
4773 * @param pPool The pool.
4774 * @param HCPhys The HC physical address of the shadow page.
4775 */
4776PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
4777{
4778 PVM pVM = pPool->CTX_SUFF(pVM);
4779
4780 Assert(PGMIsLockOwner(pVM));
4781
4782 /*
4783 * Look up the page.
4784 */
4785 pgmLock(pVM);
4786 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
4787 pgmUnlock(pVM);
4788
4789 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
4790 return pPage;
4791}
4792
4793#ifdef IN_RING3 /* currently only used in ring 3; save some space in the R0 & GC modules (left it here as we might need it elsewhere later on) */
4794/**
4795 * Flush the specified page if present
4796 *
4797 * @param pVM The VM handle.
4798 * @param GCPhys Guest physical address of the page to flush
4799 */
4800void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys)
4801{
4802 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4803
4804 VM_ASSERT_EMT(pVM);
4805
4806 /*
4807 * Look up the GCPhys in the hash.
4808 */
4809 GCPhys = GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
4810 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
4811 if (i == NIL_PGMPOOL_IDX)
4812 return;
4813
4814 do
4815 {
4816 PPGMPOOLPAGE pPage = &pPool->aPages[i];
4817 if (pPage->GCPhys - GCPhys < PAGE_SIZE)
4818 {
4819 switch (pPage->enmKind)
4820 {
4821 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
4822 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
4823 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
4824 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
4825 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
4826 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
4827 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
4828 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
4829 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
4830 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
4831 case PGMPOOLKIND_64BIT_PML4:
4832 case PGMPOOLKIND_32BIT_PD:
4833 case PGMPOOLKIND_PAE_PDPT:
4834 {
4835 Log(("PGMPoolFlushPage: found pgm pool pages for %RGp\n", GCPhys));
4836#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4837 if (pPage->fDirty)
4838 STAM_COUNTER_INC(&pPool->StatForceFlushDirtyPage);
4839 else
4840#endif
4841 STAM_COUNTER_INC(&pPool->StatForceFlushPage);
4842 Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
4843 pgmPoolMonitorChainFlush(pPool, pPage);
4844 return;
4845 }
4846
4847 /* ignore, no monitoring. */
4848 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
4849 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
4850 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
4851 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
4852 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
4853 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
4854 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
4855 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
4856 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
4857 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
4858 case PGMPOOLKIND_ROOT_NESTED:
4859 case PGMPOOLKIND_PAE_PD_PHYS:
4860 case PGMPOOLKIND_PAE_PDPT_PHYS:
4861 case PGMPOOLKIND_32BIT_PD_PHYS:
4862 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
4863 break;
4864
4865 default:
4866 AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
4867 }
4868 }
4869
4870 /* next */
4871 i = pPage->iNext;
4872 } while (i != NIL_PGMPOOL_IDX);
4873 return;
4874}
4875#endif /* IN_RING3 */
4876
4877#ifdef IN_RING3
4878
4879
4880/**
4881 * Reset CPU on hot plugging.
4882 *
4883 * @param pVM The VM handle.
4884 * @param pVCpu The virtual CPU.
4885 */
4886void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu)
4887{
4888 pgmR3ExitShadowModeBeforePoolFlush(pVM, pVCpu);
4889
4890 pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
4891 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
4892 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
4893}
4894
4895
4896/**
4897 * Flushes the entire cache.
4898 *
4899 * It will assert a global CR3 flush (FF) and assumes the caller is aware of
 4900 * this and will execute the CR3 flush.
4901 *
 4902 * @param pVM The VM handle.
4903 */
4904void pgmR3PoolReset(PVM pVM)
4905{
4906 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4907
4908 Assert(PGMIsLockOwner(pVM));
4909 STAM_PROFILE_START(&pPool->StatR3Reset, a);
4910 LogFlow(("pgmR3PoolReset:\n"));
4911
4912 /*
4913 * If there are no pages in the pool, there is nothing to do.
4914 */
4915 if (pPool->cCurPages <= PGMPOOL_IDX_FIRST)
4916 {
4917 STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
4918 return;
4919 }
4920
4921 /*
4922 * Exit the shadow mode since we're going to clear everything,
4923 * including the root page.
4924 */
4925 for (VMCPUID i = 0; i < pVM->cCpus; i++)
4926 {
4927 PVMCPU pVCpu = &pVM->aCpus[i];
4928 pgmR3ExitShadowModeBeforePoolFlush(pVM, pVCpu);
4929 }
4930
4931 /*
4932 * Nuke the free list and reinsert all pages into it.
4933 */
4934 for (unsigned i = pPool->cCurPages - 1; i >= PGMPOOL_IDX_FIRST; i--)
4935 {
4936 PPGMPOOLPAGE pPage = &pPool->aPages[i];
4937
4938 Assert(pPage->Core.Key == MMPage2Phys(pVM, pPage->pvPageR3));
4939 if (pPage->fMonitored)
4940 pgmPoolMonitorFlush(pPool, pPage);
4941 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
4942 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
4943 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
4944 pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
4945 pPage->cModifications = 0;
4946 pPage->GCPhys = NIL_RTGCPHYS;
4947 pPage->enmKind = PGMPOOLKIND_FREE;
4948 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
4949 Assert(pPage->idx == i);
4950 pPage->iNext = i + 1;
4951 pPage->fZeroed = false; /* This could probably be optimized, but better safe than sorry. */
4952 pPage->fSeenNonGlobal = false;
4953 pPage->fMonitored = false;
4954#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4955 pPage->fDirty = false;
4956#endif
4957 pPage->fCached = false;
4958 pPage->fReusedFlushPending = false;
4959 pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
4960 pPage->iAgeNext = NIL_PGMPOOL_IDX;
4961 pPage->iAgePrev = NIL_PGMPOOL_IDX;
4962 pPage->cLocked = 0;
4963 }
4964 pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;
4965 pPool->iFreeHead = PGMPOOL_IDX_FIRST;
4966 pPool->cUsedPages = 0;
4967
4968 /*
4969 * Zap and reinitialize the user records.
4970 */
4971 pPool->cPresent = 0;
4972 pPool->iUserFreeHead = 0;
4973 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
4974 const unsigned cMaxUsers = pPool->cMaxUsers;
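 /* Chain every record into a single free list: record i points to i + 1 and
 the last record is terminated with NIL. The phys ext free list below is
 rebuilt the same way. */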
4975 for (unsigned i = 0; i < cMaxUsers; i++)
4976 {
4977 paUsers[i].iNext = i + 1;
4978 paUsers[i].iUser = NIL_PGMPOOL_IDX;
4979 paUsers[i].iUserTable = 0xfffffffe;
4980 }
4981 paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
4982
4983 /*
4984 * Clear all the GCPhys links and rebuild the phys ext free list.
4985 */
4986 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
4987 pRam;
4988 pRam = pRam->CTX_SUFF(pNext))
4989 {
4990 unsigned iPage = pRam->cb >> PAGE_SHIFT;
4991 while (iPage-- > 0)
4992 PGM_PAGE_SET_TRACKING(&pRam->aPages[iPage], 0);
4993 }
4994
4995 pPool->iPhysExtFreeHead = 0;
4996 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
4997 const unsigned cMaxPhysExts = pPool->cMaxPhysExts;
4998 for (unsigned i = 0; i < cMaxPhysExts; i++)
4999 {
5000 paPhysExts[i].iNext = i + 1;
5001 paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
5002 paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
5003 paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
5004 }
5005 paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
5006
5007 /*
5008 * Just zap the modified list.
5009 */
5010 pPool->cModifiedPages = 0;
5011 pPool->iModifiedHead = NIL_PGMPOOL_IDX;
5012
5013 /*
5014 * Clear the GCPhys hash and the age list.
5015 */
5016 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
5017 pPool->aiHash[i] = NIL_PGMPOOL_IDX;
5018 pPool->iAgeHead = NIL_PGMPOOL_IDX;
5019 pPool->iAgeTail = NIL_PGMPOOL_IDX;
5020
5021#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
5022 /* Clear all dirty pages. */
5023 pPool->idxFreeDirtyPage = 0;
5024 pPool->cDirtyPages = 0;
5025 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
5026 pPool->aIdxDirtyPages[i] = NIL_PGMPOOL_IDX;
5027#endif
5028
5029 /*
5030 * Reinsert active pages into the hash and ensure monitoring chains are correct.
5031 */
5032 for (unsigned i = PGMPOOL_IDX_FIRST_SPECIAL; i < PGMPOOL_IDX_FIRST; i++)
5033 {
5034 PPGMPOOLPAGE pPage = &pPool->aPages[i];
5035 pPage->iNext = NIL_PGMPOOL_IDX;
5036 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
5037 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
5038 pPage->cModifications = 0;
5039 /* ASSUMES that we're not sharing with any of the other special pages (safe for now). */
5040 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
5041 pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
5042 if (pPage->fMonitored)
5043 {
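 /* The physical access handler registration survived the reset; just
 refresh its callbacks and user data, and re-insert the page into the
 GCPhys hash that was zapped above. */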
5044 int rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
5045 pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
5046 pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
5047 pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
5048 pPool->pszAccessHandler);
5049 AssertFatalRCSuccess(rc);
5050 pgmPoolHashInsert(pPool, pPage);
5051 }
5052 Assert(pPage->iUserHead == NIL_PGMPOOL_USER_INDEX); /* for now */
5053 Assert(pPage->iAgeNext == NIL_PGMPOOL_IDX);
5054 Assert(pPage->iAgePrev == NIL_PGMPOOL_IDX);
5055 }
5056
5057 for (VMCPUID i = 0; i < pVM->cCpus; i++)
5058 {
5059 /*
5060 * Re-enter the shadowing mode and assert Sync CR3 FF.
5061 */
5062 PVMCPU pVCpu = &pVM->aCpus[i];
5063 pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
5064 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
5065 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5066 }
5067
5068 STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
5069}
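/* Illustrative sketch (an assumption about the caller, not code from this
 * file): each EMT is expected to service the forced actions raised above on
 * its next pass through the execution loop, along the lines of (the final
 * 'true' being the fGlobal argument):
 *
 *     if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
 *         PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                    CPUMGetGuestCR4(pVCpu), true);
 */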
5070#endif /* IN_RING3 */
5071
5072#ifdef LOG_ENABLED
5073static const char *pgmPoolPoolKindToStr(uint8_t enmKind)
5074{
5075 switch (enmKind)
5076 {
5077 case PGMPOOLKIND_INVALID:
5078 return "PGMPOOLKIND_INVALID";
5079 case PGMPOOLKIND_FREE:
5080 return "PGMPOOLKIND_FREE";
5081 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
5082 return "PGMPOOLKIND_32BIT_PT_FOR_PHYS";
5083 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
5084 return "PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT";
5085 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
5086 return "PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB";
5087 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
5088 return "PGMPOOLKIND_PAE_PT_FOR_PHYS";
5089 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
5090 return "PGMPOOLKIND_PAE_PT_FOR_32BIT_PT";
5091 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
5092 return "PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB";
5093 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
5094 return "PGMPOOLKIND_PAE_PT_FOR_PAE_PT";
5095 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
5096 return "PGMPOOLKIND_PAE_PT_FOR_PAE_2MB";
5097 case PGMPOOLKIND_32BIT_PD:
5098 return "PGMPOOLKIND_32BIT_PD";
5099 case PGMPOOLKIND_32BIT_PD_PHYS:
5100 return "PGMPOOLKIND_32BIT_PD_PHYS";
5101 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
5102 return "PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD";
5103 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
5104 return "PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD";
5105 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
5106 return "PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD";
5107 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
5108 return "PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD";
5109 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
5110 return "PGMPOOLKIND_PAE_PD_FOR_PAE_PD";
5111 case PGMPOOLKIND_PAE_PD_PHYS:
5112 return "PGMPOOLKIND_PAE_PD_PHYS";
5113 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
5114 return "PGMPOOLKIND_PAE_PDPT_FOR_32BIT";
5115 case PGMPOOLKIND_PAE_PDPT:
5116 return "PGMPOOLKIND_PAE_PDPT";
5117 case PGMPOOLKIND_PAE_PDPT_PHYS:
5118 return "PGMPOOLKIND_PAE_PDPT_PHYS";
5119 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
5120 return "PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT";
5121 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
5122 return "PGMPOOLKIND_64BIT_PDPT_FOR_PHYS";
5123 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
5124 return "PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD";
5125 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
5126 return "PGMPOOLKIND_64BIT_PD_FOR_PHYS";
5127 case PGMPOOLKIND_64BIT_PML4:
5128 return "PGMPOOLKIND_64BIT_PML4";
5129 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
5130 return "PGMPOOLKIND_EPT_PDPT_FOR_PHYS";
5131 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
5132 return "PGMPOOLKIND_EPT_PD_FOR_PHYS";
5133 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
5134 return "PGMPOOLKIND_EPT_PT_FOR_PHYS";
5135 case PGMPOOLKIND_ROOT_NESTED:
5136 return "PGMPOOLKIND_ROOT_NESTED";
5137 }
5138 return "Unknown kind!";
5139}
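/* Example use (an assumption, mirroring the Log statements that call this
 * helper elsewhere):
 *
 *     Log(("pgmPoolAlloc: enmKind=%s\n", pgmPoolPoolKindToStr(enmKind)));
 */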
5140#endif /* LOG_ENABLED */