VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 28656

Last change on this file since 28656 was 28656, checked in by vboxsync, 14 years ago

pgmPoolTrackPhysExtDerefGCPhys needs to check the PTE index as well in order not to kick out the wrong slot

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 158.6 KB
Line 
1/* $Id: PGMInternal.h 28656 2010-04-23 14:43:35Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___PGMInternal_h
23#define ___PGMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdmcritsect.h>
33#include <VBox/pdmapi.h>
34#include <VBox/dis.h>
35#include <VBox/dbgf.h>
36#include <VBox/log.h>
37#include <VBox/gmm.h>
38#include <VBox/hwaccm.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/avl.h>
42#include <iprt/critsect.h>
43#include <iprt/sha.h>
44
45
46
/** @defgroup grp_pgm_int Internals
 * @ingroup grp_pgm
 * @internal
 * @{
 */


/** @name PGM Compile Time Config
 * @{
 */

/**
 * Indicates that there are no guest mappings to care about.
 * Currently only raw-mode related code uses mappings, i.e. RC and R3 code.
 */
#if defined(IN_RING0) || !defined(VBOX_WITH_RAW_MODE)
# define PGM_WITHOUT_MAPPINGS
#endif

/**
 * Solve page is out of sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it if it will break something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Optimization for PAE page tables that are modified often
 */
//#if 0 /* disabled again while debugging */
#ifndef IN_RC
# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
#endif
//#endif

/**
 * Large page support enabled only on 64 bits hosts; applies to nested paging only.
 */
#if (HC_ARCH_BITS == 64) && !defined(IN_RC)
# define PGM_WITH_LARGE_PAGES
#endif

/**
 * Sync N pages instead of a whole page table
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 *
 * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
 * world switch overhead, so let's sync more.
 */
# ifdef IN_RING0
/* Chose 32 based on the compile test in #4219; 64 shows worse stats.
 * 32 again shows better results than 16; slightly more overhead in the \#PF handler,
 * but ~5% fewer faults.
 */
# define PGM_SYNC_NR_PAGES              32
#else
# define PGM_SYNC_NR_PAGES              8
#endif

/**
 * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t))
 *
 * NOTE(review): the value 64 exceeds sizeof(uint64_t) == 8; the bound
 * presumably means the number of bits in a uint64_t (one bit per entry) --
 * confirm against the physical read/write cache implementation.
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)


/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif

/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*#define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */

/** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
 * Enables real write monitoring of pages, i.e. mapping them read-only and
 * only making them writable when getting a write access #PF. */
#define VBOX_WITH_REAL_WRITE_MONITORED_PAGES

/** @} */
146
147
/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT          RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING            RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING             RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY         RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY         RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED      RT_BIT_64(11)
#endif

/** @} */
184
/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @{ */
#define PGM_TYPE_REAL                   1
#define PGM_TYPE_PROT                   2
#define PGM_TYPE_32BIT                  3
#define PGM_TYPE_PAE                    4
#define PGM_TYPE_AMD64                  5
#define PGM_TYPE_NESTED                 6
#define PGM_TYPE_EPT                    7
#define PGM_TYPE_MAX                    PGM_TYPE_EPT
/** @} */

/** Macro for checking if the guest is using paging.
 * True when the guest mode is a paged one (32-bit or better) and the shadow
 * mode is not one of the hardware-assisted ones (nested / EPT).
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uGstType, uShwType) \
    (    (uGstType) >= PGM_TYPE_32BIT \
     && !(   (uShwType) == PGM_TYPE_NESTED \
          || (uShwType) == PGM_TYPE_EPT))

/** Macro for checking if the guest supports the NX bit.
 * True when the guest mode is PAE or better and the shadow mode is not one
 * of the hardware-assisted ones (nested / EPT).
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uGstType, uShwType) \
    (    (uGstType) >= PGM_TYPE_PAE \
     && !(   (uShwType) == PGM_TYPE_NESTED \
          || (uShwType) == PGM_TYPE_EPT))
216
217
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_HCPHYS_2_PTR_BY_PGM
 * Maps a HC physical page pool address to a virtual address.
 * Same as PGM_HCPHYS_2_PTR except it takes the PGM instance data instead of
 * the VM handle.
 *
 * @returns VBox status code.
 * @param   pPGM    The PGM instance data.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
     pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
     PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_BY_PGMCPU
 * Maps a GC physical page address to a virtual address.
 * Same as PGM_GCPHYS_2_PTR except it takes the per-CPU PGM instance data.
 *
 * @returns VBox status code.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
     pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
     PGM_GCPHYS_2_PTR(PGMCPU2VM(pPGM), GCPhys, ppv)
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps a unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
     PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif
323
/** @def PGM_INVL_PG
 * Invalidates a page.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCVirt      The virtual address of the page to invalidate.
 *
 * @remark  The R0 and R3 expansions are identical; only the RC build uses the
 *          raw INVLPG instruction directly.
 */
#ifdef IN_RC
# define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_PG_ALL_VCPU
 * Invalidates a page on all VCPUs
 *
 * @param   pVM         The VM handle.
 * @param   GCVirt      The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry.
 * In RC this is done by reloading CR3 (a full TLB flush); otherwise the whole
 * TLB of the VCPU is flushed.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCVirt      The virtual address within the page directory to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_VCPU_TLBS()
 * Invalidates the TLBs of the specified VCPU
 *
 * @param   pVCpu       The VMCPU handle.
 */
#ifdef IN_RC
# define PGM_INVL_VCPU_TLBS(pVCpu)              ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_ALL_VCPU_TLBS()
 * Invalidates the TLBs of all VCPUs
 *
 * @param   pVM         The VM handle.
 */
#ifdef IN_RC
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#else
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#endif
391
/** Size of the GCPtrConflict array in PGMMAPPING.
 * @remarks Must be a power of two. */
#define PGMMAPPING_CONFLICT_MAX         8

/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked list in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry. */
    R3PTRTYPE(struct PGMMAPPING *)      pNextR3;
    /** Pointer to next entry. */
    R0PTRTYPE(struct PGMMAPPING *)      pNextR0;
    /** Pointer to next entry. */
    RCPTRTYPE(struct PGMMAPPING *)      pNextRC;
    /** Indicate whether this entry is finalized. */
    bool                                fFinalized;
    /** Start Virtual address. */
    RTGCPTR                             GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCPTR                             GCPtrLast;
    /** Range size (bytes). */
    RTGCPTR                             cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE)           pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *)                   pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Last 8 addresses that caused conflicts (ring buffer of
     *  PGMMAPPING_CONFLICT_MAX entries). */
    RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    /** Number of conflicts for this hypervisor mapping. */
    uint32_t                            cConflicts;
    /** Number of page tables. */
    uint32_t                            cPTs;

    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length.
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                        HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                        HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                        HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT)               pPTR3;
        /** The HC virtual address of the two PAE page table. (i.e 1024 entries instead of 512) */
        R3PTRTYPE(PX86PTPAE)            paPaePTsR3;
        /** The RC virtual address of the 32-bit page table. */
        RCPTRTYPE(PX86PT)               pPTRC;
        /** The RC virtual address of the two PAE page table. */
        RCPTRTYPE(PX86PTPAE)            paPaePTsRC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PX86PT)               pPTR0;
        /** The R0 virtual address of the two PAE page table. */
        R0PTRTYPE(PX86PTPAE)            paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
458
459
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE                  enmType;
    /** Number of pages to update. */
    uint32_t                            cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *)                   pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)      pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *)                   pvUserR0;
    /** Pointer to RC callback function. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)      pfnHandlerRC;
    /** User argument for RC handlers. */
    RCPTRTYPE(void *)                   pvUserRC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
494
495
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t                             offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     * (See PGMPHYS2VIRTHANDLER_IN_TREE / _IS_HEAD / _OFF_MASK below.)
     */
    int32_t                             offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)
523
524
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Size of the range (in bytes). */
    RTGCPTR                             cb;
    /** Number of cache pages. */
    uint32_t                            cPages;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Pointer to the RC callback function. */
    RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
#if HC_ARCH_BITS == 64
    /** Explicit padding so the layout matches on 32-bit and 64-bit hosts. */
    RTRCPTR                             padding;
#endif
    /** Pointer to the R3 callback function for invalidation. */
    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    /** Pointer to the R3 callback function. */
    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored ranged. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
561
562
/**
 * Page type.
 *
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks This is used in the saved state, so changes to it requires bumping
 *          the saved state version.
 * @todo    So, convert to \#defines!
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END <= 7);

/** @name Page type predicates.
 * These rely on the ordering of the PGMPAGETYPE values above.
 * NOTE(review): PGMPAGETYPE_INVALID (0) also satisfies the <= comparisons;
 * callers presumably never pass it -- verify.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type)   ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type)  ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type)        ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type)        ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type)         ( (type) == PGMPAGETYPE_MMIO )
/** @} */
601
602
/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon). Which means that we'll be
 * using PGM_PAGE_GET_*, PGM_PAGE_IS_ and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and the Page ID.
     * Bits [0-27] hold the page ID, bits [28-63] hold HCPhys >> 12
     * (see PGM_PAGE_INIT / PGM_PAGE_GET_HCPHYS / PGM_PAGE_GET_PAGEID). */
    RTHCPHYS    HCPhysAndPageID;
    /** Combination of:
     *  - [0-7]: u2HandlerPhysStateY - the physical handler state
     *    (PGM_PAGE_HNDL_PHYS_STATE_*).
     *  - [8-9]: u2HandlerVirtStateY - the virtual handler state
     *    (PGM_PAGE_HNDL_VIRT_STATE_*).
     *  - [10-12]: 3 unused bits.
     *  - [13-14]: u2PDEType - paging structure needed to map the page
     *    (PGM_PAGE_PDE_TYPE_*, see PGM_PAGE_SET_PDE_TYPE which works on
     *    au8[1] & 0x60).
     *  - [15]: fWrittenToY - flag indicating that a write monitored page was
     *    written to when set (au8[1] & 0x80, see PGM_PAGE_SET_WRITTEN_TO).
     *  @remarks Warning! All accesses to the bits are hardcoded.
     *
     *  @todo Change this to a union with both bitfields, u8 and u accessors.
     *        That'll help deal with some of the hardcoded accesses.
     *
     *  @todo Include uStateY and uTypeY as well so it becomes 32-bit. This
     *        will make it possible to turn some of the 16-bit accesses into
     *        32-bit ones, which may be efficient (stalls).
     */
    RTUINT16U   u16MiscY;
    /** The page state.
     * Only 3 bits are really needed for this. */
    uint16_t    uStateY   : 3;
    /** The page type (PGMPAGETYPE).
     * Only 3 bits are really needed for this. */
    uint16_t    uTypeY    : 3;
    /** PTE index for usage tracking (page pool). */
    uint16_t    uPteIdx   : 10;
    /** Usage tracking (page pool). */
    uint16_t    u16TrackingY;
    /** The number of read locks on this page. */
    uint8_t     cReadLocksY;
    /** The number of write locks on this page. */
    uint8_t     cWriteLocksY;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;
657
658
/**
 * Clears the page structure.
 * Resets every field to zero; the resulting state is PGM_PAGE_STATE_ZERO
 * with type PGMPAGETYPE_INVALID.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
    do { \
        (pPage)->HCPhysAndPageID     = 0; \
        (pPage)->uStateY             = 0; \
        (pPage)->uTypeY              = 0; \
        (pPage)->uPteIdx             = 0; \
        (pPage)->u16MiscY.u          = 0; \
        (pPage)->u16TrackingY        = 0; \
        (pPage)->cReadLocksY         = 0; \
        (pPage)->cWriteLocksY        = 0; \
    } while (0)

/**
 * Initializes the page structure.
 * Packs the page-aligned host physical address and the page ID into
 * HCPhysAndPageID (ID in bits 0-27, HCPhys >> 12 in bits 28-63) and zeroes
 * all tracking fields.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The host physical address (must be page aligned and
 *                      below 2^48, asserted fatally).
 * @param   _idPage     The page ID (only the low 28 bits are kept).
 * @param   _uType      The page type (PGMPAGETYPE).
 * @param   _uState     The page state (PGM_PAGE_STATE_*).
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
    do { \
        RTHCPHYS SetHCPhysTmp = (_HCPhys); \
        AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
        (pPage)->HCPhysAndPageID     = (SetHCPhysTmp << (28-12)) | ((_idPage) & UINT32_C(0x0fffffff)); \
        (pPage)->uStateY             = (_uState); \
        (pPage)->uTypeY              = (_uType); \
        (pPage)->uPteIdx             = 0; \
        (pPage)->u16MiscY.u          = 0; \
        (pPage)->u16TrackingY        = 0; \
        (pPage)->cReadLocksY         = 0; \
        (pPage)->cWriteLocksY        = 0; \
    } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * Uses the VM's shared zero page address and NIL_GMM_PAGEID.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   pVM         The VM handle (for getting the zero page address).
 * @param   uType       The page type (PGMPAGETYPE).
 */
#define PGM_PAGE_INIT_ZERO(pPage, pVM, uType)   \
    PGM_PAGE_INIT((pPage), (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (uType), PGM_PAGE_STATE_ZERO)
701
702
/** @name The Page state, PGMPAGE::uStateY.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO             0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED        1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page remapped as read-write and the state
 * moved back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED  2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED           3
/** The page is ballooned, so no longer available for this VM. */
#define PGM_PAGE_STATE_BALLOONED        4
/** @} */


/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage)       ( (pPage)->uStateY )

/**
 * Sets the page state.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new page state.
 */
#define PGM_PAGE_SET_STATE(pPage, _uState) do { (pPage)->uStateY = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * Extracts bits [28-63] of HCPhysAndPageID and shifts them back to a
 * page-aligned address.
 * @returns host physical address (RTHCPHYS).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage)      ( ((pPage)->HCPhysAndPageID >> 28) << 12 )

/**
 * Sets the host physical address of the guest page.
 * Preserves the page ID in the low 28 bits.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The new host physical address (page aligned, < 2^48,
 *                      asserted fatally).
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
    do { \
        RTHCPHYS SetHCPhysTmp = (_HCPhys); \
        AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
        (pPage)->HCPhysAndPageID = ((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) \
                                 | (SetHCPhysTmp << (28-12)); \
    } while (0)

/**
 * Get the Page ID.
 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage)      (  (uint32_t)((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) )

/**
 * Sets the Page ID.
 * Preserves the HCPhys part in the high 36 bits.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _idPage     The new page ID (only the low 28 bits are kept).
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) \
    do { \
        (pPage)->HCPhysAndPageID = (((pPage)->HCPhysAndPageID) & UINT64_C(0xfffffffff0000000)) \
                                 | ((_idPage) & UINT32_C(0x0fffffff)); \
    } while (0)

/**
 * Get the Chunk ID.
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage)     ( PGM_PAGE_GET_PAGEID(pPage) >> GMM_CHUNKID_SHIFT )

/**
 * Get the index of the page within the allocation chunk.
 * @returns The page index.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (uint32_t)((pPage)->HCPhysAndPageID & GMM_PAGEID_IDX_MASK) )
791
/**
 * Gets the page type.
 * @returns The page type (PGMPAGETYPE).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TYPE(pPage)        (pPage)->uTypeY

/**
 * Sets the page type.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _enmType    The new page type (PGMPAGETYPE).
 */
#define PGM_PAGE_SET_TYPE(pPage, _enmType) do { (pPage)->uTypeY = (_enmType); } while (0)

/**
 * Gets the page table index
 * @returns The page table index.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PTE_INDEX(pPage)   (pPage)->uPteIdx

/**
 * Sets the page table index
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _iPte       New page table index.
 */
#define PGM_PAGE_SET_PTE_INDEX(pPage, _iPte) do { (pPage)->uPteIdx = (_iPte); } while (0)

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_MMIO(pPage)         ( (pPage)->uTypeY == PGMPAGETYPE_MMIO )

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->uStateY == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage)       ( (pPage)->uStateY == PGM_PAGE_STATE_SHARED )

/**
 * Checks if the page is ballooned.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_BALLOONED(pPage)    ( (pPage)->uStateY == PGM_PAGE_STATE_BALLOONED )

/**
 * Marks the page as written to (for GMM change monitoring).
 * Sets bit 15 of u16MiscY (the fWrittenToY flag).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage)  do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x80); } while (0)

/**
 * Clears the written-to indicator.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0x7f); } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage)   ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) )
866
867/** @name PT usage values (PGMPAGE::u2PDEType).
868 *
869 * @{ */
870/** Either as a PT or PDE. */
871#define PGM_PAGE_PDE_TYPE_DONTCARE 0
872/** Must use a page table to map the range. */
873#define PGM_PAGE_PDE_TYPE_PT 1
874/** Can use a page directory entry to map the continous range. */
875#define PGM_PAGE_PDE_TYPE_PDE 2
876/** Can use a page directory entry to map the continous range - temporarily disabled (by page monitoring). */
877#define PGM_PAGE_PDE_TYPE_PDE_DISABLED 3
878/** @} */
879
880/**
881 * Set the PDE type of the page
882 * @param pPage Pointer to the physical guest page tracking structure.
883 * @param uType PGM_PAGE_PDE_TYPE_*
884 */
885#define PGM_PAGE_SET_PDE_TYPE(pPage, uType) \
886 do { \
887 (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0x9f)) \
888 | (((uType) & UINT8_C(0x03)) << 5); \
889 } while (0)
890
891/**
892 * Checks if the page was marked being part of a large page
893 * @returns true/false.
894 * @param pPage Pointer to the physical guest page tracking structure.
895 */
896#define PGM_PAGE_GET_PDE_TYPE(pPage) ( ((pPage)->u16MiscY.au8[1] & UINT8_C(0x60)) >> 5)
897
898/** Enables optimized access handler tests.
899 * These optimizations make ASSUMPTIONS about the state values and the u16MiscY
900 * layout. When enabled, the compiler should normally generate more compact
901 * code.
902 */
903#define PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS 1
904
905/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateY).
906 *
907 * @remarks The values are assigned in order of priority, so we can calculate
908 * the correct state for a page with different handlers installed.
909 * @{ */
910/** No handler installed. */
911#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
912/** Monitoring is temporarily disabled. */
913#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED 1
914/** Write access is monitored. */
915#define PGM_PAGE_HNDL_PHYS_STATE_WRITE 2
916/** All access is monitored. */
917#define PGM_PAGE_HNDL_PHYS_STATE_ALL 3
918/** @} */
919
920/**
921 * Gets the physical access handler state of a page.
922 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
923 * @param pPage Pointer to the physical guest page tracking structure.
924 */
925#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) \
926 ( (pPage)->u16MiscY.au8[0] )
927
928/**
929 * Sets the physical access handler state of a page.
930 * @param pPage Pointer to the physical guest page tracking structure.
931 * @param _uState The new state value.
932 */
933#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
934 do { (pPage)->u16MiscY.au8[0] = (_uState); } while (0)
935
936/**
937 * Checks if the page has any physical access handlers, including temporarily disabled ones.
938 * @returns true/false
939 * @param pPage Pointer to the physical guest page tracking structure.
940 */
941#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) \
942 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )
943
944/**
945 * Checks if the page has any active physical access handlers.
946 * @returns true/false
947 * @param pPage Pointer to the physical guest page tracking structure.
948 */
949#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) \
950 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
951
952
953/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateY).
954 *
955 * @remarks The values are assigned in order of priority, so we can calculate
956 * the correct state for a page with different handlers installed.
957 * @{ */
958/** No handler installed. */
959#define PGM_PAGE_HNDL_VIRT_STATE_NONE 0
960/* 1 is reserved so the lineup is identical with the physical ones. */
961/** Write access is monitored. */
962#define PGM_PAGE_HNDL_VIRT_STATE_WRITE 2
963/** All access is monitored. */
964#define PGM_PAGE_HNDL_VIRT_STATE_ALL 3
965/** @} */
966
967/**
968 * Gets the virtual access handler state of a page.
969 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
970 * @param pPage Pointer to the physical guest page tracking structure.
971 */
972#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ((uint8_t)( (pPage)->u16MiscY.au8[1] & UINT8_C(0x03) ))
973
974/**
975 * Sets the virtual access handler state of a page.
976 * @param pPage Pointer to the physical guest page tracking structure.
977 * @param _uState The new state value.
978 */
979#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
980 do { \
981 (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0xfc)) \
982 | ((_uState) & UINT8_C(0x03)); \
983 } while (0)
984
985/**
986 * Checks if the page has any virtual access handlers.
987 * @returns true/false
988 * @param pPage Pointer to the physical guest page tracking structure.
989 */
990#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) \
991 ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
992
993/**
994 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
995 * virtual handlers.
996 * @returns true/false
997 * @param pPage Pointer to the physical guest page tracking structure.
998 */
999#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) \
1000 PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
1001
1002
1003/**
1004 * Checks if the page has any access handlers, including temporarily disabled ones.
1005 * @returns true/false
1006 * @param pPage Pointer to the physical guest page tracking structure.
1007 */
1008#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
1009# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
1010 ( ((pPage)->u16MiscY.u & UINT16_C(0x0303)) != 0 )
1011#else
1012# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
1013 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE \
1014 || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
1015#endif
1016
1017/**
1018 * Checks if the page has any active access handlers.
1019 * @returns true/false
1020 * @param pPage Pointer to the physical guest page tracking structure.
1021 */
1022#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
1023# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
1024 ( ((pPage)->u16MiscY.u & UINT16_C(0x0202)) != 0 )
1025#else
1026# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
1027 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
1028 || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )
1029#endif
1030
1031/**
1032 * Checks if the page has any active access handlers catching all accesses.
1033 * @returns true/false
1034 * @param pPage Pointer to the physical guest page tracking structure.
 * NOTE(review): the optimized variant ORs the two state bytes and compares the
 * low two bits against ALL (3). Unlike the unoptimized form, it would also
 * yield true for phys==DISABLED(1) with virt==WRITE(2); presumably that state
 * combination cannot occur - confirm against the handler state machine.
1035 */
1036#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
1037# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
1038 ( ( ((pPage)->u16MiscY.au8[0] | (pPage)->u16MiscY.au8[1]) & UINT8_C(0x3) ) \
1039 == PGM_PAGE_HNDL_PHYS_STATE_ALL )
1040#else
1041# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
1042 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL \
1043 || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL )
1044#endif
1045
1046
1047/** @def PGM_PAGE_GET_TRACKING
1048 * Gets the packed shadow page pool tracking data associated with a guest page.
1049 * @returns uint16_t containing the data.
1050 * @param pPage Pointer to the physical guest page tracking structure.
1051 */
1052#define PGM_PAGE_GET_TRACKING(pPage) ( (pPage)->u16TrackingY )
1053
1054/** @def PGM_PAGE_SET_TRACKING
1055 * Sets the packed shadow page pool tracking data associated with a guest page.
1056 * @param pPage Pointer to the physical guest page tracking structure.
1057 * @param u16TrackingData The tracking data to store.
1058 */
1059#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
1060 do { (pPage)->u16TrackingY = (u16TrackingData); } while (0)
1061
1062/** @def PGM_PAGE_GET_TD_CREFS
1063 * Gets the @a cRefs tracking data member (reference count part of the packed data).
1064 * @returns cRefs.
1065 * @param pPage Pointer to the physical guest page tracking structure.
1066 */
1067#define PGM_PAGE_GET_TD_CREFS(pPage) \
1068 ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)
1069
1070/** @def PGM_PAGE_GET_TD_IDX
1071 * Gets the @a idx tracking data member (shadow pool page index part of the packed data).
1072 * @returns idx.
1073 * @param pPage Pointer to the physical guest page tracking structure.
1074 */
1075#define PGM_PAGE_GET_TD_IDX(pPage) \
1076 ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
1077
1078
1079/** Max number of locks on a page. */
1080#define PGM_PAGE_MAX_LOCKS UINT8_C(254)
1081
1082/** Get the read lock count.
1083 * @returns count.
1084 * @param pPage Pointer to the physical guest page tracking structure.
1085 */
1086#define PGM_PAGE_GET_READ_LOCKS(pPage) ( (pPage)->cReadLocksY )
1087
1088/** Get the write lock count.
1089 * @returns count.
1090 * @param pPage Pointer to the physical guest page tracking structure.
1091 */
1092#define PGM_PAGE_GET_WRITE_LOCKS(pPage) ( (pPage)->cWriteLocksY )
1093
1094/** Decrement the read lock counter (no underflow check; caller must hold a lock).
1095 * @param pPage Pointer to the physical guest page tracking structure.
1096 */
1097#define PGM_PAGE_DEC_READ_LOCKS(pPage) do { --(pPage)->cReadLocksY; } while (0)
1098
1099/** Decrement the write lock counter (no underflow check; caller must hold a lock).
1100 * @param pPage Pointer to the physical guest page tracking structure.
1101 */
1102#define PGM_PAGE_DEC_WRITE_LOCKS(pPage) do { --(pPage)->cWriteLocksY; } while (0)
1103
1104/** Increment the read lock counter (no overflow check here; see PGM_PAGE_MAX_LOCKS).
1105 * @param pPage Pointer to the physical guest page tracking structure.
1106 */
1107#define PGM_PAGE_INC_READ_LOCKS(pPage) do { ++(pPage)->cReadLocksY; } while (0)
1108
1109/** Increment the write lock counter (no overflow check here; see PGM_PAGE_MAX_LOCKS).
1110 * @param pPage Pointer to the physical guest page tracking structure.
1111 */
1112#define PGM_PAGE_INC_WRITE_LOCKS(pPage) do { ++(pPage)->cWriteLocksY; } while (0)
1113
1114
1115#if 0
1116/** Enables sanity checking of write monitoring using CRC-32. */
1117# define PGMLIVESAVERAMPAGE_WITH_CRC32
1118#endif
1119
1120/**
1121 * Per page live save tracking data.
1122 */
1123typedef struct PGMLIVESAVERAMPAGE
1124{
1125 /** Number of times it has been dirtied. */
1126 uint32_t cDirtied : 24;
1127 /** Whether it is currently dirty. */
1128 uint32_t fDirty : 1;
1129 /** Ignore the page.
1130 * This is used for pages that have been MMIO, MMIO2 or ROM pages once. We will
1131 * deal with these after pausing the VM and DevPCI has said its bit about
1132 * remappings. */
1133 uint32_t fIgnore : 1;
1134 /** Was a ZERO page last time around. */
1135 uint32_t fZero : 1;
1136 /** Was a SHARED page last time around. */
1137 uint32_t fShared : 1;
1138 /** Whether the page is/was write monitored in a previous pass. */
1139 uint32_t fWriteMonitored : 1;
1140 /** Whether the page is/was write monitored earlier in this pass. */
1141 uint32_t fWriteMonitoredJustNow : 1;
1142 /** Bits reserved for future use. */
1143 uint32_t u2Reserved : 2;
1144#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1145 /** CRC-32 for the page. This is for internal consistency checks. */
1146 uint32_t u32Crc;
1147#endif
1148} PGMLIVESAVERAMPAGE;
1149#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
1150AssertCompileSize(PGMLIVESAVERAMPAGE, 8);
1151#else
1152AssertCompileSize(PGMLIVESAVERAMPAGE, 4);
1153#endif
1154/** Pointer to the per page live save tracking data. */
1155typedef PGMLIVESAVERAMPAGE *PPGMLIVESAVERAMPAGE;
1156
1157/** The max value of PGMLIVESAVERAMPAGE::cDirtied. */
1158#define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
1159
1160
1161/**
1162 * Ram range for GC Phys to HC Phys conversion.
1163 *
1164 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
1165 * conversions too, but we'll let MM handle that for now.
1166 *
1167 * This structure is used by linked lists in both GC and HC.
1168 */
1169typedef struct PGMRAMRANGE
1170{
1171 /** Start of the range. Page aligned. */
1172 RTGCPHYS GCPhys;
1173 /** Size of the range. (Page aligned of course). */
1174 RTGCPHYS cb;
1175 /** Pointer to the next RAM range - for R3. */
1176 R3PTRTYPE(struct PGMRAMRANGE *) pNextR3;
1177 /** Pointer to the next RAM range - for R0. */
1178 R0PTRTYPE(struct PGMRAMRANGE *) pNextR0;
1179 /** Pointer to the next RAM range - for RC. */
1180 RCPTRTYPE(struct PGMRAMRANGE *) pNextRC;
1181 /** PGM_RAM_RANGE_FLAGS_* flags. */
1182 uint32_t fFlags;
1183 /** Last address in the range (inclusive). Page aligned (-1). */
1184 RTGCPHYS GCPhysLast;
1185 /** Start of the HC mapping of the range. This is only used for MMIO2. */
1186 R3PTRTYPE(void *) pvR3;
1187 /** Live save per page tracking data. */
1188 R3PTRTYPE(PPGMLIVESAVERAMPAGE) paLSPages;
1189 /** The range description. */
1190 R3PTRTYPE(const char *) pszDesc;
1191 /** Pointer to self - R0 pointer. */
1192 R0PTRTYPE(struct PGMRAMRANGE *) pSelfR0;
1193 /** Pointer to self - RC pointer. */
1194 RCPTRTYPE(struct PGMRAMRANGE *) pSelfRC;
1195 /** Padding to make aPages aligned on sizeof(PGMPAGE). */
1196 uint32_t au32Alignment2[HC_ARCH_BITS == 32 ? 1 : 3];
1197 /** Array of physical guest page tracking structures. */
1198 PGMPAGE aPages[1];
1199} PGMRAMRANGE;
1200/** Pointer to Ram range for GC Phys to HC Phys conversion. */
1201typedef PGMRAMRANGE *PPGMRAMRANGE;
1202
1203/** @name PGMRAMRANGE::fFlags
1204 * @{ */
1205/** The RAM range is floating around as an independent guest mapping. */
1206#define PGM_RAM_RANGE_FLAGS_FLOATING RT_BIT(20)
1207/** Ad hoc RAM range for a ROM mapping. */
1208#define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM RT_BIT(21)
1209/** Ad hoc RAM range for an MMIO mapping. */
1210#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO RT_BIT(22)
1211/** Ad hoc RAM range for an MMIO2 mapping. */
1212#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2 RT_BIT(23)
1213/** @} */
1214
1215/** Tests if a RAM range is an ad hoc one or not.
1216 * @returns true/false.
1217 * @param pRam The RAM range.
1218 */
1219#define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
1220 (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
1221
1222
1223/**
1224 * Per page tracking structure for ROM image.
1225 *
1226 * A ROM image may have a shadow page, in which case we may have two pages
1227 * backing it. This structure contains the PGMPAGE for both while
1228 * PGMRAMRANGE has a copy of the active one. It is important that these
1229 * aren't out of sync in any regard other than page pool tracking data.
1230 */
1231typedef struct PGMROMPAGE
1232{
1233 /** The page structure for the virgin ROM page. */
1234 PGMPAGE Virgin;
1235 /** The page structure for the shadow RAM page. */
1236 PGMPAGE Shadow;
1237 /** The current protection setting. */
1238 PGMROMPROT enmProt;
1239 /** Live save status information. Makes use of unused alignment space. */
1240 struct
1241 {
1242 /** The previous protection value. */
1243 uint8_t u8Prot;
1244 /** Written to flag set by the handler. */
1245 bool fWrittenTo;
1246 /** Whether the shadow page is dirty or not. */
1247 bool fDirty;
1248 /** Whether it was dirtied recently. */
1249 bool fDirtiedRecently;
1250 } LiveSave;
1251} PGMROMPAGE;
1252AssertCompileSizeAlignment(PGMROMPAGE, 8);
1253/** Pointer to a ROM page tracking structure. */
1254typedef PGMROMPAGE *PPGMROMPAGE;
1255
1256
1257/**
1258 * A registered ROM image.
1259 *
1260 * This is needed to keep track of ROM images since they generally intrude
1261 * into a PGMRAMRANGE. It also keeps track of additional info like the
1262 * two page sets (read-only virgin and read-write shadow) and the current
1263 * state of each page.
1264 *
1265 * Because access handlers cannot easily be executed in a different
1266 * context, the ROM ranges need to be accessible in all contexts.
1267 */
1268typedef struct PGMROMRANGE
1269{
1270 /** Pointer to the next range - R3. */
1271 R3PTRTYPE(struct PGMROMRANGE *) pNextR3;
1272 /** Pointer to the next range - R0. */
1273 R0PTRTYPE(struct PGMROMRANGE *) pNextR0;
1274 /** Pointer to the next range - RC. */
1275 RCPTRTYPE(struct PGMROMRANGE *) pNextRC;
1276 /** Pointer alignment padding. */
1277 RTRCPTR RCPtrAlignment;
1278 /** Address of the range. */
1279 RTGCPHYS GCPhys;
1280 /** Address of the last byte in the range. */
1281 RTGCPHYS GCPhysLast;
1282 /** Size of the range. */
1283 RTGCPHYS cb;
1284 /** The flags (PGMPHYS_ROM_FLAGS_*). */
1285 uint32_t fFlags;
1286 /** The saved state range ID. */
1287 uint8_t idSavedState;
1288 /** Alignment padding. */
1289 uint8_t au8Alignment[3];
1290 /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned. */
1291 uint32_t au32Alignemnt[HC_ARCH_BITS == 32 ? 6 : 2];
1292 /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
1293 * This is used for strictness checks. */
1294 R3PTRTYPE(const void *) pvOriginal;
1295 /** The ROM description. */
1296 R3PTRTYPE(const char *) pszDesc;
1297 /** The per page tracking structures. */
1298 PGMROMPAGE aPages[1];
1299} PGMROMRANGE;
1300/** Pointer to a ROM range. */
1301typedef PGMROMRANGE *PPGMROMRANGE;
1302
1303
1304/**
1305 * Live save per page data for an MMIO2 page.
1306 *
1307 * Not using PGMLIVESAVERAMPAGE here because we cannot use normal write monitoring
1308 * of MMIO2 pages. The current approach is using some optimistic SHA-1 +
1309 * CRC-32 for detecting changes as well as special handling of zero pages. This
1310 * is a TEMPORARY measure which isn't perfect, but hopefully it is good enough
1311 * for speeding things up. (We're using SHA-1 and not SHA-256 or SHA-512
1312 * because of speed (2.5x and 6x slower).)
1313 *
1314 * @todo Implement dirty MMIO2 page reporting that can be enabled during live
1315 * save but normally is disabled. Since we can write monitor guest
1316 * accesses on our own, we only need this for host accesses. Shouldn't be
1317 * too difficult for DevVGA, VMMDev might be doable, the planned
1318 * networking fun will be fun since it involves ring-0.
1319 */
1320typedef struct PGMLIVESAVEMMIO2PAGE
1321{
1322 /** Set if the page is considered dirty. */
1323 bool fDirty;
1324 /** The number of scans this page has remained unchanged for.
1325 * Only updated for dirty pages. */
1326 uint8_t cUnchangedScans;
1327 /** Whether this page was zero at the last scan. */
1328 bool fZero;
1329 /** Alignment padding. */
1330 bool fReserved;
1331 /** CRC-32 for the first half of the page.
1332 * This is used together with u32CrcH2 to quickly detect changes in the page
1333 * during the non-final passes. */
1334 uint32_t u32CrcH1;
1335 /** CRC-32 for the second half of the page. */
1336 uint32_t u32CrcH2;
1337 /** SHA-1 for the saved page.
1338 * This is used in the final pass to skip pages without changes. */
1339 uint8_t abSha1Saved[RTSHA1_HASH_SIZE];
1340} PGMLIVESAVEMMIO2PAGE;
1341/** Pointer to a live save status data for an MMIO2 page. */
1342typedef PGMLIVESAVEMMIO2PAGE *PPGMLIVESAVEMMIO2PAGE;
1343
1344/**
1345 * A registered MMIO2 (= Device RAM) range.
1346 *
1347 * There are a few reasons why we need to keep track of these
1348 * registrations. One of them is the deregistration & cleanup stuff,
1349 * while another is that the PGMRAMRANGE associated with such a region may
1350 * have to be removed from the ram range list.
1351 *
1352 * Overlapping with a RAM range has to be 100% or none at all. The pages
1353 * in the existing RAM range must not be ROM nor MMIO. A guru meditation
1354 * will be raised if a partial overlap or an overlap of ROM pages is
1355 * encountered. On an overlap we will free all the existing RAM pages and
1356 * put in the ram range pages instead.
1357 */
1358typedef struct PGMMMIO2RANGE
1359{
1360 /** The owner of the range. (a device) */
1361 PPDMDEVINSR3 pDevInsR3;
1362 /** Pointer to the ring-3 mapping of the allocation. */
1363 RTR3PTR pvR3;
1364 /** Pointer to the next range - R3. */
1365 R3PTRTYPE(struct PGMMMIO2RANGE *) pNextR3;
1366 /** Whether it's mapped or not. */
1367 bool fMapped;
1368 /** Whether it's overlapping or not. */
1369 bool fOverlapping;
1370 /** The PCI region number.
1371 * @remarks This ASSUMES that nobody will ever really need to have multiple
1372 * PCI devices with matching MMIO region numbers on a single device. */
1373 uint8_t iRegion;
1374 /** The saved state range ID. */
1375 uint8_t idSavedState;
1376 /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
1377 uint8_t abAlignemnt[HC_ARCH_BITS == 32 ? 12 : 12];
1378 /** Live save per page tracking data. */
1379 R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE) paLSPages;
1380 /** The associated RAM range. */
1381 PGMRAMRANGE RamRange;
1382} PGMMMIO2RANGE;
1383/** Pointer to a MMIO2 range. */
1384typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1385
1386
1387
1388
1389/**
1390 * PGMPhysRead/Write cache entry.
1391 */
1392typedef struct PGMPHYSCACHEENTRY
1393{
1394 /** R3 pointer to physical page. */
1395 R3PTRTYPE(uint8_t *) pbR3;
1396 /** GC Physical address for cache entry. */
1397 RTGCPHYS GCPhys;
1398#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1399 RTGCPHYS u32Padding0; /**< alignment padding. */
1400#endif
1401} PGMPHYSCACHEENTRY;
1402
1403/**
1404 * PGMPhysRead/Write cache to reduce REM memory access overhead.
1405 */
1406typedef struct PGMPHYSCACHE
1407{
1408 /** Bitmap of valid cache entries. */
1409 uint64_t aEntries;
1410 /** Cache entries. */
1411 PGMPHYSCACHEENTRY Entry[PGM_MAX_PHYSCACHE_ENTRIES];
1412} PGMPHYSCACHE;
1413
1414
1415/** Pointer to an allocation chunk ring-3 mapping. */
1416typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
1417/** Pointer to an allocation chunk ring-3 mapping pointer. */
1418typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;
1419
1420/**
1421 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
1422 *
1423 * The primary tree (Core) uses the chunk id as key.
1424 * The secondary tree (AgeCore) is used for ageing and uses ageing sequence number as key.
1425 */
1426typedef struct PGMCHUNKR3MAP
1427{
1428 /** The key is the chunk id. */
1429 AVLU32NODECORE Core;
1430 /** The key is the ageing sequence number. */
1431 AVLLU32NODECORE AgeCore;
1432 /** The current age thingy. */
1433 uint32_t iAge;
1434 /** The current reference count. */
1435 uint32_t volatile cRefs;
1436 /** The current permanent reference count. */
1437 uint32_t volatile cPermRefs;
1438 /** The mapping address. */
1439 void *pv;
1440} PGMCHUNKR3MAP;
1441
1442/**
1443 * Allocation chunk ring-3 mapping TLB entry.
1444 */
1445typedef struct PGMCHUNKR3MAPTLBE
1446{
1447 /** The chunk id. */
1448 uint32_t volatile idChunk;
1449#if HC_ARCH_BITS == 64
1450 uint32_t u32Padding; /**< alignment padding. */
1451#endif
1452 /** The chunk map. */
1453#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1454 R3PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
1455#else
1456 R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
1457#endif
1458} PGMCHUNKR3MAPTLBE;
1459/** Pointer to an allocation chunk ring-3 mapping TLB entry. */
1460typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;
1461
1462/** The number of TLB entries in PGMCHUNKR3MAPTLB.
1463 * @remark Must be a power of two value. */
1464#define PGM_CHUNKR3MAPTLB_ENTRIES 64
1465
1466/**
1467 * Allocation chunk ring-3 mapping TLB.
1468 *
1469 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
1470 * At first glance this might look kinda odd since AVL trees are
1471 * supposed to give the most optimal lookup times of all trees
1472 * due to their balancing. However, take a tree with 1023 nodes
1473 * in it, that's 10 levels, meaning that most searches have to go
1474 * down 9 levels before they find what they want. This isn't fast
1475 * compared to a TLB hit. There is the factor of cache misses,
1476 * and of course the problem with trees and branch prediction.
1477 * This is why we use TLBs in front of most of the trees.
1478 *
1479 * @todo Generalize this TLB + AVL stuff, shouldn't be all that
1480 * difficult when we switch to the new inlined AVL trees (from kStuff).
1481 */
1482typedef struct PGMCHUNKR3MAPTLB
1483{
1484 /** The TLB entries. */
1485 PGMCHUNKR3MAPTLBE aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
1486} PGMCHUNKR3MAPTLB;
1487
1488/**
1489 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
1490 * @returns Chunk TLB index.
1491 * @param idChunk The Chunk ID.
1492 */
1493#define PGM_CHUNKR3MAPTLB_IDX(idChunk) ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
1494
1495
1496/**
1497 * Ring-3 guest page mapping TLB entry.
1498 * @remarks used in ring-0 as well at the moment.
1499 */
1500typedef struct PGMPAGER3MAPTLBE
1501{
1502 /** Address of the page. */
1503 RTGCPHYS volatile GCPhys;
1504 /** The guest page. */
1505#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1506 R3PTRTYPE(PPGMPAGE) volatile pPage;
1507#else
1508 R3R0PTRTYPE(PPGMPAGE) volatile pPage;
1509#endif
1510 /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
1511#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1512 R3PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
1513#else
1514 R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
1515#endif
1516 /** The address of the page mapping. */
1517#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1518 R3PTRTYPE(void *) volatile pv;
1519#else
1520 R3R0PTRTYPE(void *) volatile pv;
1521#endif
1522#if HC_ARCH_BITS == 32
1523 uint32_t u32Padding; /**< alignment padding. */
1524#endif
1525} PGMPAGER3MAPTLBE;
1526/** Pointer to an entry in the HC physical TLB. */
1527typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;
1528
1529
1530/** The number of entries in the ring-3 guest page mapping TLB.
1531 * @remarks The value must be a power of two. */
1532#define PGM_PAGER3MAPTLB_ENTRIES 256
1533
1534/**
1535 * Ring-3 guest page mapping TLB.
1536 * @remarks used in ring-0 as well at the moment.
1537 */
1538typedef struct PGMPAGER3MAPTLB
1539{
1540 /** The TLB entries. */
1541 PGMPAGER3MAPTLBE aEntries[PGM_PAGER3MAPTLB_ENTRIES];
1542} PGMPAGER3MAPTLB;
1543/** Pointer to the ring-3 guest page mapping TLB. */
1544typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;
1545
1546/**
1547 * Calculates the index of the TLB entry for the specified guest page.
1548 * @returns Physical TLB index.
1549 * @param GCPhys The guest physical address.
1550 */
1551#define PGM_PAGER3MAPTLB_IDX(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
1552
1553
1554/**
1555 * Mapping cache usage set entry.
1556 *
1557 * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
1558 * the dynamic ring-0 and (to some extent) raw-mode context mapping
1559 * cache. If it's extended to include ring-3, well, then something will
1560 * have to be changed here...
1561 */
1562typedef struct PGMMAPSETENTRY
1563{
1564 /** The mapping cache index. */
1565 uint16_t iPage;
1566 /** The number of references.
1567 * The max is UINT16_MAX - 1. */
1568 uint16_t cRefs;
1569#if HC_ARCH_BITS == 64
1570 uint32_t alignment;
1571#endif
1572 /** Pointer to the page. */
1573 RTR0PTR pvPage;
1574 /** The physical address for this entry. */
1575 RTHCPHYS HCPhys;
1576} PGMMAPSETENTRY;
1577/** Pointer to a mapping cache usage set entry. */
1578typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;
1579
1580/**
1581 * Mapping cache usage set.
1582 *
1583 * This is used in ring-0 and the raw-mode context to track dynamic mappings
1584 * done during exits / traps. The set is
1585 */
1586typedef struct PGMMAPSET
1587{
1588 /** The number of occupied entries.
1589 * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
1590 * dynamic mappings. */
1591 uint32_t cEntries;
1592 /** The start of the current subset.
1593 * This is UINT32_MAX if no subset is currently open. */
1594 uint32_t iSubset;
1595 /** The index of the current CPU, only valid if the set is open. */
1596 int32_t iCpu;
 /** Alignment padding. */
1597 uint32_t alignment;
1598 /** The entries. */
1599 PGMMAPSETENTRY aEntries[64];
1600 /** HCPhys -> iEntry fast lookup table.
1601 * Use PGMMAPSET_HASH for hashing.
1602 * The entries may or may not be valid, check against cEntries. */
1603 uint8_t aiHashTable[128];
1604} PGMMAPSET;
1605AssertCompileSizeAlignment(PGMMAPSET, 8);
1606/** Pointer to the mapping cache set. */
1607typedef PGMMAPSET *PPGMMAPSET;
1608
1609/** PGMMAPSET::cEntries value for a closed set. */
1610#define PGMMAPSET_CLOSED UINT32_C(0xdeadc0fe)
1611
1612/** Hash function for aiHashTable. */
1613#define PGMMAPSET_HASH(HCPhys) (((HCPhys) >> PAGE_SHIFT) & 127)
1614
1615/** The max fill size (strict builds). */
1616#define PGMMAPSET_MAX_FILL (64U * 80U / 100U)
1617
1618
1619/** @name Context neutral page mapper TLB.
1620 *
1621 * Hoping to avoid some code and bug duplication parts of the GCxxx->CCPtr
1622 * code is written in a kind of context neutral way. Time will show whether
1623 * this actually makes sense or not...
1624 *
1625 * @todo this needs to be reconsidered and dropped/redone since the ring-0
1626 * context ends up using a global mapping cache on some platforms
1627 * (darwin).
1628 *
1629 * @{ */
1630/** @typedef PPGMPAGEMAPTLB
1631 * The page mapper TLB pointer type for the current context. */
1632/** @typedef PPGMPAGEMAPTLBE
1633 * The page mapper TLB entry pointer type for the current context. */
1634/** @typedef PPPGMPAGEMAPTLBE
1635 * The page mapper TLB entry pointer pointer type for the current context. */
1636/** @def PGM_PAGEMAPTLB_ENTRIES
1637 * The number of TLB entries in the page mapper TLB for the current context. */
1638/** @def PGM_PAGEMAPTLB_IDX
1639 * Calculate the TLB index for a guest physical address.
1640 * @returns The TLB index.
1641 * @param GCPhys The guest physical address. */
1642/** @typedef PPGMPAGEMAP
1643 * Pointer to a page mapper unit for current context. */
1644/** @typedef PPPGMPAGEMAP
1645 * Pointer to a page mapper unit pointer for current context. */
1646#ifdef IN_RC
1647// typedef PPGMPAGEGCMAPTLB PPGMPAGEMAPTLB;
1648// typedef PPGMPAGEGCMAPTLBE PPGMPAGEMAPTLBE;
1649// typedef PPGMPAGEGCMAPTLBE *PPPGMPAGEMAPTLBE;
1650# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGEGCMAPTLB_ENTRIES
1651# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGEGCMAPTLB_IDX(GCPhys)
1652 typedef void * PPGMPAGEMAP;
1653 typedef void ** PPPGMPAGEMAP;
1654//#elif IN_RING0
1655// typedef PPGMPAGER0MAPTLB PPGMPAGEMAPTLB;
1656// typedef PPGMPAGER0MAPTLBE PPGMPAGEMAPTLBE;
1657// typedef PPGMPAGER0MAPTLBE *PPPGMPAGEMAPTLBE;
1658//# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER0MAPTLB_ENTRIES
1659//# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER0MAPTLB_IDX(GCPhys)
1660// typedef PPGMCHUNKR0MAP PPGMPAGEMAP;
1661// typedef PPPGMCHUNKR0MAP PPPGMPAGEMAP;
1662#else
1663 typedef PPGMPAGER3MAPTLB PPGMPAGEMAPTLB;
1664 typedef PPGMPAGER3MAPTLBE PPGMPAGEMAPTLBE;
1665 typedef PPGMPAGER3MAPTLBE *PPPGMPAGEMAPTLBE;
1666# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER3MAPTLB_ENTRIES
1667# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER3MAPTLB_IDX(GCPhys)
1668 typedef PPGMCHUNKR3MAP PPGMPAGEMAP;
1669 typedef PPPGMCHUNKR3MAP PPPGMPAGEMAP;
1670#endif
1671/** @} */
1672
1673
1674/** @name PGM Pool Indexes.
1675 * Aka. the unique shadow page identifier.
1676 * @{ */
1677/** NIL page pool IDX. */
1678#define NIL_PGMPOOL_IDX 0
1679/** The first special index. */
1680#define PGMPOOL_IDX_FIRST_SPECIAL 1
1681/** Page directory (32-bit root). */
1682#define PGMPOOL_IDX_PD 1
1683/** Page Directory Pointer Table (PAE root). */
1684#define PGMPOOL_IDX_PDPT 2
1685/** AMD64 CR3 level index.*/
1686#define PGMPOOL_IDX_AMD64_CR3 3
1687/** Nested paging root.*/
1688#define PGMPOOL_IDX_NESTED_ROOT 4
1689/** The first normal index. */
1690#define PGMPOOL_IDX_FIRST 5
1691/** The last valid index. (inclusive, 14 bits) */
1692#define PGMPOOL_IDX_LAST 0x3fff
1693/** @} */
1694
1695/** The NIL index for the parent chain. */
1696#define NIL_PGMPOOL_USER_INDEX ((uint16_t)0xffff)
/** The NIL index for present-entry tracking (same NIL value as the user chain). */
1697#define NIL_PGMPOOL_PRESENT_INDEX ((uint16_t)0xffff)
1698
1699/**
1700 * Node in the chain linking a shadowed page to its parent (user).
1701 */
1702#pragma pack(1)
1703typedef struct PGMPOOLUSER
1704{
1705 /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
1706 uint16_t iNext;
1707 /** The user page index. */
1708 uint16_t iUser;
1709 /** Index into the user table. */
1710 uint32_t iUserTable;
1711} PGMPOOLUSER, *PPGMPOOLUSER;
1712typedef const PGMPOOLUSER *PCPGMPOOLUSER;
1713#pragma pack()
1714
1715
1716/** The NIL index for the phys ext chain. */
1717#define NIL_PGMPOOL_PHYSEXT_INDEX ((uint16_t)0xffff)
1718/** The NIL pte index for a phys ext chain slot. */
1719#define NIL_PGMPOOL_PHYSEXT_IDX_PTE ((uint16_t)0xffff)
1720
1721/**
1722 * Node in the chain of physical cross reference extents.
1723 * Each extent tracks up to three references (aidx/apte pairs).
1724 * @todo Calling this an 'extent' is not quite right, find a better name.
1725 * @todo find out the optimal size of the aidx array
1726 */
1727#pragma pack(1)
1728typedef struct PGMPOOLPHYSEXT
1729{
1730 /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
1731 uint16_t iNext;
1732 /** Alignment. */
1733 uint16_t u16Align;
1734 /** The user page indexes. */
1735 uint16_t aidx[3];
1736 /** The page table indexes or NIL_PGMPOOL_PHYSEXT_IDX_PTE if unknown.
 * apte[i] pairs with aidx[i]; both must be checked when removing a slot
 * (see the pgmPoolTrackPhysExtDerefGCPhys fix in r28656). */
1737 uint16_t apte[3];
1738} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
1739typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
1740#pragma pack()
1740
1741
/**
 * The kind of page that's being shadowed.
 *
 * Naming: "Shw" describes the shadow (our) paging structure, "Gst" the
 * guest structure it shadows.  The value is stored in PGMPOOLPAGE::enmKind
 * as a uint8_t, so it must stay within 8 bits.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table; Gst: no paging */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table; Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table; Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: no paging */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table; Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table; Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table; Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
    PGMPOOLKIND_32BIT_PD,
    /** Shw: 32-bit page directory. Gst: no paging. */
    PGMPOOLKIND_32BIT_PD_PHYS,
    /** Shw: PAE page directory 0; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    /** Shw: PAE page directory 1; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    /** Shw: PAE page directory 2; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    /** Shw: PAE page directory 3; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    /** Shw: PAE page directory; Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    /** Shw: PAE page directory; Gst: no paging. */
    PGMPOOLKIND_PAE_PD_PHYS,

    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst 32 bits paging. */
    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst PAE PDPT. */
    PGMPOOLKIND_PAE_PDPT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst: no paging. */
    PGMPOOLKIND_PAE_PDPT_PHYS,

    /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table; Gst: no paging */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table; Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table; Gst: no paging */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 24 (the old "22" annotation was stale) */

    /** Shw: 64-bit PML4; Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4,

    /** Shw: EPT page directory pointer table; Gst: no paging */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table; Gst: no paging */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table; Gst: no paging */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

    /** Shw: Root Nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;
1818
/**
 * The access attributes of the page; only applies to big pages.
 *
 * Stored in PGMPOOLPAGE::enmAccess as a uint8_t.  Encodes the privilege
 * level (user/supervisor), writability (RW/R) and no-execute (NX) combo.
 */
typedef enum
{
    /** No particular access requirements. */
    PGMPOOLACCESS_DONTCARE = 0,
    /** User-mode, read/write. */
    PGMPOOLACCESS_USER_RW,
    /** User-mode, read-only. */
    PGMPOOLACCESS_USER_R,
    /** User-mode, read/write, no-execute. */
    PGMPOOLACCESS_USER_RW_NX,
    /** User-mode, read-only, no-execute. */
    PGMPOOLACCESS_USER_R_NX,
    /** Supervisor-mode, read/write. */
    PGMPOOLACCESS_SUPERVISOR_RW,
    /** Supervisor-mode, read-only. */
    PGMPOOLACCESS_SUPERVISOR_R,
    /** Supervisor-mode, read/write, no-execute. */
    PGMPOOLACCESS_SUPERVISOR_RW_NX,
    /** Supervisor-mode, read-only, no-execute. */
    PGMPOOLACCESS_SUPERVISOR_R_NX
} PGMPOOLACCESS;
1834
/**
 * The tracking data for a page in the pool.
 *
 * Pages are addressed by their idx within PGMPOOL::aPages and looked up by
 * host physical address via the AVL Core in PGMPOOL::HCPhysTree.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node code with the (R3) physical address of this page. */
    AVLOHCPHYSNODECORE Core;
    /** Pointer to the R3 mapping of the page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) pvPageR3;
#else
    R3R0PTRTYPE(void *) pvPageR3;
#endif
    /** The guest physical address. */
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t Alignment0;
#endif
    RTGCPHYS GCPhys;

    /** Access handler statistics to determine whether the guest is (re)initializing a page table. */
    RTGCPTR pvLastAccessHandlerRip;
    RTGCPTR pvLastAccessHandlerFault;
    uint64_t cLastAccessHandlerCount;

    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t enmKind;
    /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
    uint8_t enmAccess;
    /** The index of this page. */
    uint16_t idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t iNext;
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t iUserHead;
    /** The number of present entries. */
    uint16_t cPresent;
    /** The first entry in the table which is present. */
    uint16_t iFirstPresent;
    /** The number of modifications to the monitored page. */
    uint16_t cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t iMonitoredPrev;
    /** The next page in the age list. */
    uint16_t iAgeNext;
    /** The previous page in the age list. */
    uint16_t iAgePrev;
    /** Used to indicate that the page is zeroed. */
    bool fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile fReusedFlushPending;
    /** Used to mark the page as dirty (write monitoring is temporarily off). */
    bool fDirty;

    /** Used to indicate that this page can't be flushed. Important for cr3 root pages and shadow pae pd pages. */
    uint32_t cLocked;
    /** Dirty-page slot index (presumably into PGMPOOL::aIdxDirtyPages while
     * fDirty is set - TODO confirm against pgmPoolAddDirtyPage). */
    uint32_t idxDirty;
    /** Fault address associated with the dirty marking - TODO confirm. */
    RTGCPTR pvDirtyFault;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
/** Pointer to a const pool page. */
typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
1910
1911
/** The hash table size. Must be a power of two, as PGMPOOL_HASH masks with
 *  (PGMPOOL_HASH_SIZE - 1). */
# define PGMPOOL_HASH_SIZE 0x40
/** The hash function: hashes on the guest page frame number. */
# define PGMPOOL_HASH(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
1916
1917
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follows immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - R3 Ptr. */
    PVMR3 pVMR3;
    /** The VM handle - R0 Ptr. */
    PVMR0 pVMR0;
    /** The VM handle - RC Ptr. */
    PVMRC pVMRC;
    /** The max pool size. This includes the special IDs. */
    uint16_t cMaxPages;
    /** The current pool size. */
    uint16_t cCurPages;
    /** The head of the free page list. */
    uint16_t iFreeHead;
    /** Padding. */
    uint16_t u16Padding;
    /** Head of the chain of free user nodes. */
    uint16_t iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t cPresent;
    /** Pointer to the array of user nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLUSER) paUsersRC;
    /** Pointer to the array of user nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLUSER) paUsersR3;
    /** Pointer to the array of user nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLUSER) paUsersR0;
    /** Head of the chain of free phys ext nodes. */
    uint16_t iPhysExtFreeHead;
    /** The number of phys ext nodes we've allocated. */
    uint16_t cMaxPhysExts;
    /** Pointer to the array of physical xref extent - RC pointer. */
    RCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsRC;
    /** Pointer to the array of physical xref extent nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR3;
    /** Pointer to the array of physical xref extent nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR0;
    /** Hash table for GCPhys addresses. */
    uint16_t aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t iAgeHead;
    /** The tail of the age list. */
    uint16_t iAgeTail;
    /** Set if the cache is enabled. */
    bool fCacheEnabled;
    /** Alignment padding. */
    bool afPadding1[3];
    /** Head of the list of modified pages. */
    uint16_t iModifiedHead;
    /** The current number of modified pages. */
    uint16_t cModifiedPages;
    /** Access handler, RC. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnAccessHandlerRC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnAccessHandlerR3;
    /** The access handler description (R3 ptr). */
    R3PTRTYPE(const char *) pszAccessHandler;
# if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t u32Padding2;
# endif
    /** Next available slot (in aIdxDirtyPages). */
    uint32_t idxFreeDirtyPage;
    /** Number of active dirty pages. */
    uint32_t cDirtyPages;
    /** Array of current dirty pgm pool page indices. */
    uint16_t aIdxDirtyPages[16];
    /** Per dirty-page slot: 512 qwords (one 4K page worth of data) -
     * presumably snapshots of the page contents; verify in the dirty page code. */
    uint64_t aDirtyPages[16][512];
    /** The number of pages currently in use. */
    uint16_t cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t cUsedPagesHigh;
    uint32_t Alignment1; /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV StatAlloc;
    /** Profiling pgmR3PoolClearDoIt(). */
    STAMPROFILE StatClearAll;
    /** Profiling pgmR3PoolReset(). */
    STAMPROFILE StatR3Reset;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE StatFree;
    /** Counting explicit flushes by PGMPoolFlushPage(). */
    STAMCOUNTER StatForceFlushPage;
    /** Counting explicit flushes of dirty pages by PGMPoolFlushPage(). */
    STAMCOUNTER StatForceFlushDirtyPage;
    /** Counting flushes for reused pages. */
    STAMCOUNTER StatForceFlushReused;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE StatZeroPage;
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER StatTrackFreeUpOneUser;
    /** Nr of flushed entries. */
    STAMCOUNTER StatTrackFlushEntry;
    /** Nr of updated entries. */
    STAMCOUNTER StatTrackFlushEntryKeep;
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER StamTrackPhysExtAllocFailures;
    /** Profiling the RC/R0 access handler. */
    STAMPROFILE StatMonitorRZ;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER StatMonitorRZEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
    STAMPROFILE StatMonitorRZFlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER StatMonitorRZFlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER StatMonitorRZFlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER StatMonitorRZFork;
    /** Profiling the RC/R0 access we've handled (except REP STOSD). */
    STAMPROFILE StatMonitorRZHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER StatMonitorRZIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER StatMonitorRZIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER StatMonitorRZRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE StatMonitorRZRepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER StatMonitorRZFaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER StatMonitorRZFaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER StatMonitorRZFaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER StatMonitorRZFaultPML4;

    /** Profiling the R3 access handler. */
    STAMPROFILE StatMonitorR3;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER StatMonitorR3EmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
    STAMPROFILE StatMonitorR3FlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER StatMonitorR3FlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER StatMonitorR3FlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER StatMonitorR3Fork;
    /** Profiling the R3 access we've handled (except REP STOSD). */
    STAMPROFILE StatMonitorR3Handled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER StatMonitorR3RepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE StatMonitorR3RepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER StatMonitorR3FaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER StatMonitorR3FaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER StatMonitorR3FaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER StatMonitorR3FaultPML4;
    /** The number of times we're called in an async thread and need to flush. */
    STAMCOUNTER StatMonitorR3Async;
    /** Times we've called pgmPoolResetDirtyPages (and there were dirty pages). */
    STAMCOUNTER StatResetDirtyPages;
    /** Times we've called pgmPoolAddDirtyPage. */
    STAMCOUNTER StatDirtyPage;
    /** Times we've had to flush duplicates for dirty page management. */
    STAMCOUNTER StatDirtyPageDupFlush;
    /** Times we've had to flush because of overflow. */
    STAMCOUNTER StatDirtyPageOverFlowFlush;

    /** The high water mark for cModifiedPages. */
    uint16_t cModifiedPagesHigh;
    uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundary. */

    /** The number of cache hits. */
    STAMCOUNTER StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER StatCacheUncacheable;
#else
    uint32_t Alignment3; /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE HCPhysTree;
    uint32_t Alignment4; /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
AssertCompileMemberAlignment(PGMPOOL, iModifiedHead, 8);
AssertCompileMemberAlignment(PGMPOOL, aDirtyPages, 8);
AssertCompileMemberAlignment(PGMPOOL, cUsedPages, 8);
#ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(PGMPOOL, StatAlloc, 8);
#endif
AssertCompileMemberAlignment(PGMPOOL, aPages, 8);
2142
2143
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the page mapping (NOT a status code).
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_STRICT)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageStrict(pPage)
/** Strict-build helper: returns the R3 mapping after asserting it's valid. */
DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
{
    Assert(pPage && pPage->pvPageR3);
    return pPage->pvPageR3;
}
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) ((pPage)->pvPageR3)
#endif
2169
/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
 * Maps a pool page into the current context, taking a PGM pointer.
 *
 * @returns Pointer to the page mapping (NOT a status code).
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined(pPGM, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined(pPGM, (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
#endif
2188
/** @def PGMPOOL_PAGE_2_PTR_BY_PGMCPU
 * Maps a pool page into the current context, taking a PGMCPU pointer.
 *
 * @returns Pointer to the page mapping (NOT a status code).
 * @param   pPGM    Pointer to the PGMCPU instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGMCPU2VM(pPGM), pPage)
#endif
2207
2208
/** @name Per guest page tracking data.
 * This is currently as a 16-bit word in the PGMPAGE structure, the idea though
 * is to use more bits for it and split it up later on. But for now we'll play
 * safe and change as little as possible.
 *
 * The 16-bit word has two parts:
 *
 * The first 14-bit forms the @a idx field. It is either the index of a page in
 * the shadow page pool, or an index into the extent list.
 *
 * The 2 topmost bits makes up the @a cRefs field, which counts the number of
 * shadow page pool references to the page. If cRefs equals
 * PGMPOOL_TD_CREFS_PHYSEXT, then the @a idx field is an index into the extent
 * (misnomer) table and not the shadow page pool.
 *
 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
 * the 16-bit word.
 *
 * @{ */
/** The shift count for getting to the cRefs part. */
#define PGMPOOL_TD_CREFS_SHIFT 14
/** The mask applied after shifting the tracking data down by
 * PGMPOOL_TD_CREFS_SHIFT. */
#define PGMPOOL_TD_CREFS_MASK 0x3
/** The cRef value used to indicate that the idx is the head of a
 * physical cross reference list. */
#define PGMPOOL_TD_CREFS_PHYSEXT PGMPOOL_TD_CREFS_MASK
/** The shift used to get idx. */
#define PGMPOOL_TD_IDX_SHIFT 0
/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
#define PGMPOOL_TD_IDX_MASK 0x3fff
/** The idx value when we're out of PGMPOOLPHYSEXT entries or/and there are
 * simply too many mappings of this page. */
#define PGMPOOL_TD_IDX_OVERFLOWED PGMPOOL_TD_IDX_MASK

/** @def PGMPOOL_TD_MAKE
 * Makes a 16-bit tracking data word.
 *
 * @returns tracking data.
 * @param   cRefs   The @a cRefs field. Must be within bounds!
 * @param   idx     The @a idx field. Must also be within bounds! */
/* Note: the idx shift is applied for symmetry with PGMPOOL_TD_GET_IDX; it is
 * currently 0 so the result is unchanged, but this keeps the macro correct
 * should the field layout ever move. */
#define PGMPOOL_TD_MAKE(cRefs, idx) ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | ((idx) << PGMPOOL_TD_IDX_SHIFT) )

/** @def PGMPOOL_TD_GET_CREFS
 * Get the @a cRefs field from a tracking data word.
 *
 * @returns The @a cRefs field
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_CREFS(u16) ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )

/** @def PGMPOOL_TD_GET_IDX
 * Get the @a idx field from a tracking data word.
 *
 * @returns The @a idx field
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_IDX(u16) ( ((u16) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK )
/** @} */
2266
2267
/**
 * Trees are using self relative offsets as pointers.
 * So, all its data, including the root pointer, must be in the heap for HC and GC
 * to have the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE VirtHandlers;
    /** Physical-to-virtual mapping nodes for the virtual access handlers
     * (AVL range + offsetptr tree, keyed by guest physical address). */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
2286
2287
/** @name Paging mode macros
 * @{ */
/* Context mangling: PGM_CTX(pgm,Foo) expands to pgmRCFoo / pgmR3Foo / pgmR0Foo
 * depending on the context (raw-mode, ring-3 or ring-0) this translation unit
 * is compiled for.  Note that the RC *string* form keeps the legacy "GC"
 * infix, unlike the identifier form which uses "RC". */
#ifdef IN_RC
# define PGM_CTX(a,b) a##RC##b
# define PGM_CTX_STR(a,b) a "GC" b
# define PGM_CTX_DECL(type) VMMRCDECL(type)
#else
# ifdef IN_RING3
# define PGM_CTX(a,b) a##R3##b
# define PGM_CTX_STR(a,b) a "R3" b
# define PGM_CTX_DECL(type) DECLCALLBACK(type)
# else
# define PGM_CTX(a,b) a##R0##b
# define PGM_CTX_STR(a,b) a "R0" b
# define PGM_CTX_DECL(type) VMMDECL(type)
# endif
#endif
2305
/* Guest paging mode name mangling: PGM_GST_NAME_<MODE>(name) yields the
 * context-mangled identifier of a guest-mode specific worker, and the
 * *_RC_*_STR / *_R0_*_STR forms yield the corresponding symbol-name strings.
 * PGM_GST_NAME itself is presumably defined by the per-mode template
 * instantiations - it is not visible in this part of the header. */
#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_RC_REAL_STR(name) "pgmRCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_RC_PROT_STR(name) "pgmRCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_RC_PAE_STR(name) "pgmRCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
/** Fetches the guest-mode function pointer @a name from the VCPU's PGM data. */
#define PGM_GST_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)

/* Shadow paging mode name mangling - same pattern as the guest macros above,
 * with the additional Nested and EPT shadow modes. */
#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_RC_PAE_STR(name) "pgmRCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_NAME_NESTED(name) PGM_CTX(pgm,ShwNested##name)
#define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
#define PGM_SHW_NAME_EPT(name) PGM_CTX(pgm,ShwEPT##name)
#define PGM_SHW_NAME_RC_EPT_STR(name) "pgmRCShwEPT" #name
#define PGM_SHW_NAME_R0_EPT_STR(name) "pgmR0ShwEPT" #name
#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
/** Fetches the shadow-mode function pointer @a name from the VCPU's PGM data. */
#define PGM_SHW_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Shw##name))
2341
/* Shw_Gst: combined shadow+guest ("both") mode name mangling.  Each macro
 * encodes a (shadow mode, guest mode) pair; the *_STR variants below give the
 * corresponding symbol-name strings for the RC and R0 contexts. */
#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_NESTED_REAL(name) PGM_CTX(pgm,BthNestedReal##name)
#define PGM_BTH_NAME_NESTED_PROT(name) PGM_CTX(pgm,BthNestedProt##name)
#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
#define PGM_BTH_NAME_NESTED_PAE(name) PGM_CTX(pgm,BthNestedPAE##name)
#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
#define PGM_BTH_NAME_EPT_REAL(name) PGM_CTX(pgm,BthEPTReal##name)
#define PGM_BTH_NAME_EPT_PROT(name) PGM_CTX(pgm,BthEPTProt##name)
#define PGM_BTH_NAME_EPT_32BIT(name) PGM_CTX(pgm,BthEPT32Bit##name)
#define PGM_BTH_NAME_EPT_PAE(name) PGM_CTX(pgm,BthEPTPAE##name)
#define PGM_BTH_NAME_EPT_AMD64(name) PGM_CTX(pgm,BthEPTAMD64##name)

/* NOTE(review): there is no PGM_BTH_NAME_RC_AMD64_PROT_STR counterpart to
 * PGM_BTH_NAME_R0_AMD64_PROT_STR below - confirm this asymmetry is intentional. */
#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name) "pgmRCBth32BitReal" #name
#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name) "pgmRCBth32BitProt" #name
#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name) "pgmRCBth32Bit32Bit" #name
#define PGM_BTH_NAME_RC_PAE_REAL_STR(name) "pgmRCBthPAEReal" #name
#define PGM_BTH_NAME_RC_PAE_PROT_STR(name) "pgmRCBthPAEProt" #name
#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name) "pgmRCBthPAE32Bit" #name
#define PGM_BTH_NAME_RC_PAE_PAE_STR(name) "pgmRCBthPAEPAE" #name
#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name) "pgmRCBthAMD64AMD64" #name
#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name) "pgmRCBthNestedReal" #name
#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name) "pgmRCBthNestedProt" #name
#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name) "pgmRCBthNested32Bit" #name
#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name) "pgmRCBthNestedPAE" #name
#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name) "pgmRCBthNestedAMD64" #name
#define PGM_BTH_NAME_RC_EPT_REAL_STR(name) "pgmRCBthEPTReal" #name
#define PGM_BTH_NAME_RC_EPT_PROT_STR(name) "pgmRCBthEPTProt" #name
#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name) "pgmRCBthEPT32Bit" #name
#define PGM_BTH_NAME_RC_EPT_PAE_STR(name) "pgmRCBthEPTPAE" #name
#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name) "pgmRCBthEPTAMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name) "pgmR0BthNestedReal" #name
#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name) "pgmR0BthNestedProt" #name
#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name) "pgmR0BthNested32Bit" #name
#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name) "pgmR0BthNestedPAE" #name
#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name) "pgmR0BthNestedAMD64" #name
#define PGM_BTH_NAME_R0_EPT_REAL_STR(name) "pgmR0BthEPTReal" #name
#define PGM_BTH_NAME_R0_EPT_PROT_STR(name) "pgmR0BthEPTProt" #name
#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name) "pgmR0BthEPT32Bit" #name
#define PGM_BTH_NAME_R0_EPT_PAE_STR(name) "pgmR0BthEPTPAE" #name
#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name) "pgmR0BthEPTAMD64" #name

#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
/** Fetches the both-mode function pointer @a name from the VCPU's PGM data. */
#define PGM_BTH_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
2404
2405/**
2406 * Data for each paging mode.
2407 */
2408typedef struct PGMMODEDATA
2409{
2410 /** The guest mode type. */
2411 uint32_t uGstType;
2412 /** The shadow mode type. */
2413 uint32_t uShwType;
2414
2415 /** @name Function pointers for Shadow paging.
2416 * @{
2417 */
2418 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2419 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
2420 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2421 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2422
2423 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2424 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2425
2426 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2427 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2428 /** @} */
2429
2430 /** @name Function pointers for Guest paging.
2431 * @{
2432 */
2433 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2434 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
2435 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2436 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2437 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2438 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2439 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2440 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2441 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2442 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2443 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2444 /** @} */
2445
2446 /** @name Function pointers for Both Shadow and Guest paging.
2447 * @{
2448 */
2449 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2450 /* no pfnR3BthTrap0eHandler */
2451 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2452 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2453 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2454 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2455 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2456#ifdef VBOX_STRICT
2457 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2458#endif
2459 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2460 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
2461
2462 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
2463 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2464 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2465 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2466 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2467 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2468#ifdef VBOX_STRICT
2469 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2470#endif
2471 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2472 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
2473
2474 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
2475 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2476 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2477 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2478 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2479 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2480#ifdef VBOX_STRICT
2481 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2482#endif
2483 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2484 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
2485 /** @} */
2486} PGMMODEDATA, *PPGMMODEDATA;
2487
2488
2489
2490/**
2491 * Converts a PGM pointer into a VM pointer.
2492 * @returns Pointer to the VM structure the PGM is part of.
2493 * @param pPGM Pointer to PGM instance data.
2494 */
2495#define PGM2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2496
2497/**
2498 * PGM Data (part of VM)
2499 */
2500typedef struct PGM
2501{
2502 /** Offset to the VM structure. */
2503 RTINT offVM;
2504 /** Offset of the PGMCPU structure relative to VMCPU. */
2505 RTINT offVCpuPGM;
2506
2507 /** @cfgm{RamPreAlloc, boolean, false}
2508 * Indicates whether the base RAM should all be allocated before starting
2509 * the VM (default), or if it should be allocated when first written to.
2510 */
2511 bool fRamPreAlloc;
2512 /** Indicates whether write monitoring is currently in use.
2513 * This is used to prevent conflicts between live saving and page sharing
2514 * detection. */
2515 bool fPhysWriteMonitoringEngaged;
2516 /** Alignment padding. */
2517 bool afAlignment0[2];
2518
2519 /*
2520 * This will be redefined at least two more times before we're done, I'm sure.
2521 * The current code is only to get on with the coding.
2522 * - 2004-06-10: initial version, bird.
2523 * - 2004-07-02: 1st time, bird.
2524 * - 2004-10-18: 2nd time, bird.
2525 * - 2005-07-xx: 3rd time, bird.
2526 */
2527
2528 /** The host paging mode. (This is what SUPLib reports.) */
2529 SUPPAGINGMODE enmHostMode;
2530
2531 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2532 RCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
2533 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2534 RCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
2535
2536 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
2537 RTGCPHYS GCPhys4MBPSEMask;
2538
2539 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2540 * This is sorted by physical address and contains no overlapping ranges. */
2541 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2542 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2543 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2544 /** RC pointer corresponding to PGM::pRamRangesR3. */
2545 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
2546 /** Generation ID for the RAM ranges. This member is incremented everytime a RAM
2547 * range is linked or unlinked. */
2548 uint32_t volatile idRamRangesGen;
2549
2550 /** Pointer to the list of ROM ranges - for R3.
2551 * This is sorted by physical address and contains no overlapping ranges. */
2552 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2553 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2554 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
2555 /** RC pointer corresponding to PGM::pRomRangesR3. */
2556 RCPTRTYPE(PPGMROMRANGE) pRomRangesRC;
2557#if HC_ARCH_BITS == 64
2558 /** Alignment padding. */
2559 RTRCPTR GCPtrPadding2;
2560#endif
2561
2562 /** Pointer to the list of MMIO2 ranges - for R3.
2563 * Registration order. */
2564 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2565
2566 /** PGM offset based trees - R3 Ptr. */
2567 R3PTRTYPE(PPGMTREES) pTreesR3;
2568 /** PGM offset based trees - R0 Ptr. */
2569 R0PTRTYPE(PPGMTREES) pTreesR0;
2570 /** PGM offset based trees - RC Ptr. */
2571 RCPTRTYPE(PPGMTREES) pTreesRC;
2572
2573 /** Linked list of GC mappings - for RC.
2574 * The list is sorted ascending on address.
2575 */
2576 RCPTRTYPE(PPGMMAPPING) pMappingsRC;
2577 /** Linked list of GC mappings - for HC.
2578 * The list is sorted ascending on address.
2579 */
2580 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2581 /** Linked list of GC mappings - for R0.
2582 * The list is sorted ascending on address.
2583 */
2584 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2585
2586 /** Pointer to the 5 page CR3 content mapping.
2587 * The first page is always the CR3 (in some form) while the 4 other pages
2588 * are used of the PDs in PAE mode. */
2589 RTGCPTR GCPtrCR3Mapping;
2590#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2591 uint32_t u32Alignment1;
2592#endif
2593
2594 /** Indicates that PGMR3FinalizeMappings has been called and that further
2595 * PGMR3MapIntermediate calls will be rejected. */
2596 bool fFinalizedMappings;
2597 /** If set no conflict checks are required. */
2598 bool fMappingsFixed;
2599 /** If set if restored as fixed but we were unable to re-fixate at the old
2600 * location because of room or address incompatibilities. */
2601 bool fMappingsFixedRestored;
2602 /** If set, then no mappings are put into the shadow page table.
2603 * Use pgmMapAreMappingsEnabled() instead of direct access. */
2604 bool fMappingsDisabled;
2605 /** Size of fixed mapping.
2606 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
2607 uint32_t cbMappingFixed;
2608 /** Base address (GC) of fixed mapping.
2609 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
2610 RTGCPTR GCPtrMappingFixed;
2611 /** The address of the previous RAM range mapping. */
2612 RTGCPTR GCPtrPrevRamRangeMapping;
2613
2614 /** @name Intermediate Context
2615 * @{ */
2616 /** Pointer to the intermediate page directory - Normal. */
2617 R3PTRTYPE(PX86PD) pInterPD;
2618 /** Pointer to the intermedate page tables - Normal.
2619 * There are two page tables, one for the identity mapping and one for
2620 * the host context mapping (of the core code). */
2621 R3PTRTYPE(PX86PT) apInterPTs[2];
2622 /** Pointer to the intermedate page tables - PAE. */
2623 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
2624 /** Pointer to the intermedate page directory - PAE. */
2625 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
2626 /** Pointer to the intermedate page directory - PAE. */
2627 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
2628 /** Pointer to the intermedate page-map level 4 - AMD64. */
2629 R3PTRTYPE(PX86PML4) pInterPaePML4;
2630 /** Pointer to the intermedate page directory - AMD64. */
2631 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2632 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2633 RTHCPHYS HCPhysInterPD;
2634 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2635 RTHCPHYS HCPhysInterPaePDPT;
2636 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2637 RTHCPHYS HCPhysInterPaePML4;
2638 /** @} */
2639
2640 /** Base address of the dynamic page mapping area.
2641 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2642 */
2643 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2644 /** The index of the last entry used in the dynamic page mapping area. */
2645 RTUINT iDynPageMapLast;
2646 /** Cache containing the last entries in the dynamic page mapping area.
2647 * The cache size is covering half of the mapping area. */
2648 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2649 /** Keep a lock counter for the full (!) mapping area. */
2650 uint32_t aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
2651
2652 /** The address of the ring-0 mapping cache if we're making use of it. */
2653 RTR0PTR pvR0DynMapUsed;
2654#if HC_ARCH_BITS == 32
2655 /** Alignment padding that makes the next member start on a 8 byte boundrary. */
2656 uint32_t u32Alignment2;
2657#endif
2658
2659 /** PGM critical section.
2660 * This protects the physical & virtual access handlers, ram ranges,
2661 * and the page flag updating (some of it anyway).
2662 */
2663 PDMCRITSECT CritSect;
2664
2665 /** Pointer to SHW+GST mode data (function pointers).
2666 * The index into this table is made up from */
2667 R3PTRTYPE(PPGMMODEDATA) paModeData;
2668
2669 /** Shadow Page Pool - R3 Ptr. */
2670 R3PTRTYPE(PPGMPOOL) pPoolR3;
2671 /** Shadow Page Pool - R0 Ptr. */
2672 R0PTRTYPE(PPGMPOOL) pPoolR0;
2673 /** Shadow Page Pool - RC Ptr. */
2674 RCPTRTYPE(PPGMPOOL) pPoolRC;
2675
2676 /** We're not in a state which permits writes to guest memory.
2677 * (Only used in strict builds.) */
2678 bool fNoMorePhysWrites;
2679 /** Alignment padding that makes the next member start on a 8 byte boundrary. */
2680 bool afAlignment3[HC_ARCH_BITS == 32 ? 7: 3];
2681
2682 /**
2683 * Data associated with managing the ring-3 mappings of the allocation chunks.
2684 */
2685 struct
2686 {
2687 /** The chunk tree, ordered by chunk id. */
2688#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2689 R3PTRTYPE(PAVLU32NODECORE) pTree;
2690#else
2691 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2692#endif
2693 /** The chunk age tree, ordered by ageing sequence number. */
2694 R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
2695 /** The chunk mapping TLB. */
2696 PGMCHUNKR3MAPTLB Tlb;
2697 /** The number of mapped chunks. */
2698 uint32_t c;
2699 /** The maximum number of mapped chunks.
2700 * @cfgm PGM/MaxRing3Chunks */
2701 uint32_t cMax;
2702 /** The current time. */
2703 uint32_t iNow;
2704 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2705 uint32_t AgeingCountdown;
2706 } ChunkR3Map;
2707
2708 /**
2709 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2710 */
2711 PGMPAGER3MAPTLB PhysTlbHC;
2712
2713 /** @name The zero page.
2714 * @{ */
2715 /** The host physical address of the zero page. */
2716 RTHCPHYS HCPhysZeroPg;
2717 /** The ring-3 mapping of the zero page. */
2718 RTR3PTR pvZeroPgR3;
2719 /** The ring-0 mapping of the zero page. */
2720 RTR0PTR pvZeroPgR0;
2721 /** The GC mapping of the zero page. */
2722 RTGCPTR pvZeroPgRC;
2723 /** @}*/
2724
2725 /** The number of handy pages. */
2726 uint32_t cHandyPages;
2727
2728 /** The number of large handy pages. */
2729 uint32_t cLargeHandyPages;
2730
2731 /**
2732 * Array of handy pages.
2733 *
2734 * This array is used in a two way communication between pgmPhysAllocPage
2735 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2736 * an intermediary.
2737 *
2738 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2739 * (The current size of 32 pages, means 128 KB of handy memory.)
2740 */
2741 GMMPAGEDESC aHandyPages[PGM_HANDY_PAGES];
2742
2743 /**
2744 * Array of large handy pages. (currently size 1)
2745 *
2746 * This array is used in a two way communication between pgmPhysAllocLargePage
2747 * and GMMR0AllocateLargePage, with PGMR3PhysAllocateLargePage serving as
2748 * an intermediary.
2749 */
2750 GMMPAGEDESC aLargeHandyPage[1];
2751
2752 /**
2753 * Live save data.
2754 */
2755 struct
2756 {
2757 /** Per type statistics. */
2758 struct
2759 {
2760 /** The number of ready pages. */
2761 uint32_t cReadyPages;
2762 /** The number of dirty pages. */
2763 uint32_t cDirtyPages;
2764 /** The number of ready zero pages. */
2765 uint32_t cZeroPages;
2766 /** The number of write monitored pages. */
2767 uint32_t cMonitoredPages;
2768 } Rom,
2769 Mmio2,
2770 Ram;
2771 /** The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM). */
2772 uint32_t cIgnoredPages;
2773 /** Indicates that a live save operation is active. */
2774 bool fActive;
2775 /** Padding. */
2776 bool afReserved[2];
2777 /** The next history index. */
2778 uint8_t iDirtyPagesHistory;
2779 /** History of the total amount of dirty pages. */
2780 uint32_t acDirtyPagesHistory[64];
2781 /** Short term dirty page average. */
2782 uint32_t cDirtyPagesShort;
2783 /** Long term dirty page average. */
2784 uint32_t cDirtyPagesLong;
2785 /** The number of saved pages. This is used to get some kind of estimate of the
2786 * link speed so we can decide when we're done. It is reset after the first
2787 * 7 passes so the speed estimate doesn't get inflated by the initial set of
2788 * zero pages. */
2789 uint64_t cSavedPages;
2790 /** The nanosecond timestamp when cSavedPages was 0. */
2791 uint64_t uSaveStartNS;
2792 /** Pages per second (for statistics). */
2793 uint32_t cPagesPerSecond;
2794 uint32_t cAlignment;
2795 } LiveSave;
2796
2797 /** @name Error injection.
2798 * @{ */
2799 /** Inject handy page allocation errors pretending we're completely out of
2800 * memory. */
2801 bool volatile fErrInjHandyPages;
2802 /** Padding. */
2803 bool afReserved[3];
2804 /** @} */
2805
2806 /** @name Release Statistics
2807 * @{ */
2808 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
2809 uint32_t cPrivatePages; /**< The number of private pages. */
2810 uint32_t cSharedPages; /**< The number of shared pages. */
2811 uint32_t cZeroPages; /**< The number of zero backed pages. */
2812 uint32_t cPureMmioPages; /**< The number of pure MMIO pages. */
2813 uint32_t cMonitoredPages; /**< The number of write monitored pages. */
2814 uint32_t cWrittenToPages; /**< The number of previously write monitored pages. */
2815 uint32_t cWriteLockedPages; /**< The number of write locked pages. */
2816 uint32_t cReadLockedPages; /**< The number of read locked pages. */
2817 uint32_t cBalloonedPages; /**< The number of ballooned pages. */
2818 uint32_t aAlignment4[1];
2819
2820 /** The number of times we were forced to change the hypervisor region location. */
2821 STAMCOUNTER cRelocations;
2822
2823 STAMCOUNTER StatLargePageAlloc; /**< The number of large pages we've allocated.*/
2824 STAMCOUNTER StatLargePageReused; /**< The number of large pages we've reused.*/
2825 STAMCOUNTER StatLargePageRefused; /**< The number of times we couldn't use a large page.*/
2826 STAMCOUNTER StatLargePageRecheck; /**< The number of times we rechecked a disabled large page.*/
2827 /** @} */
2828
2829#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2830 /* R3 only: */
2831 STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
2832 STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
2833
2834 STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
2835 STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
2836 STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
2837 STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
2838 STAMCOUNTER StatPageMapTlbFlushes; /**< ALL: Ring-3/0 page mapper TLB flushes. */
2839 STAMCOUNTER StatPageMapTlbFlushEntry; /**< ALL: Ring-3/0 page mapper TLB flushes. */
2840 STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
2841 STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
2842 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
2843 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
2844 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
2845 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
2846 STAMPROFILE StatR3SyncCR3HandlerVirtualReset; /**< R3: Profiling of the virtual handler resets. */
2847 STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
2848 STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
2849 STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
2850 STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2851 STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2852 STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
2853 STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
2854/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
2855 STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
2856 STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
2857/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
2858
2859 /* RC only: */
2860 STAMCOUNTER StatRCDynMapCacheMisses; /**< RC: The number of dynamic page mapping cache misses */
2861 STAMCOUNTER StatRCDynMapCacheHits; /**< RC: The number of dynamic page mapping cache hits */
2862 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
2863 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
2864
2865 STAMCOUNTER StatRZPhysRead;
2866 STAMCOUNTER StatRZPhysReadBytes;
2867 STAMCOUNTER StatRZPhysWrite;
2868 STAMCOUNTER StatRZPhysWriteBytes;
2869 STAMCOUNTER StatR3PhysRead;
2870 STAMCOUNTER StatR3PhysReadBytes;
2871 STAMCOUNTER StatR3PhysWrite;
2872 STAMCOUNTER StatR3PhysWriteBytes;
2873 STAMCOUNTER StatRCPhysRead;
2874 STAMCOUNTER StatRCPhysReadBytes;
2875 STAMCOUNTER StatRCPhysWrite;
2876 STAMCOUNTER StatRCPhysWriteBytes;
2877
2878 STAMCOUNTER StatRZPhysSimpleRead;
2879 STAMCOUNTER StatRZPhysSimpleReadBytes;
2880 STAMCOUNTER StatRZPhysSimpleWrite;
2881 STAMCOUNTER StatRZPhysSimpleWriteBytes;
2882 STAMCOUNTER StatR3PhysSimpleRead;
2883 STAMCOUNTER StatR3PhysSimpleReadBytes;
2884 STAMCOUNTER StatR3PhysSimpleWrite;
2885 STAMCOUNTER StatR3PhysSimpleWriteBytes;
2886 STAMCOUNTER StatRCPhysSimpleRead;
2887 STAMCOUNTER StatRCPhysSimpleReadBytes;
2888 STAMCOUNTER StatRCPhysSimpleWrite;
2889 STAMCOUNTER StatRCPhysSimpleWriteBytes;
2890
2891 STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
2892 STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2893 STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
2894 STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which has overflowed cRef2. */
2895 STAMCOUNTER StatTrackNoExtentsLeft; /**< The number of times the extent list was exhausted. */
2896 STAMCOUNTER StatTrackOverflows; /**< The number of times the extent list grows to long. */
2897 STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
2898
2899 /** Time spent by the host OS for large page allocation. */
2900 STAMPROFILE StatAllocLargePage;
2901 /** Time spent clearing the newly allocated large pages. */
2902 STAMPROFILE StatClearLargePage;
2903 /** pgmPhysIsValidLargePage profiling - R3 */
2904 STAMPROFILE StatR3IsValidLargePage;
2905 /** pgmPhysIsValidLargePage profiling - RZ*/
2906 STAMPROFILE StatRZIsValidLargePage;
2907#endif
2908} PGM;
/* Compile-time layout checks: the 64-bit and pointer-sized members below must
 * be naturally aligned so the structure has identical layout in all contexts
 * (R3/R0/RC).  Skipped for the tstVMStructGC build, which cannot evaluate the
 * member-alignment asserts. */
#ifndef IN_TSTVMSTRUCTGC /* HACK */
AssertCompileMemberAlignment(PGM, paDynPageMap32BitPTEsGC, 8);
AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
AssertCompileMemberAlignment(PGM, CritSect, 8);
AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
AssertCompileMemberAlignment(PGM, aHandyPages, 8);
AssertCompileMemberAlignment(PGM, cRelocations, 8);
#endif /* !IN_TSTVMSTRUCTGC */
/** Pointer to the PGM instance data. */
typedef PGM *PPGM;
2923
2924
2925/**
2926 * Converts a PGMCPU pointer into a VM pointer.
2927 * @returns Pointer to the VM structure the PGM is part of.
2928 * @param pPGM Pointer to PGMCPU instance data.
2929 */
2930#define PGMCPU2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2931
2932/**
2933 * Converts a PGMCPU pointer into a PGM pointer.
2934 * @returns Pointer to the VM structure the PGM is part of.
2935 * @param pPGM Pointer to PGMCPU instance data.
2936 */
2937#define PGMCPU2PGM(pPGMCpu) ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )
2938
2939/**
2940 * PGMCPU Data (part of VMCPU).
2941 */
2942typedef struct PGMCPU
2943{
2944 /** Offset to the VM structure. */
2945 RTINT offVM;
2946 /** Offset to the VMCPU structure. */
2947 RTINT offVCpu;
2948 /** Offset of the PGM structure relative to VMCPU. */
2949 RTINT offPGM;
2950 RTINT uPadding0; /**< structure size alignment. */
2951
2952#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2953 /** Automatically tracked physical memory mapping set.
2954 * Ring-0 and strict raw-mode builds. */
2955 PGMMAPSET AutoSet;
2956#endif
2957
2958 /** A20 gate mask.
2959 * Our current approach to A20 emulation is to let REM do it and don't bother
2960 * anywhere else. The interesting Guests will be operating with it enabled anyway.
2961 * But whould need arrise, we'll subject physical addresses to this mask. */
2962 RTGCPHYS GCPhysA20Mask;
2963 /** A20 gate state - boolean! */
2964 bool fA20Enabled;
2965
2966 /** What needs syncing (PGM_SYNC_*).
2967 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2968 * PGMFlushTLB, and PGMR3Load. */
2969 RTUINT fSyncFlags;
2970
2971 /** The shadow paging mode. */
2972 PGMMODE enmShadowMode;
2973 /** The guest paging mode. */
2974 PGMMODE enmGuestMode;
2975
2976 /** The current physical address representing in the guest CR3 register. */
2977 RTGCPHYS GCPhysCR3;
2978
2979 /** @name 32-bit Guest Paging.
2980 * @{ */
2981 /** The guest's page directory, R3 pointer. */
2982 R3PTRTYPE(PX86PD) pGst32BitPdR3;
2983#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2984 /** The guest's page directory, R0 pointer. */
2985 R0PTRTYPE(PX86PD) pGst32BitPdR0;
2986#endif
2987 /** The guest's page directory, static RC mapping. */
2988 RCPTRTYPE(PX86PD) pGst32BitPdRC;
2989 /** @} */
2990
2991 /** @name PAE Guest Paging.
2992 * @{ */
2993 /** The guest's page directory pointer table, static RC mapping. */
2994 RCPTRTYPE(PX86PDPT) pGstPaePdptRC;
2995 /** The guest's page directory pointer table, R3 pointer. */
2996 R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
2997#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2998 /** The guest's page directory pointer table, R0 pointer. */
2999 R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
3000#endif
3001
3002 /** The guest's page directories, R3 pointers.
3003 * These are individual pointers and don't have to be adjecent.
3004 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
3005 R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
3006 /** The guest's page directories, R0 pointers.
3007 * Same restrictions as apGstPaePDsR3. */
3008#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3009 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
3010#endif
3011 /** The guest's page directories, static GC mapping.
3012 * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
3013 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
3014 RCPTRTYPE(PX86PDPAE) apGstPaePDsRC[4];
3015 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
3016 RTGCPHYS aGCPhysGstPaePDs[4];
3017 /** The physical addresses of the monitored guest page directories (PAE). */
3018 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
3019 /** @} */
3020
3021 /** @name AMD64 Guest Paging.
3022 * @{ */
3023 /** The guest's page directory pointer table, R3 pointer. */
3024 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
3025#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3026 /** The guest's page directory pointer table, R0 pointer. */
3027 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
3028#else
3029 RTR0PTR alignment6b; /**< alignment equalizer. */
3030#endif
3031 /** @} */
3032
3033 /** Pointer to the page of the current active CR3 - R3 Ptr. */
3034 R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
3035 /** Pointer to the page of the current active CR3 - R0 Ptr. */
3036 R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
3037 /** Pointer to the page of the current active CR3 - RC Ptr. */
3038 RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
3039 /* The shadow page pool index of the user table as specified during allocation; useful for freeing root pages */
3040 uint32_t iShwUser;
3041 /* The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
3042 uint32_t iShwUserTable;
3043# if HC_ARCH_BITS == 64
3044 RTRCPTR alignment6; /**< structure size alignment. */
3045# endif
3046 /** @} */
3047
3048 /** @name Function pointers for Shadow paging.
3049 * @{
3050 */
3051 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3052 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
3053 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3054 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3055
3056 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3057 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3058
3059 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3060 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3061
3062 /** @} */
3063
3064 /** @name Function pointers for Guest paging.
3065 * @{
3066 */
3067 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3068 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
3069 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3070 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3071 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3072 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3073 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3074 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3075#if HC_ARCH_BITS == 64
3076 RTRCPTR alignment3; /**< structure size alignment. */
3077#endif
3078
3079 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3080 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3081 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3082 /** @} */
3083
3084 /** @name Function pointers for Both Shadow and Guest paging.
3085 * @{
3086 */
3087 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3088 /* no pfnR3BthTrap0eHandler */
3089 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3090 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3091 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3092 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3093 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3094 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3095 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3096 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
3097
3098 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3099 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3100 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3101 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3102 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3103 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3104 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3105 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3106 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
3107
3108 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3109 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3110 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3111 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3112 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3113 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3114 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3115 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3116 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
3117 RTRCPTR alignment2; /**< structure size alignment. */
3118 /** @} */
3119
3120 /** For saving stack space, the disassembler state is allocated here instead of
3121 * on the stack.
3122 * @note The DISCPUSTATE structure is not R3/R0/RZ clean! */
3123 union
3124 {
3125 /** The disassembler scratch space. */
3126 DISCPUSTATE DisState;
3127 /** Padding. */
3128 uint8_t abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
3129 };
3130
3131 /* Count the number of pgm pool access handler calls. */
3132 uint64_t cPoolAccessHandler;
3133
3134 /** @name Release Statistics
3135 * @{ */
3136 /** The number of times the guest has switched mode since last reset or statistics reset. */
3137 STAMCOUNTER cGuestModeChanges;
3138 /** @} */
3139
3140#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
3141 /** @name Statistics
3142 * @{ */
3143 /** RC: Which statistic this \#PF should be attributed to. */
3144 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
3145 RTRCPTR padding0;
3146 /** R0: Which statistic this \#PF should be attributed to. */
3147 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
3148 RTR0PTR padding1;
3149
3150 /* Common */
3151 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
3152 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
3153
3154 /* R0 only: */
3155 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
3156 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
3157 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
3158 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3159 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
3160 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
3161 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
3162 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
3163 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3164 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
3165 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
3166 STAMCOUNTER StatR0DynMapSetSearchFlushes; /**< R0: Set search restorting to subset flushes. */
3167 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
3168 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
3169 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
3170 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
3171 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
3172 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
3173 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
3174 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
3175 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
3176 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
3177 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
3178 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
3179 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
3180 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
3181
3182 /* RZ only: */
3183 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
3184 STAMPROFILE StatRZTrap0eTimeCheckPageFault;
3185 STAMPROFILE StatRZTrap0eTimeSyncPT;
3186 STAMPROFILE StatRZTrap0eTimeMapping;
3187 STAMPROFILE StatRZTrap0eTimeOutOfSync;
3188 STAMPROFILE StatRZTrap0eTimeHandlers;
3189 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
3190 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
3191 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
3192 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
3193 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
3194 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
3195 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
3196 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
3197 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
3198 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
3199 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
3200 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
3201 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
3202 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
3203 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
3204 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
3205 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
3206 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
3207 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
3208 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
3209 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
3210 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: \#PF err kind */
3211 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: \#PF err kind */
3212 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: \#PF err kind */
3213 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: \#PF err kind */
3214 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: \#PF err kind */
3215 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: \#PF err kind */
3216 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: \#PF err kind */
3217 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: \#PF err kind */
3218 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: \#PF err kind */
3219 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: \#PF err kind */
3220 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: \#PF err kind */
3221 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest \#PFs. */
3222 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest \#PF ending up at the end of the \#PF code. */
3223 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest \#PF to HMA or other mapping. */
3224 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
3225 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
3226 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the \#PFs. */
3227 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
3228 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
3229 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
3230 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
3231 STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
3232
3233 /* HC - R3 and (maybe) R0: */
3234
3235 /* RZ & R3: */
3236 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
3237 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
3238 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
3239 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
3240 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
3241 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
3242 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
3243 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
3244 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
3245 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
3246 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
3247 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
3248 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
3249 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
3250 STAMCOUNTER StatRZSyncPagePDNAs; /**< RC/R0: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3251 STAMCOUNTER StatRZSyncPagePDOutOfSync; /**< RC/R0: The number of time we've encountered an out-of-sync PD in SyncPage. */
3252 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
3253 STAMPROFILE StatRZDirtyBitTracking; /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault().. */
3254 STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3255 STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3256 STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
3257 STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
3258 STAMCOUNTER StatRZDirtyPageStale; /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
3259 STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real pages faults during dirty bit tracking. */
3260 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
3261 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
3262 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
3263 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
3264 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
3265 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
3266 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3267 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3268 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
3269 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
3270 STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */
3271 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3272 STAMCOUNTER StatRZPageOutOfSyncSupervisor; /**< RC/R0: The number of times supervisor page is out of sync was detected in in \#PF or VerifyAccessSyncPage. */
3273 STAMCOUNTER StatRZPageOutOfSyncUserWrite; /**< RC/R0: The number of times user page is out of sync was detected in \#PF. */
3274 STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times supervisor page is out of sync was detected in in \#PF. */
3275 STAMCOUNTER StatRZPageOutOfSyncBallloon; /**< RC/R0: The number of times a ballooned page was accessed (read). */
3276 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
3277 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
3278 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3279 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3280 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3281 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3282 STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body */
3283
3284 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
3285 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
3286 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
3287 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
3288 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
3289 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
3290 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
3291 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
3292 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
3293 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
3294 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
3295 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
3296 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
3297 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
3298 STAMCOUNTER StatR3SyncPagePDNAs; /**< R3: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3299 STAMCOUNTER StatR3SyncPagePDOutOfSync; /**< R3: The number of time we've encountered an out-of-sync PD in SyncPage. */
3300 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
3301 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
3302 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3303 STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3304 STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
3305 STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
3306 STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real pages faults during dirty bit tracking. */
3307 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
3308 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
3309 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
3310 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
3311 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
3312 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
3313 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3314 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
3315 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3316 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
3317 STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */
3318 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3319 STAMCOUNTER StatR3PageOutOfSyncSupervisor; /**< R3: The number of times supervisor page is out of sync was detected in in \#PF or VerifyAccessSyncPage. */
3320 STAMCOUNTER StatR3PageOutOfSyncUserWrite; /**< R3: The number of times user page is out of sync was detected in \#PF. */
3321 STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times supervisor page is out of sync was detected in in \#PF. */
3322 STAMCOUNTER StatR3PageOutOfSyncBallloon; /**< R3: The number of times a ballooned page was accessed (read). */
3323 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
3324 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
3325 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3326 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3327 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3328 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3329 STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
3330 /** @} */
3331#endif /* VBOX_WITH_STATISTICS */
3332} PGMCPU;
3333/** Pointer to the per-cpu PGM data. */
3334typedef PGMCPU *PPGMCPU;
3335
3336
/** @name PGM::fSyncFlags Flags
 * @{
 */
/** Updates the virtual access handler state bit in PGMPAGE. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL        RT_BIT(0)
/** Always sync CR3. */
#define PGM_SYNC_ALWAYS                         RT_BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page.
 * @todo This is obsolete now. Remove after 2.2.0 is branched off. */
#define PGM_SYNC_MONITOR_CR3                    RT_BIT(2)
/** Check guest mapping in SyncCR3. */
#define PGM_SYNC_MAP_CR3                        RT_BIT(3)
/** Clear the page pool (a light weight flush). */
#define PGM_SYNC_CLEAR_PGM_POOL_BIT             8
/** Mask form of PGM_SYNC_CLEAR_PGM_POOL_BIT (the bit index above). */
#define PGM_SYNC_CLEAR_PGM_POOL                 RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
/** @} */
3353
3354
RT_C_DECLS_BEGIN

/* The PGM lock. */
int             pgmLock(PVM pVM);
void            pgmUnlock(PVM pVM);

/* Guest mappings and conflict resolution. */
int             pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
int             pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
int             pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
PPGMMAPPING     pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
int             pgmMapResolveConflicts(PVM pVM);
DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);

/* Physical and virtual access handlers. */
void            pgmR3HandlerPhysicalUpdateAll(PVM pVM);
bool            pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
void            pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
int             pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
void            pgmHandlerVirtualDumpPhysPages(PVM pVM);
#else
# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
#endif
DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
int             pgmR3InitSavedState(PVM pVM, uint64_t cbRam);

/* Physical memory: page allocation, TLB loading and mapping. */
int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
int             pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
void            pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage);
int             pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int             pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
int             pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
int             pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
int             pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
int             pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
VMMDECL(int)    pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
VMMDECL(int)    pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
#ifdef IN_RING3
void            pgmR3PhysRelinkRamRanges(PVM pVM);
int             pgmR3PhysRamPreAllocate(PVM pVM);
int             pgmR3PhysRamReset(PVM pVM);
int             pgmR3PhysRomReset(PVM pVM);
int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);

/* Shadow page pool, ring-3 only management. */
int             pgmR3PoolInit(PVM pVM);
void            pgmR3PoolRelocate(PVM pVM);
void            pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
void            pgmR3PoolReset(PVM pVM);
void            pgmR3PoolClearAll(PVM pVM);
DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser);

#endif /* IN_RING3 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
int             pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
#endif
int             pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);

3415DECLINLINE(int) pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false)
3416{
3417 return pgmPoolAllocEx(pVM, GCPhys, enmKind, PGMPOOLACCESS_DONTCARE, iUser, iUserTable, ppPage, fLockPage);
3418}
3419
/* Shadow page pool: freeing, flushing and dirty-page tracking. */
void            pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
void            pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
int             pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOW WHAT YOU'RE DOING!! */);
void            pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys);
PPGMPOOLPAGE    pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
int             pgmPoolSyncCR3(PVMCPU pVCpu);
bool            pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys);
int             pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
void            pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte);
void            pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT);
3430DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
3431{
3432 return pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPhysPage, true /* flush PTEs */, pfFlushTLBs);
3433}
3434
/* Shadow page pool: physical cross-reference tracking and write monitoring. */
uint16_t        pgmPoolTrackPhysExtAddref(PVM pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte);
void            pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte);
void            pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
int             pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void            pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);

void            pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void            pgmPoolResetDirtyPages(PVM pVM);

/* Shadow mode switching around pool flushes (ring-3). */
int             pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
int             pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);

/* Shadow PDE handling for guest mappings and CR3 (de)activation. */
void            pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
void            pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
int             pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
int             pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);

/* Shadow paging structure syncing (PAE, long mode, EPT). */
int             pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
#ifndef IN_RC
int             pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
#endif
int             pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);

/* Lazy mapping of guest paging structures. */
PX86PD          pgmGstLazyMap32BitPD(PPGMCPU pPGM);
PX86PDPT        pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
PX86PDPAE       pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
PX86PML4        pgmGstLazyMapPml4(PPGMCPU pPGM);

RT_C_DECLS_END
3464
3465/** @} */
3466
3467#endif
3468
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette