VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 30838

Last change on this file since 30838 was 30836, checked in by vboxsync, 15 years ago

Removed aging tree code. Wasn't working at all. Do simple, but not so efficient complete enumeration.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 158.3 KB
Line 
1/* $Id: PGMInternal.h 30836 2010-07-14 13:50:19Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___PGMInternal_h
19#define ___PGMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/err.h>
24#include <VBox/dbg.h>
25#include <VBox/stam.h>
26#include <VBox/param.h>
27#include <VBox/vmm.h>
28#include <VBox/mm.h>
29#include <VBox/pdmcritsect.h>
30#include <VBox/pdmapi.h>
31#include <VBox/dis.h>
32#include <VBox/dbgf.h>
33#include <VBox/log.h>
34#include <VBox/gmm.h>
35#include <VBox/hwaccm.h>
36#include <VBox/hwacc_vmx.h>
37#include <include/internal/pgm.h>
38#include <iprt/asm.h>
39#include <iprt/assert.h>
40#include <iprt/avl.h>
41#include <iprt/critsect.h>
42#include <iprt/sha.h>
43
44
45
46/** @defgroup grp_pgm_int Internals
47 * @ingroup grp_pgm
48 * @internal
49 * @{
50 */
51
52
/** @name PGM Compile Time Config
 * @{
 */

/**
 * Indicates that there are no guest mappings to care about.
 * Currently only raw-mode related code uses mappings, i.e. RC and R3 code.
 */
#if defined(IN_RING0) || !defined(VBOX_WITH_RAW_MODE)
# define PGM_WITHOUT_MAPPINGS
#endif

/**
 * Works around page-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Optimization for PAE page tables that are modified often.
 * Not available in raw-mode context (RC).
 */
//#if 0 /* disabled again while debugging */
#ifndef IN_RC
# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
#endif
//#endif

/**
 * Large page support enabled only on 64-bit hosts; applies to nested paging only.
 */
#if (HC_ARCH_BITS == 64) && !defined(IN_RC)
# define PGM_WITH_LARGE_PAGES
#endif

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES
96
/**
 * Number of pages to sync during a page fault
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 *
 * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
 * world switch overhead, so let's sync more.
 */
#ifdef IN_RING0
/* Chose 32 based on the compile test in #4219; 64 shows worse stats.
 * 32 again shows better results than 16; slightly more overhead in the \#PF handler,
 * but ~5% fewer faults.
 */
# define PGM_SYNC_NR_PAGES              32
#else
# define PGM_SYNC_NR_PAGES              8
#endif
115
/**
 * Number of PGMPhysRead/Write cache entries.
 * Must be a power of two for PGM_MAX_PHYSCACHE_ENTRIES_MASK to work;
 * presumably also bounded by the 64 bits of a uint64_t validity mask —
 * TODO confirm against the cache implementation.
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)


/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif

/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*#define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */

/** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
 * Enables real write monitoring of pages, i.e. mapping them read-only and
 * only making them writable when getting a write access \#PF. */
#define VBOX_WITH_REAL_WRITE_MONITORED_PAGES

/** @} */
145
146
/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT          RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING            RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING             RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY         RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY         RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED      RT_BIT_64(11)
#endif

/** @} */
183
/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @{ */
#define PGM_TYPE_REAL                   1
#define PGM_TYPE_PROT                   2
#define PGM_TYPE_32BIT                  3
#define PGM_TYPE_PAE                    4
#define PGM_TYPE_AMD64                  5
#define PGM_TYPE_NESTED                 6
#define PGM_TYPE_EPT                    7
#define PGM_TYPE_MAX                    PGM_TYPE_EPT
/** @} */

/** Macro for checking if the guest is using paging.
 * True when the guest mode is a paged one (32-bit or better) and the shadow
 * mode is neither of the hardware-assisted variants (nested / EPT).
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_32BIT \
     && !(   (uShwType) == PGM_TYPE_NESTED \
          || (uShwType) == PGM_TYPE_EPT))

/** Macro for checking if the guest supports the NX bit.
 * True when the guest mode is PAE or better and the shadow mode is neither of
 * the hardware-assisted variants (nested / EPT).
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_PAE \
     && !(   (uShwType) == PGM_TYPE_NESTED \
          || (uShwType) == PGM_TYPE_EPT))
215
216
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_BY_PGMCPU
 * Maps a GC physical page address to a virtual address, taking a per-CPU PGM
 * instance instead of a VM handle.
 *
 * @returns VBox status code.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
     pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
     PGM_GCPHYS_2_PTR(PGMCPU2VM(pPGM), GCPhys, ppv)
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapGCPageOff(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
     PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif
302
/** @def PGM_INVL_PG
 * Invalidates a page.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCVirt      The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#else
/* Ring-0 and ring-3 share the same expansion; the former separate
 * IN_RING0 branch was an exact duplicate of this one. */
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#endif
316
/** @def PGM_INVL_PG_ALL_VCPU
 * Invalidates a page on all VCPUs.
 *
 * @param   pVM         The VM handle.
 * @param   GCVirt      The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#else
/* Ring-0 and ring-3 share the same expansion; the former separate
 * IN_RING0 branch was an exact duplicate of this one. */
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#endif
330
/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCVirt      The virtual address within the page directory to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
#else
/* Ring-0 and ring-3 share the same expansion; the former separate
 * IN_RING0 branch was an exact duplicate of this one. */
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#endif
344
/** @def PGM_INVL_VCPU_TLBS()
 * Invalidates the TLBs of the specified VCPU.
 *
 * @param   pVCpu       The VMCPU handle.
 */
#ifdef IN_RC
# define PGM_INVL_VCPU_TLBS(pVCpu)              ASMReloadCR3()
#else
/* Ring-0 and ring-3 share the same expansion; the former separate
 * IN_RING0 branch was an exact duplicate of this one. */
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#endif
357
/** @def PGM_INVL_ALL_VCPU_TLBS()
 * Invalidates the TLBs of all VCPUs.
 *
 * @param   pVM         The VM handle.
 */
#ifdef IN_RC
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
#else
/* Ring-0 and ring-3 share the same expansion; the former separate
 * IN_RING0 branch was an exact duplicate of this one. */
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#endif
370
/** Size of the GCPtrConflict array in PGMMAPPING.
 * @remarks Must be a power of two. */
#define PGMMAPPING_CONFLICT_MAX         8

/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked list in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry (ring-3 address). */
    R3PTRTYPE(struct PGMMAPPING *)      pNextR3;
    /** Pointer to next entry (ring-0 address). */
    R0PTRTYPE(struct PGMMAPPING *)      pNextR0;
    /** Pointer to next entry (raw-mode context address). */
    RCPTRTYPE(struct PGMMAPPING *)      pNextRC;
    /** Indicate whether this entry is finalized. */
    bool                                fFinalized;
    /** Start Virtual address. */
    RTGCPTR                             GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCPTR                             GCPtrLast;
    /** Range size (bytes). */
    RTGCPTR                             cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE)           pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *)                   pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Last PGMMAPPING_CONFLICT_MAX (8) addresses that caused conflicts. */
    RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    /** Number of conflicts for this hypervisor mapping. */
    uint32_t                            cConflicts;
    /** Number of page tables. */
    uint32_t                            cPTs;

    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length.
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                        HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                        HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                        HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT)               pPTR3;
        /** The HC virtual address of the two PAE page tables. (i.e. 1024 entries instead of 512) */
        R3PTRTYPE(PX86PTPAE)            paPaePTsR3;
        /** The RC virtual address of the 32-bit page table. */
        RCPTRTYPE(PX86PT)               pPTRC;
        /** The RC virtual address of the two PAE page tables. */
        RCPTRTYPE(PX86PTPAE)            paPaePTsRC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PX86PT)               pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE)            paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
437
438
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges (keyed by GC physical address). */
    AVLROGCPHYSNODECORE                 Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE                  enmType;
    /** Number of pages to update. */
    uint32_t                            cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *)                   pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)      pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *)                   pvUserR0;
    /** Pointer to RC callback function. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)      pfnHandlerRC;
    /** User argument for RC handlers. */
    RCPTRTYPE(void *)                   pvUserRC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
473
474
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t                             offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     * Use PGMPHYS2VIRTHANDLER_OFF_MASK to extract the actual offset.
     */
    int32_t                             offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)
502
503
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Size of the range (in bytes). */
    RTGCPTR                             cb;
    /** Number of cache pages. */
    uint32_t                            cPages;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Pointer to the RC callback function. */
    RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
#if HC_ARCH_BITS == 64
    /** Explicit structure padding on 64-bit hosts. */
    RTRCPTR                             padding;
#endif
    /** Pointer to the R3 callback function for invalidation. */
    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    /** Pointer to the R3 callback function. */
    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored ranges. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
540
541
/**
 * Page type.
 *
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks This is used in the saved state, so changes to it require bumping
 *          the saved state version.
 * @todo    So, convert to \#defines!
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END <= 7);

/** @name Page type predicates.
 * These exploit the enum ordering above (everything up to and including ROM
 * is readable, everything up to and including ROM_SHADOW is writable).
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type)   ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type)  ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type)        ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type)        ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type)         ( (type) == PGMPAGETYPE_MMIO )
/** @} */
580
581
/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon). Which means that for we'll be
 * using PGM_PAGE_GET_*, PGM_PAGE_IS_ and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and the Page ID. */
    RTHCPHYS    HCPhysAndPageID;
    /** Combination of:
     *  - [0-7]: u2HandlerPhysStateY - the physical handler state
     *    (PGM_PAGE_HNDL_PHYS_STATE_*).
     *  - [8-9]: u2HandlerVirtStateY - the virtual handler state
     *    (PGM_PAGE_HNDL_VIRT_STATE_*).
     *  - [10-12]: 3 unused bits.
     *  - [13-14]: u2PDEType - paging structure needed to map the page
     *    (PGM_PAGE_PDE_TYPE_*), see PGM_PAGE_SET_PDE_TYPE.
     *  - [15]: fWrittenToY - flag indicating that a write monitored page was
     *    written to when set, see PGM_PAGE_SET_WRITTEN_TO.
     * @remarks Warning! All accesses to the bits are hardcoded.
     *
     * @todo    Change this to a union with both bitfields, u8 and u accessors.
     *          That'll help deal with some of the hardcoded accesses.
     *
     * @todo    Include uStateY and uTypeY as well so it becomes 32-bit. This
     *          will make it possible to turn some of the 16-bit accesses into
     *          32-bit ones, which may be efficient (stalls).
     */
    RTUINT16U   u16MiscY;
    /** The page state.
     * Only 3 bits are really needed for this. */
    uint16_t    uStateY   : 3;
    /** The page type (PGMPAGETYPE).
     * Only 3 bits are really needed for this. */
    uint16_t    uTypeY    : 3;
    /** PTE index for usage tracking (page pool). */
    uint16_t    uPteIdx   : 10;
    /** Usage tracking (page pool). */
    uint16_t    u16TrackingY;
    /** The number of read locks on this page. */
    uint8_t     cReadLocksY;
    /** The number of write locks on this page. */
    uint8_t     cWriteLocksY;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;
636
637
/**
 * Clears the page structure.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
    do { \
        (pPage)->HCPhysAndPageID     = 0; \
        (pPage)->uStateY             = 0; \
        (pPage)->uTypeY              = 0; \
        (pPage)->uPteIdx             = 0; \
        (pPage)->u16MiscY.u          = 0; \
        (pPage)->u16TrackingY        = 0; \
        (pPage)->cReadLocksY         = 0; \
        (pPage)->cWriteLocksY        = 0; \
    } while (0)

/**
 * Initializes the page structure.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The host physical address; must be page aligned and
 *                      fit within bits 12..47 (see the AssertFatal mask).
 * @param   _idPage     The page ID; only the low 28 bits are stored.
 * @param   _uType      The page type (PGMPAGETYPE).
 * @param   _uState     The page state (PGM_PAGE_STATE_*).
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
    do { \
        RTHCPHYS SetHCPhysTmp = (_HCPhys); \
        AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
        (pPage)->HCPhysAndPageID     = (SetHCPhysTmp << (28-12)) | ((_idPage) & UINT32_C(0x0fffffff)); \
        (pPage)->uStateY             = (_uState); \
        (pPage)->uTypeY              = (_uType); \
        (pPage)->uPteIdx             = 0; \
        (pPage)->u16MiscY.u          = 0; \
        (pPage)->u16TrackingY        = 0; \
        (pPage)->cReadLocksY         = 0; \
        (pPage)->cWriteLocksY        = 0; \
    } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   pVM         The VM handle (for getting the zero page address).
 * @param   uType       The page type (PGMPAGETYPE).
 */
#define PGM_PAGE_INIT_ZERO(pPage, pVM, uType) \
    PGM_PAGE_INIT((pPage), (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (uType), PGM_PAGE_STATE_ZERO)


/** @name The Page state, PGMPAGE::uStateY.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO             0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED        1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page remapped as read-write and the state
 * moved back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED  2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED           3
/** The page is ballooned, so no longer available for this VM. */
#define PGM_PAGE_STATE_BALLOONED        4
/** @} */
703
704
/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage)           ( (pPage)->uStateY )

/**
 * Sets the page state.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new page state (PGM_PAGE_STATE_*).
 */
#define PGM_PAGE_SET_STATE(pPage, _uState)  do { (pPage)->uStateY = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * The address is page aligned; bits 28 and up of HCPhysAndPageID hold the
 * page frame number (see PGM_PAGE_INIT / PGM_PAGE_SET_HCPHYS).
 * @returns host physical address (RTHCPHYS).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage)          ( ((pPage)->HCPhysAndPageID >> 28) << 12 )

/**
 * Sets the host physical address of the guest page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The new host physical address; must be page aligned
 *                      and fit within bits 12..47 (see the AssertFatal mask).
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
    do { \
        RTHCPHYS SetHCPhysTmp = (_HCPhys); \
        AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
        (pPage)->HCPhysAndPageID = ((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) \
                                 | (SetHCPhysTmp << (28-12)); \
    } while (0)

/**
 * Get the Page ID (stored in the low 28 bits of HCPhysAndPageID).
 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage)          (  (uint32_t)((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) )

/**
 * Sets the Page ID.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _idPage     The new page ID; only the low 28 bits are stored.
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) \
    do { \
        (pPage)->HCPhysAndPageID = (((pPage)->HCPhysAndPageID) & UINT64_C(0xfffffffff0000000)) \
                                 | ((_idPage) & UINT32_C(0x0fffffff)); \
    } while (0)

/**
 * Get the Chunk ID (the upper part of the page ID).
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage)         ( PGM_PAGE_GET_PAGEID(pPage) >> GMM_CHUNKID_SHIFT )

/**
 * Get the index of the page within the allocation chunk (the lower part of
 * the page ID).
 * @returns The page index.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (uint32_t)((pPage)->HCPhysAndPageID & GMM_PAGEID_IDX_MASK) )
770
/**
 * Gets the page type.
 * @returns The page type (PGMPAGETYPE).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TYPE(pPage)            (pPage)->uTypeY

/**
 * Sets the page type.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _enmType    The new page type (PGMPAGETYPE).
 */
#define PGM_PAGE_SET_TYPE(pPage, _enmType)  do { (pPage)->uTypeY = (_enmType); } while (0)

/**
 * Gets the page table index (page pool usage tracking).
 * @returns The page table index.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PTE_INDEX(pPage)       (pPage)->uPteIdx

/**
 * Sets the page table index (page pool usage tracking).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _iPte       New page table index.
 */
#define PGM_PAGE_SET_PTE_INDEX(pPage, _iPte) do { (pPage)->uPteIdx = (_iPte); } while (0)

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_MMIO(pPage)             ( (pPage)->uTypeY == PGMPAGETYPE_MMIO )

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage)             ( (pPage)->uStateY == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage)           ( (pPage)->uStateY == PGM_PAGE_STATE_SHARED )

/**
 * Checks if the page is ballooned.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_BALLOONED(pPage)        ( (pPage)->uStateY == PGM_PAGE_STATE_BALLOONED )

/**
 * Marks the page as written to (for GMM change monitoring).
 * Sets bit 15 of u16MiscY (bit 7 of its high byte).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage)      do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x80); } while (0)

/**
 * Clears the written-to indicator.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage)    do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0x7f); } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage)       ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) )
845
/** @name PT usage values (PGMPAGE::u2PDEType).
 *
 * @{ */
/** Either as a PT or PDE. */
#define PGM_PAGE_PDE_TYPE_DONTCARE          0
/** Must use a page table to map the range. */
#define PGM_PAGE_PDE_TYPE_PT                1
/** Can use a page directory entry to map the continuous range. */
#define PGM_PAGE_PDE_TYPE_PDE               2
/** Can use a page directory entry to map the continuous range - temporarily disabled (by page monitoring). */
#define PGM_PAGE_PDE_TYPE_PDE_DISABLED      3
/** @} */

/**
 * Set the PDE type of the page.
 * Stores the 2-bit value in bits 5-6 of u16MiscY's high byte (i.e. bits
 * 13-14 of u16MiscY).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   uType       PGM_PAGE_PDE_TYPE_*.
 */
#define PGM_PAGE_SET_PDE_TYPE(pPage, uType) \
    do { \
        (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0x9f)) \
                                 | (((uType) & UINT8_C(0x03)) << 5); \
    } while (0)

/**
 * Gets the PDE type of the page (the same field written by
 * PGM_PAGE_SET_PDE_TYPE).
 * @returns PGM_PAGE_PDE_TYPE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PDE_TYPE(pPage)        ( ((pPage)->u16MiscY.au8[1] & UINT8_C(0x60)) >> 5)

/** Enabled optimized access handler tests.
 * These optimizations makes ASSUMPTIONS about the state values and the u16MiscY
 * layout. When enabled, the compiler should normally generate more compact
 * code.
 */
#define PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS 1
883
/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateY).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 *          The state lives in PGMPAGE::u16MiscY.au8[0], see
 *          PGM_PAGE_GET_HNDL_PHYS_STATE.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE           0
/** Monitoring is temporarily disabled. */
#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED       1
/** Write access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_WRITE          2
/** All access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_ALL            3
/** @} */
898
/**
 * Gets the physical access handler state of a page.
 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)     ((pPage)->u16MiscY.au8[0])

/**
 * Sets the physical access handler state of a page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value (PGM_PAGE_HNDL_PHYS_STATE_*).
 */
#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
    do { (pPage)->u16MiscY.au8[0] = (uint8_t)(_uState); } while (0)
914
/**
 * Checks if the page has any physical access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )

/**
 * Checks if the page has any active physical access handlers.
 * (DISABLED does not count as active; only WRITE and ALL do.)
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
930
931
/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateY).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 *          The state lives in bits 0:1 of PGMPAGE::u16MiscY.au8[1], see
 *          PGM_PAGE_GET_HNDL_VIRT_STATE.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE           0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE          2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL            3
/** @} */
945
/**
 * Gets the virtual access handler state of a page.
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value (bits 0:1 of u16MiscY.au8[1]).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (uint8_t)((pPage)->u16MiscY.au8[1] & UINT8_C(0x03)) )

/**
 * Sets the virtual access handler state of a page, leaving the other bits
 * of the byte (written-to flag, PDE type) untouched.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value (PGM_PAGE_HNDL_VIRT_STATE_*).
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    do { \
        (pPage)->u16MiscY.au8[1] = (uint8_t)(  ((pPage)->u16MiscY.au8[1] & ~UINT8_C(0x03)) \
                                             | ((_uState) & UINT8_C(0x03))); \
    } while (0)
963
/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
 * virtual handlers (there is no DISABLED state for them).
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) \
    PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
980
981
/**
 * Checks if the page has any access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
/* 0x0303 covers the low two bits of both state bytes: any non-NONE physical
   (au8[0]) or virtual (au8[1]) handler state makes the expression non-zero. */
# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    ( ((pPage)->u16MiscY.u & UINT16_C(0x0303)) != 0 )
#else
# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
#endif

/**
 * Checks if the page has any active access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
/* Bit 1 of a state value is set exactly for WRITE (2) and ALL (3), so testing
   0x0202 checks both state bytes for an active (>= WRITE) handler at once. */
# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    ( ((pPage)->u16MiscY.u & UINT16_C(0x0202)) != 0 )
#else
# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )
#endif

/**
 * Checks if the page has any active access handlers catching all accesses.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
/* ORing the two state bytes and masking the low two bits yields ALL (3) when
   either state is ALL.  NOTE(review): it also yields 3 for the combination
   phys == DISABLED (1) with virt == WRITE (2), where the non-optimized
   variant below would say false -- presumably that combination cannot occur;
   verify against the handler installation code. */
# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   ( ((pPage)->u16MiscY.au8[0] | (pPage)->u16MiscY.au8[1]) & UINT8_C(0x3) ) \
     == PGM_PAGE_HNDL_PHYS_STATE_ALL )
#else
# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL )
#endif
1024
1025
/** @def PGM_PAGE_GET_TRACKING
 * Gets the packed shadow page pool tracking data associated with a guest page.
 * @returns uint16_t containing the data.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TRACKING(pPage)    ((pPage)->u16TrackingY)

/** @def PGM_PAGE_SET_TRACKING
 * Sets the packed shadow page pool tracking data associated with a guest page.
 * @param   pPage               Pointer to the physical guest page tracking structure.
 * @param   u16TrackingData     The tracking data to store.
 */
#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
    do { (pPage)->u16TrackingY = (uint16_t)(u16TrackingData); } while (0)
1040
/** @def PGM_PAGE_GET_TD_CREFS
 * Gets the @a cRefs tracking data member.
 * The shift/mask constants (PGMPOOL_TD_*) are defined by the shadow page pool.
 * @returns cRefs.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_CREFS(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)

/** @def PGM_PAGE_GET_TD_IDX
 * Gets the @a idx tracking data member (shadow page pool index).
 * @returns idx.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_IDX(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
1056
1057
/** Max number of locks on a page. */
#define PGM_PAGE_MAX_LOCKS  UINT8_C(254)

/** Get the read lock count.
 * @returns count.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_READ_LOCKS(pPage)      ((pPage)->cReadLocksY)

/** Get the write lock count.
 * @returns count.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_WRITE_LOCKS(pPage)     ((pPage)->cWriteLocksY)

/** Decrement the read lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_DEC_READ_LOCKS(pPage)      do { (pPage)->cReadLocksY  -= 1; } while (0)

/** Decrement the write lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_DEC_WRITE_LOCKS(pPage)     do { (pPage)->cWriteLocksY -= 1; } while (0)

/** Increment the read lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INC_READ_LOCKS(pPage)      do { (pPage)->cReadLocksY  += 1; } while (0)

/** Increment the write lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INC_WRITE_LOCKS(pPage)     do { (pPage)->cWriteLocksY += 1; } while (0)
1092
1093
#if 0
/** Enables sanity checking of write monitoring using CRC-32. */
# define PGMLIVESAVERAMPAGE_WITH_CRC32
#endif

/**
 * Per page live save tracking data.
 */
typedef struct PGMLIVESAVERAMPAGE
{
    /** Number of times it has been dirtied. */
    uint32_t    cDirtied : 24;
    /** Whether it is currently dirty. */
    uint32_t    fDirty : 1;
    /** Ignore the page.
     * This is used for pages that have been MMIO, MMIO2 or ROM pages once.  We will
     * deal with these after pausing the VM and DevPCI have said its bit about
     * remappings. */
    uint32_t    fIgnore : 1;
    /** Was a ZERO page last time around. */
    uint32_t    fZero : 1;
    /** Was a SHARED page last time around. */
    uint32_t    fShared : 1;
    /** Whether the page is/was write monitored in a previous pass. */
    uint32_t    fWriteMonitored : 1;
    /** Whether the page is/was write monitored earlier in this pass. */
    uint32_t    fWriteMonitoredJustNow : 1;
    /** Bits reserved for future use. */
    uint32_t    u2Reserved : 2;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    /** CRC-32 for the page. This is for internal consistency checks. */
    uint32_t    u32Crc;
#endif
} PGMLIVESAVERAMPAGE;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
AssertCompileSize(PGMLIVESAVERAMPAGE, 8);
#else
AssertCompileSize(PGMLIVESAVERAMPAGE, 4);
#endif
/** Pointer to the per page live save tracking data. */
typedef PGMLIVESAVERAMPAGE *PPGMLIVESAVERAMPAGE;

/** The max value of PGMLIVESAVERAMPAGE::cDirtied.
 * (Identifier spelling 'LIVSAVE' is historical.) */
#define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
1138
1139
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for RC. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    /** PGM_RAM_RANGE_FLAGS_* flags. */
    uint32_t                            fFlags;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvR3;
    /** Live save per page tracking data. */
    R3PTRTYPE(PPGMLIVESAVERAMPAGE)      paLSPages;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Pointer to self - R0 pointer. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pSelfR0;
    /** Pointer to self - RC pointer. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
    uint32_t                            au32Alignment2[HC_ARCH_BITS == 32 ? 1 : 3];
    /** Array of physical guest page tracking structures.
     * NOTE(review): trailing variable-size array idiom ([1] declared size);
     * presumably sized by cb at allocation time -- verify at the alloc site. */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;

/** @name PGMRAMRANGE::fFlags
 * @{ */
/** The RAM range is floating around as an independent guest mapping. */
#define PGM_RAM_RANGE_FLAGS_FLOATING        RT_BIT(20)
/** Ad hoc RAM range for a ROM mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM      RT_BIT(21)
/** Ad hoc RAM range for an MMIO mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO     RT_BIT(22)
/** Ad hoc RAM range for an MMIO2 mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2    RT_BIT(23)
/** @} */

/** Tests if a RAM range is an ad hoc one or not.
 * @returns true/false.
 * @param   pRam    The RAM range.
 */
#define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
    (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
1200
1201
/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have two pages
 * backing it. This structure contains the PGMPAGE for both while
 * PGMRAMRANGE have a copy of the active one. It is important that these
 * aren't out of sync in any regard other than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page. */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page. */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Live save status information. Makes use of unused alignment space. */
    struct
    {
        /** The previous protection value. */
        uint8_t u8Prot;
        /** Written to flag set by the handler. */
        bool    fWrittenTo;
        /** Whether the shadow page is dirty or not. */
        bool    fDirty;
        /** Whether it was dirtied recently. */
        bool    fDirtiedRecently;
    } LiveSave;
} PGMROMPAGE;
AssertCompileSizeAlignment(PGMROMPAGE, 8);
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;
1234
1235
/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM image since they generally intrude
 * into a PGMRAMRANGE. It also keeps track of additional info like the
 * two page sets (read-only virgin and read-write shadow), the current
 * state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges needs to be accessible and in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - RC. */
    RCPTRTYPE(struct PGMROMRANGE *)     pNextRC;
    /** Pointer alignment */
    RTRCPTR                             RCPtrAlignment;
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAGS_*). */
    uint32_t                            fFlags;
    /** The saved state range ID. */
    uint8_t                             idSavedState;
    /** Alignment padding. */
    uint8_t                             au8Alignment[3];
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned.
     * (Field name spelling is historical; kept for source compatibility.) */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 6 : 2];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;
1281
1282
/**
 * Live save per page data for an MMIO2 page.
 *
 * Not using PGMLIVESAVERAMPAGE here because we cannot use normal write monitoring
 * of MMIO2 pages.  The current approach is using some optimistic SHA-1 +
 * CRC-32 for detecting changes as well as special handling of zero pages.  This
 * is a TEMPORARY measure which isn't perfect, but hopefully it is good enough
 * for speeding things up.  (We're using SHA-1 and not SHA-256 or SHA-512
 * because of speed (2.5x and 6x slower).)
 *
 * @todo Implement dirty MMIO2 page reporting that can be enabled during live
 *       save but normally is disabled.  Since we can write monitor guest
 *       accesses on our own, we only need this for host accesses.  Shouldn't be
 *       too difficult for DevVGA, VMMDev might be doable, the planned
 *       networking fun will be fun since it involves ring-0.
 */
typedef struct PGMLIVESAVEMMIO2PAGE
{
    /** Set if the page is considered dirty. */
    bool        fDirty;
    /** The number of scans this page has remained unchanged for.
     * Only updated for dirty pages. */
    uint8_t     cUnchangedScans;
    /** Whether this page was zero at the last scan. */
    bool        fZero;
    /** Alignment padding. */
    bool        fReserved;
    /** CRC-32 for the first half of the page.
     * This is used together with u32CrcH2 to quickly detect changes in the page
     * during the non-final passes. */
    uint32_t    u32CrcH1;
    /** CRC-32 for the second half of the page. */
    uint32_t    u32CrcH2;
    /** SHA-1 for the saved page.
     * This is used in the final pass to skip pages without changes. */
    uint8_t     abSha1Saved[RTSHA1_HASH_SIZE];
} PGMLIVESAVEMMIO2PAGE;
/** Pointer to a live save status data for an MMIO2 page. */
typedef PGMLIVESAVEMMIO2PAGE *PPGMLIVESAVEMMIO2PAGE;
1322
/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reason why we need to keep track of these
 * registrations.  One of them is the deregistration & cleanup stuff,
 * while another is that the PGMRAMRANGE associated with such a region may
 * have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all.  The pages
 * in the existing RAM range must not be ROM nor MMIO.  A guru meditation
 * will be raised if a partial overlap or an overlap of ROM pages is
 * encountered.  On an overlap we will free all the existing RAM pages and
 * put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range. (a device) */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** The saved state range ID. */
    uint8_t                             idSavedState;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary.
     * (Field name spelling is historical; kept for source compatibility.) */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 12 : 12];
    /** Live save per page tracking data. */
    R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE)    paLSPages;
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1364
1365
1366
1367
/**
 * PGMPhysRead/Write cache entry
 */
typedef struct PGMPHYSCACHEENTRY
{
    /** R3 pointer to physical page. */
    R3PTRTYPE(uint8_t *)                pbR3;
    /** GC Physical address for cache entry */
    RTGCPHYS                            GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    /* NOTE(review): declared RTGCPHYS despite the 'u32' name -- padding only. */
    RTGCPHYS                            u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHEENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries.
     * (Presumably bit i marks Entry[i] as valid -- verify at the use sites.) */
    uint64_t                            aEntries;
    /** Cache entries */
    PGMPHYSCACHEENTRY                   Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
1392
1393
/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE                      Core;
    /** The current age thingy. */
    uint32_t                            iAge;
    /** The current reference count. */
    uint32_t volatile                   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile                   cPermRefs;
    /** The mapping address. */
    void                               *pv;
} PGMCHUNKR3MAP;
1417
/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
#endif
} PGMCHUNKR3MAPTLBE;
/** Pointer to an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark  Must be a power of two value. */
#define PGM_CHUNKR3MAPTLB_ENTRIES   64

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing.  However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want.  This isn't fast
 *          compared to a TLB hit.  There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to the new inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)     ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
1470
1471
/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMPAGE) volatile        pPage;
#else
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
#endif
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
#endif
    /** The address of the page mapping. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) volatile          pv;
#else
    R3R0PTRTYPE(void *) volatile        pv;
#endif
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two. */
#define PGM_PAGER3MAPTLB_ENTRIES 256

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE    aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
1528
1529
/**
 * Mapping cache usage set entry.
 *
 * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
 *          the dynamic ring-0 and (to some extent) raw-mode context mapping
 *          cache.  If it's extended to include ring-3, well, then something
 *          will have to be changed here...
 */
typedef struct PGMMAPSETENTRY
{
    /** The mapping cache index. */
    uint16_t                    iPage;
    /** The number of references.
     * The max is UINT16_MAX - 1. */
    uint16_t                    cRefs;
#if HC_ARCH_BITS == 64
    /** Alignment padding. */
    uint32_t                    alignment;
#endif
    /** Pointer to the page. */
    RTR0PTR                     pvPage;
    /** The physical address for this entry. */
    RTHCPHYS                    HCPhys;
} PGMMAPSETENTRY;
/** Pointer to a mapping cache usage set entry. */
typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;

/**
 * Mapping cache usage set.
 *
 * This is used in ring-0 and the raw-mode context to track dynamic mappings
 * done during exits / traps.  The set is closed (cEntries == PGMMAPSET_CLOSED)
 * when dynamic mappings are not supposed to be made.
 */
typedef struct PGMMAPSET
{
    /** The number of occupied entries.
     * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
     * dynamic mappings. */
    uint32_t                    cEntries;
    /** The start of the current subset.
     * This is UINT32_MAX if no subset is currently open. */
    uint32_t                    iSubset;
    /** The index of the current CPU, only valid if the set is open. */
    int32_t                     iCpu;
    /** Alignment padding. */
    uint32_t                    alignment;
    /** The entries. */
    PGMMAPSETENTRY              aEntries[64];
    /** HCPhys -> iEntry fast lookup table.
     * Use PGMMAPSET_HASH for hashing.
     * The entries may or may not be valid, check against cEntries. */
    uint8_t                     aiHashTable[128];
} PGMMAPSET;
AssertCompileSizeAlignment(PGMMAPSET, 8);
/** Pointer to the mapping cache set. */
typedef PGMMAPSET *PPGMMAPSET;

/** PGMMAPSET::cEntries value for a closed set. */
#define PGMMAPSET_CLOSED            UINT32_C(0xdeadc0fe)

/** Hash function for aiHashTable (128 buckets, so mask with 127). */
#define PGMMAPSET_HASH(HCPhys)      (((HCPhys) >> PAGE_SHIFT) & 127)

/** The max fill size (strict builds); 80% of the 64 aEntries slots. */
#define PGMMAPSET_MAX_FILL          (64U * 80U / 100U)
1593
1594
/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication parts of the GCxxx->CCPtr
 * code is written in a kind of context neutral way.  Time will show whether
 * this actually makes sense or not...
 *
 * @todo this needs to be reconsidered and dropped/redone since the ring-0
 *       context ends up using a global mapping cache on some platforms
 *       (darwin).
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGM_PAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys      The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_RC
// typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                           PPGMPAGEMAP;
 typedef void **                          PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES          PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys)      PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB                 PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE                PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE               *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP                   PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP                  PPPGMPAGEMAP;
#endif
/** @} */
1648
1649
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX                 0
/** The first special index (same value as PGMPOOL_IDX_PD). */
#define PGMPOOL_IDX_FIRST_SPECIAL       1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD                  1
/** Page Directory Pointer Table (PAE root). */
#define PGMPOOL_IDX_PDPT                2
/** AMD64 CR3 level index.*/
#define PGMPOOL_IDX_AMD64_CR3           3
/** Nested paging root.*/
#define PGMPOOL_IDX_NESTED_ROOT         4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST               5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST                0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX          ((uint16_t)0xffff)
/** The NIL 'present' index (same encoding as NIL_PGMPOOL_USER_INDEX). */
#define NIL_PGMPOOL_PRESENT_INDEX       ((uint16_t)0xffff)
1674
/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint32_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
1690
1691
/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX       ((uint16_t)0xffff)
/** The NIL pte index for a phys ext chain slot. */
#define NIL_PGMPOOL_PHYSEXT_IDX_PTE     ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 * Each node carries three (aidx, apte) slot pairs.
 * @todo Calling this an 'extent' is not quite right, find a better name.
 * @todo find out the optimal size of the aidx array
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** Alignment. */
    uint16_t            u16Align;
    /** The user page indexes. */
    uint16_t            aidx[3];
    /** The page table index or NIL_PGMPOOL_PHYSEXT_IDX_PTE if unknown. */
    uint16_t            apte[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
1716
1717
/**
 * The kind of page that's being shadowed.
 *
 * Encodes the shadow (Shw) and guest (Gst) paging structure combination a
 * shadow page pool page was created for.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table;     Gst: no paging */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table;     Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table;     Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: no paging */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table;        Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table;        Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table;        Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
    PGMPOOLKIND_32BIT_PD,
    /** Shw: 32-bit page directory. Gst: no paging. */
    PGMPOOLKIND_32BIT_PD_PHYS,
    /** Shw: PAE page directory 0;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    /** Shw: PAE page directory 1;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    /** Shw: PAE page directory 2;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    /** Shw: PAE page directory 3;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    /** Shw: PAE page directory;    Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    /** Shw: PAE page directory;    Gst: no paging. */
    PGMPOOLKIND_PAE_PD_PHYS,

    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst 32 bits paging. */
    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst PAE PDPT. */
    PGMPOOLKIND_PAE_PDPT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: no paging. */
    PGMPOOLKIND_PAE_PDPT_PHYS,

    /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table;   Gst: no paging */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table;           Gst: no paging */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */

    /** Shw: 64-bit PML4;           Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4,

    /** Shw: EPT page directory pointer table;      Gst: no paging */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table;              Gst: no paging */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table;                        Gst: no paging */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

    /** Shw: Root Nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;
1794
/**
 * The access attributes of the page; only applies to big pages.
 *
 * Naming: USER/SUPERVISOR is the required privilege level, RW = read/write,
 * R = read-only, NX = no-execute.
 */
typedef enum
{
    /** No particular access requirement (default). */
    PGMPOOLACCESS_DONTCARE = 0,
    /** User-mode, read/write. */
    PGMPOOLACCESS_USER_RW,
    /** User-mode, read-only. */
    PGMPOOLACCESS_USER_R,
    /** User-mode, read/write, no-execute. */
    PGMPOOLACCESS_USER_RW_NX,
    /** User-mode, read-only, no-execute. */
    PGMPOOLACCESS_USER_R_NX,
    /** Supervisor-mode, read/write. */
    PGMPOOLACCESS_SUPERVISOR_RW,
    /** Supervisor-mode, read-only. */
    PGMPOOLACCESS_SUPERVISOR_R,
    /** Supervisor-mode, read/write, no-execute. */
    PGMPOOLACCESS_SUPERVISOR_RW_NX,
    /** Supervisor-mode, read-only, no-execute. */
    PGMPOOLACCESS_SUPERVISOR_R_NX
} PGMPOOLACCESS;
1810
/**
 * The tracking data for a page in the pool.
 *
 * NOTE(review): field order matters — PGMPOOL embeds an array of these and
 * asserts member alignment; do not reorder without checking the
 * AssertCompileMemberAlignment statements on PGMPOOL.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node code with the (R3) physical address of this page. */
    AVLOHCPHYSNODECORE Core;
    /** Pointer to the R3 mapping of the page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) pvPageR3;
#else
    R3R0PTRTYPE(void *) pvPageR3;
#endif
    /** The guest physical address. */
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t Alignment0;
#endif
    RTGCPHYS GCPhys;

    /** @name Access handler statistics to determine whether the guest is (re)initializing a page table.
     * @{ */
    /** RIP of the last access-handler hit. */
    RTGCPTR pvLastAccessHandlerRip;
    /** Fault address of the last access-handler hit. */
    RTGCPTR pvLastAccessHandlerFault;
    /** Number of hits at the same RIP/fault address. */
    uint64_t cLastAccessHandlerCount;
    /** @} */

    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t enmKind;
    /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
    uint8_t enmAccess;
    /** The index of this page. */
    uint16_t idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t iNext;
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t iUserHead;
    /** The number of present entries. */
    uint16_t cPresent;
    /** The first entry in the table which is present. */
    uint16_t iFirstPresent;
    /** The number of modifications to the monitored page. */
    uint16_t cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t iMonitoredPrev;
    /** The next page in the age list. */
    uint16_t iAgeNext;
    /** The previous page in the age list. */
    uint16_t iAgePrev;
    /** Used to indicate that the page is zeroed. */
    bool fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile fReusedFlushPending;
    /** Used to mark the page as dirty (write monitoring is temporarily off). */
    bool fDirty;

    /** Used to indicate that this page can't be flushed. Important for cr3 root
     * pages or shadow pae pd pages. */
    uint32_t cLocked;
    /** Slot in the dirty-page tracking of the pool — presumably an index into
     * PGMPOOL::aIdxDirtyPages; TODO confirm against pgmPoolAddDirtyPage. */
    uint32_t idxDirty;
    /** Fault address associated with the dirty marking — NOTE(review): verify
     * semantics against the dirty-page handling code. */
    RTGCPTR pvDirtyFault;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
/** Pointer to a const pool page. */
typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
1886
1887
/** The hash table size. Must be a power of two: PGMPOOL_HASH masks with
 *  (PGMPOOL_HASH_SIZE - 1). */
# define PGMPOOL_HASH_SIZE 0x40
/** The hash function: guest page frame number modulo the table size. */
# define PGMPOOL_HASH(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
1892
1893
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follow immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - R3 Ptr. */
    PVMR3 pVMR3;
    /** The VM handle - R0 Ptr. */
    PVMR0 pVMR0;
    /** The VM handle - RC Ptr. */
    PVMRC pVMRC;
    /** The max pool size. This includes the special IDs. */
    uint16_t cMaxPages;
    /** The current pool size. */
    uint16_t cCurPages;
    /** The head of the free page list. */
    uint16_t iFreeHead;
    /** Padding. */
    uint16_t u16Padding;
    /** Head of the chain of free user nodes. */
    uint16_t iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t cPresent;
    /** Pointer to the array of user nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLUSER) paUsersRC;
    /** Pointer to the array of user nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLUSER) paUsersR3;
    /** Pointer to the array of user nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLUSER) paUsersR0;
    /** Head of the chain of free phys ext nodes. */
    uint16_t iPhysExtFreeHead;
    /** The number of physical xref extent nodes we've allocated. */
    uint16_t cMaxPhysExts;
    /** Pointer to the array of physical xref extent - RC pointer. */
    RCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsRC;
    /** Pointer to the array of physical xref extent nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR3;
    /** Pointer to the array of physical xref extent nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR0;
    /** Hash table for GCPhys addresses. */
    uint16_t aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t iAgeHead;
    /** The tail of the age list. */
    uint16_t iAgeTail;
    /** Set if the cache is enabled. */
    bool fCacheEnabled;
    /** Alignment padding. */
    bool afPadding1[3];
    /** Head of the list of modified pages. */
    uint16_t iModifiedHead;
    /** The current number of modified pages. */
    uint16_t cModifiedPages;
    /** Access handler, RC. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnAccessHandlerRC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnAccessHandlerR3;
    /** The access handler description (R3 ptr). */
    R3PTRTYPE(const char *) pszAccessHandler;
# if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t u32Padding2;
# endif
    /** Next available slot. */
    uint32_t idxFreeDirtyPage;
    /** Number of active dirty pages. */
    uint32_t cDirtyPages;
    /** Array of current dirty pgm pool page indices. */
    uint16_t aIdxDirtyPages[16];
    /** NOTE(review): looks like saved copies of the dirty pages' content
     * (512 qwords = one 4 KB page per slot) — confirm against the dirty
     * page handling code. */
    uint64_t aDirtyPages[16][512];
    /** The number of pages currently in use. */
    uint16_t cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t cUsedPagesHigh;
    uint32_t Alignment1; /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV StatAlloc;
    /** Profiling pgmR3PoolClearDoIt(). */
    STAMPROFILE StatClearAll;
    /** Profiling pgmR3PoolReset(). */
    STAMPROFILE StatR3Reset;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE StatFree;
    /** Counting explicit flushes by PGMPoolFlushPage(). */
    STAMCOUNTER StatForceFlushPage;
    /** Counting explicit flushes of dirty pages by PGMPoolFlushPage(). */
    STAMCOUNTER StatForceFlushDirtyPage;
    /** Counting flushes for reused pages. */
    STAMCOUNTER StatForceFlushReused;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE StatZeroPage;
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER StatTrackFreeUpOneUser;
    /** Nr of flushed entries. */
    STAMCOUNTER StatTrackFlushEntry;
    /** Nr of updated entries. */
    STAMCOUNTER StatTrackFlushEntryKeep;
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER StamTrackPhysExtAllocFailures;
    /** Profiling the RC/R0 access handler. */
    STAMPROFILE StatMonitorRZ;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER StatMonitorRZEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
    STAMPROFILE StatMonitorRZFlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER StatMonitorRZFlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER StatMonitorRZFlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER StatMonitorRZFork;
    /** Profiling the RC/R0 access we've handled (except REP STOSD). */
    STAMPROFILE StatMonitorRZHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER StatMonitorRZIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER StatMonitorRZIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER StatMonitorRZRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE StatMonitorRZRepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER StatMonitorRZFaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER StatMonitorRZFaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER StatMonitorRZFaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER StatMonitorRZFaultPML4;

    /** Profiling the R3 access handler. */
    STAMPROFILE StatMonitorR3;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER StatMonitorR3EmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
    STAMPROFILE StatMonitorR3FlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER StatMonitorR3FlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER StatMonitorR3FlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER StatMonitorR3Fork;
    /** Profiling the R3 access we've handled (except REP STOSD). */
    STAMPROFILE StatMonitorR3Handled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER StatMonitorR3RepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE StatMonitorR3RepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER StatMonitorR3FaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER StatMonitorR3FaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER StatMonitorR3FaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER StatMonitorR3FaultPML4;
    /** The number of times we're called in an async thread and need to flush. */
    STAMCOUNTER StatMonitorR3Async;
    /** Times we've called pgmPoolResetDirtyPages (and there were dirty page). */
    STAMCOUNTER StatResetDirtyPages;
    /** Times we've called pgmPoolAddDirtyPage. */
    STAMCOUNTER StatDirtyPage;
    /** Times we've had to flush duplicates for dirty page management. */
    STAMCOUNTER StatDirtyPageDupFlush;
    /** Times we've had to flush because of overflow. */
    STAMCOUNTER StatDirtyPageOverFlowFlush;

    /** The high water mark for cModifiedPages. */
    uint16_t cModifiedPagesHigh;
    uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundary. */

    /** The number of cache hits. */
    STAMCOUNTER StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER StatCacheUncacheable;
#else
    uint32_t Alignment3; /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE HCPhysTree;
    uint32_t Alignment4; /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
AssertCompileMemberAlignment(PGMPOOL, iModifiedHead, 8);
AssertCompileMemberAlignment(PGMPOOL, aDirtyPages, 8);
AssertCompileMemberAlignment(PGMPOOL, cUsedPages, 8);
#ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(PGMPOOL, StatAlloc, 8);
#endif
AssertCompileMemberAlignment(PGMPOOL, aPages, 8);
2118
2119
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the mapped page (void *).
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_STRICT)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageStrict(pPage)
/** Strict-build helper: returns the R3 mapping, asserting it is valid. */
DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
{
    Assert(pPage && pPage->pvPageR3);
    return pPage->pvPageR3;
}
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) ((pPage)->pvPageR3)
#endif
2145
/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
 * Maps a pool page into the current context, taking a PGM instance instead
 * of a VM handle.
 *
 * @returns Pointer to the mapped page (void *).
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined(pPGM, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined(pPGM, (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
#endif
2164
/** @def PGMPOOL_PAGE_2_PTR_BY_PGMCPU
 * Maps a pool page into the current context, taking a PGMCPU instance instead
 * of a VM handle.
 *
 * @returns Pointer to the mapped page (void *).
 * @param   pPGM    Pointer to the PGMCPU instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGMCPU2VM(pPGM), pPage)
#endif
2183
2184
/** @name Per guest page tracking data.
 * This is currently kept as a 16-bit word in the PGMPAGE structure; the plan
 * is to grow it and split it up later on, but for now we play it safe and
 * change as little as possible.
 *
 * The 16-bit word is made up of two parts:
 *
 *      - The lower 14 bits form the @a idx field: either the index of a page
 *        in the shadow page pool, or an index into the extent list.
 *
 *      - The top 2 bits form the @a cRefs field, counting the number of
 *        shadow page pool references to the page.  When cRefs equals
 *        PGMPOOL_TD_CREFS_PHYSEXT, the @a idx field is an index into the
 *        extent (misnomer) table rather than the shadow page pool.
 *
 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
 * the 16-bit word.
 *
 * @{ */
/** The shift count for getting to the cRefs part. */
#define PGMPOOL_TD_CREFS_SHIFT          14
/** The mask applied after shifting the tracking data down by
 *  PGMPOOL_TD_CREFS_SHIFT. */
#define PGMPOOL_TD_CREFS_MASK           0x3
/** The cRefs value used to indicate that the idx is the head of a
 *  physical cross reference list. */
#define PGMPOOL_TD_CREFS_PHYSEXT        PGMPOOL_TD_CREFS_MASK
/** The shift used to get idx. */
#define PGMPOOL_TD_IDX_SHIFT            0
/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
#define PGMPOOL_TD_IDX_MASK             0x3fff
/** The idx value used when we are out of PGMPOOLPHYSEXT entries and/or there
 *  are simply too many mappings of the page. */
#define PGMPOOL_TD_IDX_OVERFLOWED       PGMPOOL_TD_IDX_MASK

/** @def PGMPOOL_TD_MAKE
 * Assembles a 16-bit tracking data word from its two fields.
 *
 * @returns tracking data.
 * @param   cRefs   The @a cRefs field.  Must be within bounds!
 * @param   idx     The @a idx field.  Must also be within bounds! */
#define PGMPOOL_TD_MAKE(cRefs, idx)     ( (idx) | ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) )

/** @def PGMPOOL_TD_GET_CREFS
 * Extracts the @a cRefs field from a tracking data word.
 *
 * @returns The @a cRefs field.
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_CREFS(u16)       ( ((u16) & (PGMPOOL_TD_CREFS_MASK << PGMPOOL_TD_CREFS_SHIFT)) >> PGMPOOL_TD_CREFS_SHIFT )

/** @def PGMPOOL_TD_GET_IDX
 * Extracts the @a idx field from a tracking data word.
 *
 * @returns The @a idx field.
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_IDX(u16)         ( ((u16) & (PGMPOOL_TD_IDX_MASK << PGMPOOL_TD_IDX_SHIFT)) >> PGMPOOL_TD_IDX_SHIFT )
/** @} */
2242
2243
/**
 * Trees are using self relative offsets as pointers.
 * So, all its data, including the root pointer, must be in the heap for HC and GC
 * to have the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree).
     * Maps guest-physical ranges back to the virtual handlers covering them. */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
2262
2263
/** @name Paging mode macros
 * Name-mangling helpers that expand to the context-specific (R3/R0/RC)
 * identifier or string for a PGM paging-mode function.
 * @{ */
#ifdef IN_RC
# define PGM_CTX(a,b) a##RC##b
# define PGM_CTX_STR(a,b) a "GC" b
# define PGM_CTX_DECL(type) VMMRCDECL(type)
#else
# ifdef IN_RING3
# define PGM_CTX(a,b) a##R3##b
# define PGM_CTX_STR(a,b) a "R3" b
# define PGM_CTX_DECL(type) DECLCALLBACK(type)
# else
# define PGM_CTX(a,b) a##R0##b
# define PGM_CTX_STR(a,b) a "R0" b
# define PGM_CTX_DECL(type) VMMDECL(type)
# endif
#endif
2281
/* Guest (Gst) mode function name mangling: pgm<Ctx>Gst<Mode><name>.
 * NOTE(review): PGM_GST_NAME (used by PGM_GST_DECL) is defined per-mode by
 * the including template file, not here. */
#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_RC_REAL_STR(name) "pgmRCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_RC_PROT_STR(name) "pgmRCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_RC_PAE_STR(name) "pgmRCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
#define PGM_GST_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)
2299
/* Shadow (Shw) mode function name mangling: pgm<Ctx>Shw<Mode><name>.
 * NOTE(review): PGM_SHW_NAME (used by PGM_SHW_DECL) is defined per-mode by
 * the including template file, not here. */
#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_RC_PAE_STR(name) "pgmRCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_NAME_NESTED(name) PGM_CTX(pgm,ShwNested##name)
#define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
#define PGM_SHW_NAME_EPT(name) PGM_CTX(pgm,ShwEPT##name)
#define PGM_SHW_NAME_RC_EPT_STR(name) "pgmRCShwEPT" #name
#define PGM_SHW_NAME_R0_EPT_STR(name) "pgmR0ShwEPT" #name
#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
#define PGM_SHW_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Shw##name))
2317
/* Shw_Gst: "both" (Bth) mode function name mangling for each supported
 * shadow/guest paging-mode combination: pgm<Ctx>Bth<Shw><Gst><name>.
 * NOTE(review): PGM_BTH_NAME (used by PGM_BTH_DECL) is defined per-mode by
 * the including template file, not here. */
#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_NESTED_REAL(name) PGM_CTX(pgm,BthNestedReal##name)
#define PGM_BTH_NAME_NESTED_PROT(name) PGM_CTX(pgm,BthNestedProt##name)
#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
#define PGM_BTH_NAME_NESTED_PAE(name) PGM_CTX(pgm,BthNestedPAE##name)
#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
#define PGM_BTH_NAME_EPT_REAL(name) PGM_CTX(pgm,BthEPTReal##name)
#define PGM_BTH_NAME_EPT_PROT(name) PGM_CTX(pgm,BthEPTProt##name)
#define PGM_BTH_NAME_EPT_32BIT(name) PGM_CTX(pgm,BthEPT32Bit##name)
#define PGM_BTH_NAME_EPT_PAE(name) PGM_CTX(pgm,BthEPTPAE##name)
#define PGM_BTH_NAME_EPT_AMD64(name) PGM_CTX(pgm,BthEPTAMD64##name)

/* String variants for statistics/registration; note there is no
 * RC AMD64_PROT variant (only R0 has it). */
#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name) "pgmRCBth32BitReal" #name
#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name) "pgmRCBth32BitProt" #name
#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name) "pgmRCBth32Bit32Bit" #name
#define PGM_BTH_NAME_RC_PAE_REAL_STR(name) "pgmRCBthPAEReal" #name
#define PGM_BTH_NAME_RC_PAE_PROT_STR(name) "pgmRCBthPAEProt" #name
#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name) "pgmRCBthPAE32Bit" #name
#define PGM_BTH_NAME_RC_PAE_PAE_STR(name) "pgmRCBthPAEPAE" #name
#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name) "pgmRCBthAMD64AMD64" #name
#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name) "pgmRCBthNestedReal" #name
#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name) "pgmRCBthNestedProt" #name
#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name) "pgmRCBthNested32Bit" #name
#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name) "pgmRCBthNestedPAE" #name
#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name) "pgmRCBthNestedAMD64" #name
#define PGM_BTH_NAME_RC_EPT_REAL_STR(name) "pgmRCBthEPTReal" #name
#define PGM_BTH_NAME_RC_EPT_PROT_STR(name) "pgmRCBthEPTProt" #name
#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name) "pgmRCBthEPT32Bit" #name
#define PGM_BTH_NAME_RC_EPT_PAE_STR(name) "pgmRCBthEPTPAE" #name
#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name) "pgmRCBthEPTAMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name) "pgmR0BthNestedReal" #name
#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name) "pgmR0BthNestedProt" #name
#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name) "pgmR0BthNested32Bit" #name
#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name) "pgmR0BthNestedPAE" #name
#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name) "pgmR0BthNestedAMD64" #name
#define PGM_BTH_NAME_R0_EPT_REAL_STR(name) "pgmR0BthEPTReal" #name
#define PGM_BTH_NAME_R0_EPT_PROT_STR(name) "pgmR0BthEPTProt" #name
#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name) "pgmR0BthEPT32Bit" #name
#define PGM_BTH_NAME_R0_EPT_PAE_STR(name) "pgmR0BthEPTPAE" #name
#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name) "pgmR0BthEPTAMD64" #name

#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
#define PGM_BTH_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
2380
/**
 * Data for each paging mode.
 *
 * Holds the per-context (R3/R0/RC) function-pointer tables for one
 * shadow/guest paging-mode combination, identified by uShwType/uGstType.
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t uGstType;
    /** The shadow mode type. */
    uint32_t uShwType;

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));

    DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));

    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    /* no pfnR3BthTrap0eHandler */
    DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));

    DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
    DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));

    DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
    DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;
2463
2464
2465
2466/**
2467 * Converts a PGM pointer into a VM pointer.
2468 * @returns Pointer to the VM structure the PGM is part of.
2469 * @param pPGM Pointer to PGM instance data.
2470 */
2471#define PGM2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2472
2473/**
2474 * PGM Data (part of VM)
2475 */
2476typedef struct PGM
2477{
2478 /** Offset to the VM structure. */
2479 RTINT offVM;
2480 /** Offset of the PGMCPU structure relative to VMCPU. */
2481 RTINT offVCpuPGM;
2482
2483 /** @cfgm{RamPreAlloc, boolean, false}
2484 * Indicates whether the base RAM should all be allocated before starting
2485 * the VM (default), or if it should be allocated when first written to.
2486 */
2487 bool fRamPreAlloc;
2488 /** Indicates whether write monitoring is currently in use.
2489 * This is used to prevent conflicts between live saving and page sharing
2490 * detection. */
2491 bool fPhysWriteMonitoringEngaged;
2492 /** Alignment padding. */
2493 bool afAlignment0[2];
2494
2495 /*
2496 * This will be redefined at least two more times before we're done, I'm sure.
2497 * The current code is only to get on with the coding.
2498 * - 2004-06-10: initial version, bird.
2499 * - 2004-07-02: 1st time, bird.
2500 * - 2004-10-18: 2nd time, bird.
2501 * - 2005-07-xx: 3rd time, bird.
2502 */
2503
2504 /** The host paging mode. (This is what SUPLib reports.) */
2505 SUPPAGINGMODE enmHostMode;
2506
2507 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2508 RCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
2509 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2510 RCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
2511
2512 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
2513 RTGCPHYS GCPhys4MBPSEMask;
2514
2515 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2516 * This is sorted by physical address and contains no overlapping ranges. */
2517 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2518 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2519 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2520 /** RC pointer corresponding to PGM::pRamRangesR3. */
2521 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
2522 /** Generation ID for the RAM ranges. This member is incremented everytime a RAM
2523 * range is linked or unlinked. */
2524 uint32_t volatile idRamRangesGen;
2525
2526 /** Pointer to the list of ROM ranges - for R3.
2527 * This is sorted by physical address and contains no overlapping ranges. */
2528 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2529 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2530 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
2531 /** RC pointer corresponding to PGM::pRomRangesR3. */
2532 RCPTRTYPE(PPGMROMRANGE) pRomRangesRC;
2533#if HC_ARCH_BITS == 64
2534 /** Alignment padding. */
2535 RTRCPTR GCPtrPadding2;
2536#endif
2537
2538 /** Pointer to the list of MMIO2 ranges - for R3.
2539 * Registration order. */
2540 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2541
2542 /** PGM offset based trees - R3 Ptr. */
2543 R3PTRTYPE(PPGMTREES) pTreesR3;
2544 /** PGM offset based trees - R0 Ptr. */
2545 R0PTRTYPE(PPGMTREES) pTreesR0;
2546 /** PGM offset based trees - RC Ptr. */
2547 RCPTRTYPE(PPGMTREES) pTreesRC;
2548
2549 /** Linked list of GC mappings - for RC.
2550 * The list is sorted ascending on address.
2551 */
2552 RCPTRTYPE(PPGMMAPPING) pMappingsRC;
2553 /** Linked list of GC mappings - for HC.
2554 * The list is sorted ascending on address.
2555 */
2556 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2557 /** Linked list of GC mappings - for R0.
2558 * The list is sorted ascending on address.
2559 */
2560 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2561
2562 /** Pointer to the 5 page CR3 content mapping.
2563 * The first page is always the CR3 (in some form) while the 4 other pages
2564 * are used of the PDs in PAE mode. */
2565 RTGCPTR GCPtrCR3Mapping;
2566#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2567 uint32_t u32Alignment1;
2568#endif
2569
2570 /** Indicates that PGMR3FinalizeMappings has been called and that further
2571 * PGMR3MapIntermediate calls will be rejected. */
2572 bool fFinalizedMappings;
2573 /** If set no conflict checks are required. */
2574 bool fMappingsFixed;
2575 /** If set if restored as fixed but we were unable to re-fixate at the old
2576 * location because of room or address incompatibilities. */
2577 bool fMappingsFixedRestored;
2578 /** If set, then no mappings are put into the shadow page table.
2579 * Use pgmMapAreMappingsEnabled() instead of direct access. */
2580 bool fMappingsDisabled;
2581 /** Size of fixed mapping.
2582 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
2583 uint32_t cbMappingFixed;
2584 /** Base address (GC) of fixed mapping.
2585 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
2586 RTGCPTR GCPtrMappingFixed;
2587 /** The address of the previous RAM range mapping. */
2588 RTGCPTR GCPtrPrevRamRangeMapping;
2589
2590 /** @name Intermediate Context
2591 * @{ */
2592 /** Pointer to the intermediate page directory - Normal. */
2593 R3PTRTYPE(PX86PD) pInterPD;
2594 /** Pointer to the intermedate page tables - Normal.
2595 * There are two page tables, one for the identity mapping and one for
2596 * the host context mapping (of the core code). */
2597 R3PTRTYPE(PX86PT) apInterPTs[2];
2598 /** Pointer to the intermedate page tables - PAE. */
2599 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
2600 /** Pointer to the intermedate page directory - PAE. */
2601 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
2602 /** Pointer to the intermedate page directory - PAE. */
2603 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
2604 /** Pointer to the intermedate page-map level 4 - AMD64. */
2605 R3PTRTYPE(PX86PML4) pInterPaePML4;
2606 /** Pointer to the intermedate page directory - AMD64. */
2607 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2608 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2609 RTHCPHYS HCPhysInterPD;
2610 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2611 RTHCPHYS HCPhysInterPaePDPT;
2612 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2613 RTHCPHYS HCPhysInterPaePML4;
2614 /** @} */
2615
2616 /** Base address of the dynamic page mapping area.
2617 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2618 */
2619 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2620 /** The index of the last entry used in the dynamic page mapping area. */
2621 RTUINT iDynPageMapLast;
2622 /** Cache containing the last entries in the dynamic page mapping area.
2623 * The cache size is covering half of the mapping area. */
2624 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2625 /** Keep a lock counter for the full (!) mapping area. */
2626 uint32_t aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
2627
2628 /** The address of the ring-0 mapping cache if we're making use of it. */
2629 RTR0PTR pvR0DynMapUsed;
2630#if HC_ARCH_BITS == 32
2631 /** Alignment padding that makes the next member start on a 8 byte boundrary. */
2632 uint32_t u32Alignment2;
2633#endif
2634
2635 /** PGM critical section.
2636 * This protects the physical & virtual access handlers, ram ranges,
2637 * and the page flag updating (some of it anyway).
2638 */
2639 PDMCRITSECT CritSect;
2640
2641 /** Pointer to SHW+GST mode data (function pointers).
2642 * The index into this table is made up from */
2643 R3PTRTYPE(PPGMMODEDATA) paModeData;
2644
2645 /** Shadow Page Pool - R3 Ptr. */
2646 R3PTRTYPE(PPGMPOOL) pPoolR3;
2647 /** Shadow Page Pool - R0 Ptr. */
2648 R0PTRTYPE(PPGMPOOL) pPoolR0;
2649 /** Shadow Page Pool - RC Ptr. */
2650 RCPTRTYPE(PPGMPOOL) pPoolRC;
2651
2652 /** We're not in a state which permits writes to guest memory.
2653 * (Only used in strict builds.) */
2654 bool fNoMorePhysWrites;
2655 /** Alignment padding that makes the next member start on a 8 byte boundrary. */
2656 bool afAlignment3[HC_ARCH_BITS == 32 ? 7: 3];
2657
2658 /**
2659 * Data associated with managing the ring-3 mappings of the allocation chunks.
2660 */
2661 struct
2662 {
2663 /** The chunk tree, ordered by chunk id. */
2664#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2665 R3PTRTYPE(PAVLU32NODECORE) pTree;
2666#else
2667 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2668#endif
2669 /** The chunk mapping TLB. */
2670 PGMCHUNKR3MAPTLB Tlb;
2671 /** The number of mapped chunks. */
2672 uint32_t c;
2673 /** The maximum number of mapped chunks.
2674 * @cfgm PGM/MaxRing3Chunks */
2675 uint32_t cMax;
2676 /** The current time. */
2677 uint32_t iNow;
2678 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2679 uint32_t AgeingCountdown;
2680 } ChunkR3Map;
2681
2682 /**
2683 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2684 */
2685 PGMPAGER3MAPTLB PhysTlbHC;
2686
2687 /** @name The zero page.
2688 * @{ */
2689 /** The host physical address of the zero page. */
2690 RTHCPHYS HCPhysZeroPg;
2691 /** The ring-3 mapping of the zero page. */
2692 RTR3PTR pvZeroPgR3;
2693 /** The ring-0 mapping of the zero page. */
2694 RTR0PTR pvZeroPgR0;
2695 /** The GC mapping of the zero page. */
2696 RTGCPTR pvZeroPgRC;
2697 /** @}*/
2698
2699 /** The number of handy pages. */
2700 uint32_t cHandyPages;
2701
2702 /** The number of large handy pages. */
2703 uint32_t cLargeHandyPages;
2704
2705 /**
2706 * Array of handy pages.
2707 *
2708 * This array is used in a two way communication between pgmPhysAllocPage
2709 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2710 * an intermediary.
2711 *
2712 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2713 * (The current size of 32 pages, means 128 KB of handy memory.)
2714 */
2715 GMMPAGEDESC aHandyPages[PGM_HANDY_PAGES];
2716
2717 /**
2718 * Array of large handy pages. (currently size 1)
2719 *
2720 * This array is used in a two way communication between pgmPhysAllocLargePage
2721 * and GMMR0AllocateLargePage, with PGMR3PhysAllocateLargePage serving as
2722 * an intermediary.
2723 */
2724 GMMPAGEDESC aLargeHandyPage[1];
2725
2726 /**
2727 * Live save data.
2728 */
2729 struct
2730 {
2731 /** Per type statistics. */
2732 struct
2733 {
2734 /** The number of ready pages. */
2735 uint32_t cReadyPages;
2736 /** The number of dirty pages. */
2737 uint32_t cDirtyPages;
2738 /** The number of ready zero pages. */
2739 uint32_t cZeroPages;
2740 /** The number of write monitored pages. */
2741 uint32_t cMonitoredPages;
2742 } Rom,
2743 Mmio2,
2744 Ram;
2745 /** The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM). */
2746 uint32_t cIgnoredPages;
2747 /** Indicates that a live save operation is active. */
2748 bool fActive;
2749 /** Padding. */
2750 bool afReserved[2];
2751 /** The next history index. */
2752 uint8_t iDirtyPagesHistory;
2753 /** History of the total amount of dirty pages. */
2754 uint32_t acDirtyPagesHistory[64];
2755 /** Short term dirty page average. */
2756 uint32_t cDirtyPagesShort;
2757 /** Long term dirty page average. */
2758 uint32_t cDirtyPagesLong;
2759 /** The number of saved pages. This is used to get some kind of estimate of the
2760 * link speed so we can decide when we're done. It is reset after the first
2761 * 7 passes so the speed estimate doesn't get inflated by the initial set of
2762 * zero pages. */
2763 uint64_t cSavedPages;
2764 /** The nanosecond timestamp when cSavedPages was 0. */
2765 uint64_t uSaveStartNS;
2766 /** Pages per second (for statistics). */
2767 uint32_t cPagesPerSecond;
2768 uint32_t cAlignment;
2769 } LiveSave;
2770
2771 /** @name Error injection.
2772 * @{ */
2773 /** Inject handy page allocation errors pretending we're completely out of
2774 * memory. */
2775 bool volatile fErrInjHandyPages;
2776 /** Padding. */
2777 bool afReserved[3];
2778 /** @} */
2779
2780 /** @name Release Statistics
2781 * @{ */
2782 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
2783 uint32_t cPrivatePages; /**< The number of private pages. */
2784 uint32_t cSharedPages; /**< The number of shared pages. */
2785 uint32_t cReusedSharedPages; /**< The number of reused shared pages. */
2786 uint32_t cZeroPages; /**< The number of zero backed pages. */
2787 uint32_t cPureMmioPages; /**< The number of pure MMIO pages. */
2788 uint32_t cMonitoredPages; /**< The number of write monitored pages. */
2789 uint32_t cWrittenToPages; /**< The number of previously write monitored pages. */
2790 uint32_t cWriteLockedPages; /**< The number of write locked pages. */
2791 uint32_t cReadLockedPages; /**< The number of read locked pages. */
2792 uint32_t cBalloonedPages; /**< The number of ballooned pages. */
2793 uint32_t cMappedChunks; /**< Number of times we mapped a chunk. */
2794 uint32_t cUnmappedChunks; /**< Number of times we unmapped a chunk. */
2795/* uint32_t aAlignment4[1]; */
2796
2797 /** The number of times we were forced to change the hypervisor region location. */
2798 STAMCOUNTER cRelocations;
2799
2800 STAMCOUNTER StatLargePageAlloc; /**< The number of large pages we've allocated.*/
2801 STAMCOUNTER StatLargePageReused; /**< The number of large pages we've reused.*/
2802 STAMCOUNTER StatLargePageRefused; /**< The number of times we couldn't use a large page.*/
2803 STAMCOUNTER StatLargePageRecheck; /**< The number of times we rechecked a disabled large page.*/
2804 /** @} */
2805
2806#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2807 /* R3 only: */
2808 STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
2809 STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
2810
2811 STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
2812 STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
2813 STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
2814 STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
2815 STAMCOUNTER StatPageMapTlbFlushes; /**< ALL: Ring-3/0 page mapper TLB flushes. */
2816 STAMCOUNTER StatPageMapTlbFlushEntry; /**< ALL: Ring-3/0 page mapper TLB flushes. */
2817 STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
2818 STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
2819 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
2820 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
2821 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
2822 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
2823 STAMPROFILE StatR3SyncCR3HandlerVirtualReset; /**< R3: Profiling of the virtual handler resets. */
2824 STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
2825 STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
2826 STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
2827 STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2828 STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2829 STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
2830 STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
2831/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
2832 STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
2833 STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
2834/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
2835
2836 /* RC only: */
2837 STAMCOUNTER StatRCDynMapCacheMisses; /**< RC: The number of dynamic page mapping cache misses */
2838 STAMCOUNTER StatRCDynMapCacheHits; /**< RC: The number of dynamic page mapping cache hits */
2839 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
2840 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
2841
2842 STAMCOUNTER StatRZPhysRead;
2843 STAMCOUNTER StatRZPhysReadBytes;
2844 STAMCOUNTER StatRZPhysWrite;
2845 STAMCOUNTER StatRZPhysWriteBytes;
2846 STAMCOUNTER StatR3PhysRead;
2847 STAMCOUNTER StatR3PhysReadBytes;
2848 STAMCOUNTER StatR3PhysWrite;
2849 STAMCOUNTER StatR3PhysWriteBytes;
2850 STAMCOUNTER StatRCPhysRead;
2851 STAMCOUNTER StatRCPhysReadBytes;
2852 STAMCOUNTER StatRCPhysWrite;
2853 STAMCOUNTER StatRCPhysWriteBytes;
2854
2855 STAMCOUNTER StatRZPhysSimpleRead;
2856 STAMCOUNTER StatRZPhysSimpleReadBytes;
2857 STAMCOUNTER StatRZPhysSimpleWrite;
2858 STAMCOUNTER StatRZPhysSimpleWriteBytes;
2859 STAMCOUNTER StatR3PhysSimpleRead;
2860 STAMCOUNTER StatR3PhysSimpleReadBytes;
2861 STAMCOUNTER StatR3PhysSimpleWrite;
2862 STAMCOUNTER StatR3PhysSimpleWriteBytes;
2863 STAMCOUNTER StatRCPhysSimpleRead;
2864 STAMCOUNTER StatRCPhysSimpleReadBytes;
2865 STAMCOUNTER StatRCPhysSimpleWrite;
2866 STAMCOUNTER StatRCPhysSimpleWriteBytes;
2867
2868 STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
2869 STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2870 STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
2871 STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which has overflowed cRef2. */
2872 STAMCOUNTER StatTrackNoExtentsLeft; /**< The number of times the extent list was exhausted. */
2873 STAMCOUNTER StatTrackOverflows; /**< The number of times the extent list grows to long. */
2874 STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
2875
2876 /** Time spent by the host OS for large page allocation. */
2877 STAMPROFILE StatAllocLargePage;
2878 /** Time spent clearing the newly allocated large pages. */
2879 STAMPROFILE StatClearLargePage;
2880 /** pgmPhysIsValidLargePage profiling - R3 */
2881 STAMPROFILE StatR3IsValidLargePage;
2882 /** pgmPhysIsValidLargePage profiling - RZ*/
2883 STAMPROFILE StatRZIsValidLargePage;
2884#endif
2885} PGM;
/* Compile-time layout checks: the 64-bit and lock/TLB members above must stay
   8-byte aligned because this structure is shared between host contexts whose
   natural alignment rules differ.  Revisit these when changing PGM's layout. */
#ifndef IN_TSTVMSTRUCTGC /* HACK */
AssertCompileMemberAlignment(PGM, paDynPageMap32BitPTEsGC, 8);
AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
AssertCompileMemberAlignment(PGM, CritSect, 8);
AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
AssertCompileMemberAlignment(PGM, aHandyPages, 8);
AssertCompileMemberAlignment(PGM, cRelocations, 8);
#endif /* !IN_TSTVMSTRUCTGC */
/** Pointer to the PGM instance data. */
typedef PGM *PPGM;
2900
2901
2902/**
2903 * Converts a PGMCPU pointer into a VM pointer.
2904 * @returns Pointer to the VM structure the PGM is part of.
2905 * @param pPGM Pointer to PGMCPU instance data.
2906 */
2907#define PGMCPU2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2908
2909/**
2910 * Converts a PGMCPU pointer into a PGM pointer.
2911 * @returns Pointer to the VM structure the PGM is part of.
2912 * @param pPGM Pointer to PGMCPU instance data.
2913 */
2914#define PGMCPU2PGM(pPGMCpu) ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )
2915
2916/**
2917 * PGMCPU Data (part of VMCPU).
2918 */
2919typedef struct PGMCPU
2920{
2921 /** Offset to the VM structure. */
2922 RTINT offVM;
2923 /** Offset to the VMCPU structure. */
2924 RTINT offVCpu;
2925 /** Offset of the PGM structure relative to VMCPU. */
2926 RTINT offPGM;
2927 RTINT uPadding0; /**< structure size alignment. */
2928
2929#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2930 /** Automatically tracked physical memory mapping set.
2931 * Ring-0 and strict raw-mode builds. */
2932 PGMMAPSET AutoSet;
2933#endif
2934
2935 /** A20 gate mask.
2936 * Our current approach to A20 emulation is to let REM do it and don't bother
2937 * anywhere else. The interesting Guests will be operating with it enabled anyway.
2938 * But whould need arrise, we'll subject physical addresses to this mask. */
2939 RTGCPHYS GCPhysA20Mask;
2940 /** A20 gate state - boolean! */
2941 bool fA20Enabled;
2942
2943 /** What needs syncing (PGM_SYNC_*).
2944 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2945 * PGMFlushTLB, and PGMR3Load. */
2946 RTUINT fSyncFlags;
2947
2948 /** The shadow paging mode. */
2949 PGMMODE enmShadowMode;
2950 /** The guest paging mode. */
2951 PGMMODE enmGuestMode;
2952
2953 /** The current physical address representing in the guest CR3 register. */
2954 RTGCPHYS GCPhysCR3;
2955
2956 /** @name 32-bit Guest Paging.
2957 * @{ */
2958 /** The guest's page directory, R3 pointer. */
2959 R3PTRTYPE(PX86PD) pGst32BitPdR3;
2960#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2961 /** The guest's page directory, R0 pointer. */
2962 R0PTRTYPE(PX86PD) pGst32BitPdR0;
2963#endif
2964 /** The guest's page directory, static RC mapping. */
2965 RCPTRTYPE(PX86PD) pGst32BitPdRC;
2966 /** @} */
2967
2968 /** @name PAE Guest Paging.
2969 * @{ */
2970 /** The guest's page directory pointer table, static RC mapping. */
2971 RCPTRTYPE(PX86PDPT) pGstPaePdptRC;
2972 /** The guest's page directory pointer table, R3 pointer. */
2973 R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
2974#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2975 /** The guest's page directory pointer table, R0 pointer. */
2976 R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
2977#endif
2978
2979 /** The guest's page directories, R3 pointers.
2980 * These are individual pointers and don't have to be adjecent.
2981 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2982 R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
2983 /** The guest's page directories, R0 pointers.
2984 * Same restrictions as apGstPaePDsR3. */
2985#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2986 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
2987#endif
2988 /** The guest's page directories, static GC mapping.
2989 * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
2990 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2991 RCPTRTYPE(PX86PDPAE) apGstPaePDsRC[4];
2992 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
2993 RTGCPHYS aGCPhysGstPaePDs[4];
2994 /** The physical addresses of the monitored guest page directories (PAE). */
2995 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
2996 /** @} */
2997
2998 /** @name AMD64 Guest Paging.
2999 * @{ */
3000 /** The guest's page directory pointer table, R3 pointer. */
3001 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
3002#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
3003 /** The guest's page directory pointer table, R0 pointer. */
3004 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
3005#else
3006 RTR0PTR alignment6b; /**< alignment equalizer. */
3007#endif
3008 /** @} */
3009
3010 /** Pointer to the page of the current active CR3 - R3 Ptr. */
3011 R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
3012 /** Pointer to the page of the current active CR3 - R0 Ptr. */
3013 R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
3014 /** Pointer to the page of the current active CR3 - RC Ptr. */
3015 RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
3016 /* The shadow page pool index of the user table as specified during allocation; useful for freeing root pages */
3017 uint32_t iShwUser;
3018 /* The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
3019 uint32_t iShwUserTable;
3020# if HC_ARCH_BITS == 64
3021 RTRCPTR alignment6; /**< structure size alignment. */
3022# endif
3023 /** @} */
3024
3025 /** @name Function pointers for Shadow paging.
3026 * @{
3027 */
3028 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3029 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
3030 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3031 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
3032
3033 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3034 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
3035
3036 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3037 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags));
3038
3039 /** @} */
3040
3041 /** @name Function pointers for Guest paging.
3042 * @{
3043 */
3044 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3045 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
3046 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3047 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3048 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3049 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3050 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3051 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3052#if HC_ARCH_BITS == 64
3053 RTRCPTR alignment3; /**< structure size alignment. */
3054#endif
3055
3056 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3057 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3058 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3059 /** @} */
3060
3061 /** @name Function pointers for Both Shadow and Guest paging.
3062 * @{
3063 */
3064 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3065 /* no pfnR3BthTrap0eHandler */
3066 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3067 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3068 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3069 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3070 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3071 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3072 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3073 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
3074
3075 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3076 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3077 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3078 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3079 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3080 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3081 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3082 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3083 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
3084
3085 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3086 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3087 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3088 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3089 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3090 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3091 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3092 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3093 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
3094 RTRCPTR alignment2; /**< structure size alignment. */
3095 /** @} */
3096
3097 /** For saving stack space, the disassembler state is allocated here instead of
3098 * on the stack.
3099 * @note The DISCPUSTATE structure is not R3/R0/RZ clean! */
3100 union
3101 {
3102 /** The disassembler scratch space. */
3103 DISCPUSTATE DisState;
3104 /** Padding. */
3105 uint8_t abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
3106 };
3107
3108 /** Counts the number of PGM pool access handler calls. */
3109 uint64_t cPoolAccessHandler;
3110
3111 /** @name Release Statistics
3112 * @{ */
3113 /** The number of times the guest has switched mode since last reset or statistics reset. */
3114 STAMCOUNTER cGuestModeChanges;
3115 /** @} */
3116
3117#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
3118 /** @name Statistics
3119 * @{ */
3120 /** RC: Which statistic this \#PF should be attributed to. */
3121 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
3122 RTRCPTR padding0;
3123 /** R0: Which statistic this \#PF should be attributed to. */
3124 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
3125 RTR0PTR padding1;
3126
3127 /* Common */
3128 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
3129 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
3130
3131 /* R0 only: */
3132 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
3133 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
3134 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
3135 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3136 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
3137 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
3138 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
3139 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
3140 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3141 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
3142 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
3143 STAMCOUNTER StatR0DynMapSetSearchFlushes; /**< R0: Set search resorting to subset flushes. */
3144 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
3145 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
3146 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
3147 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
3148 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
3149 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
3150 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
3151 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
3152 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
3153 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
3154 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
3155 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
3156 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
3157 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
3158
3159 /* RZ only: */
3160 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
3161 STAMPROFILE StatRZTrap0eTimeCheckPageFault;
3162 STAMPROFILE StatRZTrap0eTimeSyncPT;
3163 STAMPROFILE StatRZTrap0eTimeMapping;
3164 STAMPROFILE StatRZTrap0eTimeOutOfSync;
3165 STAMPROFILE StatRZTrap0eTimeHandlers;
3166 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
3167 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
3168 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
3169 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
3170 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
3171 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
3172 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
3173 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
3174 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
3175 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
3176 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
3177 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
3178 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
3179 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
3180 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
3181 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
3182 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
3183 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
3184 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
3185 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
3186 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
3187 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: \#PF err kind */
3188 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: \#PF err kind */
3189 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: \#PF err kind */
3190 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: \#PF err kind */
3191 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: \#PF err kind */
3192 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: \#PF err kind */
3193 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: \#PF err kind */
3194 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: \#PF err kind */
3195 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: \#PF err kind */
3196 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: \#PF err kind */
3197 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: \#PF err kind */
3198 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest \#PFs. */
3199 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest \#PF ending up at the end of the \#PF code. */
3200 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest \#PF to HMA or other mapping. */
3201 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
3202 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
3203 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the \#PFs. */
3204 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
3205 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
3206 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
3207 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
3208 STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
3209
3210 /* HC - R3 and (maybe) R0: */
3211
3212 /* RZ & R3: */
3213 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
3214 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
3215 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
3216 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
3217 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
3218 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
3219 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
3220 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
3221 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
3222 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
3223 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
3224 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
3225 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
3226 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
3227 STAMCOUNTER StatRZSyncPagePDNAs; /**< RC/R0: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3228 STAMCOUNTER StatRZSyncPagePDOutOfSync; /**< RC/R0: The number of time we've encountered an out-of-sync PD in SyncPage. */
3229 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
3230 STAMPROFILE StatRZDirtyBitTracking; /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault().. */
3231 STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3232 STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3233 STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
3234 STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
3235 STAMCOUNTER StatRZDirtyPageStale; /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
3236 STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real pages faults during dirty bit tracking. */
3237 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
3238 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
3239 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
3240 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
3241 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
3242 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
3243 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3244 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3245 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
3246 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
3247 STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
3248 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3249 STAMCOUNTER StatRZPageOutOfSyncSupervisor; /**< RC/R0: The number of times supervisor page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3250 STAMCOUNTER StatRZPageOutOfSyncUserWrite; /**< RC/R0: The number of times user page is out of sync was detected in \#PF. */
3251 STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times supervisor page is out of sync was detected in \#PF. */
3252 STAMCOUNTER StatRZPageOutOfSyncBallloon; /**< RC/R0: The number of times a ballooned page was accessed (read). */
3253 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
3254 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
3255 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3256 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3257 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3258 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3259 STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body */
3260
3261 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
3262 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
3263 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
3264 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
3265 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
3266 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
3267 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
3268 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
3269 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
3270 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
3271 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
3272 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
3273 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
3274 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
3275 STAMCOUNTER StatR3SyncPagePDNAs; /**< R3: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3276 STAMCOUNTER StatR3SyncPagePDOutOfSync; /**< R3: The number of time we've encountered an out-of-sync PD in SyncPage. */
3277 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
3278 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
3279 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3280 STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3281 STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
3282 STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
3283 STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real pages faults during dirty bit tracking. */
3284 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
3285 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
3286 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
3287 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
3288 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
3289 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
3290 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3291 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
3292 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3293 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
3294 STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
3295 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3296 STAMCOUNTER StatR3PageOutOfSyncSupervisor; /**< R3: The number of times supervisor page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3297 STAMCOUNTER StatR3PageOutOfSyncUserWrite; /**< R3: The number of times user page is out of sync was detected in \#PF. */
3298 STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times supervisor page is out of sync was detected in \#PF. */
3299 STAMCOUNTER StatR3PageOutOfSyncBallloon; /**< R3: The number of times a ballooned page was accessed (read). */
3300 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
3301 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
3302 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3303 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3304 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3305 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3306 STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
3307 /** @} */
3308#endif /* VBOX_WITH_STATISTICS */
3309} PGMCPU;
3310/** Pointer to the per-cpu PGM data. */
3311typedef PGMCPU *PPGMCPU;
3312
3313
3314/** @name PGM::fSyncFlags Flags
3315 * @{
3316 */
3317/** Updates the virtual access handler state bit in PGMPAGE. */
3318#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
3319/** Always sync CR3. */
3320#define PGM_SYNC_ALWAYS RT_BIT(1)
3321/** Check monitoring on next CR3 (re)load and invalidate page.
3322 * @todo This is obsolete now. Remove after 2.2.0 is branched off. */
3323#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
3324/** Check guest mapping in SyncCR3. */
3325#define PGM_SYNC_MAP_CR3 RT_BIT(3)
3326/** Clear the page pool (a lightweight flush). */
3327#define PGM_SYNC_CLEAR_PGM_POOL_BIT 8
3328#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
3329/** @} */
3330
3331
3332RT_C_DECLS_BEGIN
3333
3334int pgmLock(PVM pVM);
3335void pgmUnlock(PVM pVM);
3336
3337int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
3338int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
3339int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
3340PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
3341int pgmMapResolveConflicts(PVM pVM);
3342DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3343
3344void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
3345bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
3346void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
3347int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
3348DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
3349#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
3350void pgmHandlerVirtualDumpPhysPages(PVM pVM);
3351#else
3352# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
3353#endif
3354DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3355int pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
3356
3357int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3358int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
3359int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
3360int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
3361int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3362void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage);
3363int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3364int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3365int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3366int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
3367int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
3368int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3369int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
3370VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
3371VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
3372int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys);
3373
3374#ifdef IN_RING3
3375void pgmR3PhysRelinkRamRanges(PVM pVM);
3376int pgmR3PhysRamPreAllocate(PVM pVM);
3377int pgmR3PhysRamReset(PVM pVM);
3378int pgmR3PhysRomReset(PVM pVM);
3379int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
3380int pgmR3PhysRamTerm(PVM pVM);
3381
3382int pgmR3PoolInit(PVM pVM);
3383void pgmR3PoolRelocate(PVM pVM);
3384void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
3385void pgmR3PoolReset(PVM pVM);
3386void pgmR3PoolClearAll(PVM pVM, bool fFlushRemTlb);
3387DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *fpvFlushRemTbl);
3388
3389#endif /* IN_RING3 */
3390#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3391int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
3392#endif
3393int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
3394
3395/**
3396 * Allocates a page from the page pool, caller not specifying an access type.
3397 *
3398 * Convenience wrapper around pgmPoolAllocEx() that passes
3399 * PGMPOOLACCESS_DONTCARE for the access type.
3400 *
3401 * @returns VBox status code (forwarded from pgmPoolAllocEx).
3402 * @param   pVM         The VM handle.
3403 * @param   GCPhys      The guest physical address of the page to map.
3404 * @param   enmKind     The kind of mapping.
3405 * @param   iUser       The shadow page pool index of the user table.
3406 * @param   iUserTable  The index into the user table.
3407 * @param   ppPage      Where to store the pool page on success.
3408 * @param   fLockPage   Whether to lock the page, defaults to false.
3409 */
3410DECLINLINE(int) pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false)
3411{
3412    /* Hand off to the extended worker, leaving the access type unspecified. */
3413    int rc = pgmPoolAllocEx(pVM, GCPhys, enmKind, PGMPOOLACCESS_DONTCARE, iUser, iUserTable, ppPage, fLockPage);
3414    return rc;
3415}
3399
3400void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
3401void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
3402int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOW WHAT YOU'RE DOING!! */);
3403void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys);
3404PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
3405int pgmPoolSyncCR3(PVMCPU pVCpu);
3406bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys);
3407int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
3408void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte);
3409void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT);
3410/**
3411 * Flushes all shadow page table references to a guest physical page,
3412 * unconditionally flushing the PTEs.
3413 *
3414 * Convenience wrapper around pgmPoolTrackUpdateGCPhys() with fFlushPTEs
3415 * forced to true.
3416 *
3417 * @returns VBox status code (forwarded from pgmPoolTrackUpdateGCPhys).
3418 * @param   pVM         The VM handle.
3419 * @param   GCPhysPage  The guest physical address of the page.
3420 * @param   pPhysPage   The physical page tracking structure.
3421 * @param   pfFlushTLBs Where to return whether the (shadow) TLBs need flushing.
3422 */
3423DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
3424{
3425    const bool fFlushPTEs = true;
3426    return pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPhysPage, fFlushPTEs, pfFlushTLBs);
3427}
3414
3415uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte);
3416void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte);
3417void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
3418int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3419void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3420
3421void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3422void pgmPoolResetDirtyPages(PVM pVM);
3423
3424int pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
3425int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);
3426
3427void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
3428void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
3429int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3430int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3431
3432int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3433#ifndef IN_RC
3434int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3435#endif
3436int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
3437
3438PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM);
3439PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
3440PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
3441PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM);
3442
3443# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
3444DECLCALLBACK(int) pgmR3CmdCheckDuplicatePages(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
3445DECLCALLBACK(int) pgmR3CmdShowSharedModules(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
3446# endif
3447
3448RT_C_DECLS_END
3449
3450/** @} */
3451
3452#endif
3453
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette