VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 27149

Last change on this file since 27149 was 27065, checked in by vboxsync, 15 years ago

Fixed and cleaned up reference counting mess.
Additional code for cleaning up big pages.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 156.1 KB
Line 
1/* $Id: PGMInternal.h 27065 2010-03-05 10:07:55Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___PGMInternal_h
23#define ___PGMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdmcritsect.h>
33#include <VBox/pdmapi.h>
34#include <VBox/dis.h>
35#include <VBox/dbgf.h>
36#include <VBox/log.h>
37#include <VBox/gmm.h>
38#include <VBox/hwaccm.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/avl.h>
42#include <iprt/critsect.h>
43#include <iprt/sha.h>
44
45
46
/** @defgroup grp_pgm_int Internals
 * @ingroup grp_pgm
 * @internal
 * @{
 */


/** @name PGM Compile Time Config
 * @{
 */

/**
 * Indicates that there are no guest mappings to care about.
 * Currently only raw-mode related code uses mappings, i.e. RC and R3 code.
 */
#if defined(IN_RING0) || !defined(VBOX_WITH_RAW_MODE)
# define PGM_WITHOUT_MAPPINGS
#endif

/**
 * Solves page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Optimization for PAE page tables that are modified often.
 */
//#if 0 /* disabled again while debugging */
#ifndef IN_RC
# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
#endif
//#endif

/**
 * Large page support enabled only on 64-bit hosts; applies to nested paging only.
 */
#if (HC_ARCH_BITS == 64) && !defined(IN_RC)
# define PGM_WITH_LARGE_PAGES
#endif

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 *
 * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
 * world switch overhead, so let's sync more.
 */
# ifdef IN_RING0
/* Chose 32 based on the compile test in #4219; 64 shows worse stats.
 * 32 again shows better results than 16; slightly more overhead in the \#PF handler,
 * but ~5% fewer faults.
 */
# define PGM_SYNC_NR_PAGES 32
#else
# define PGM_SYNC_NR_PAGES 8
#endif

/**
 * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t)).
 */
#define PGM_MAX_PHYSCACHE_ENTRIES 64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK (PGM_MAX_PHYSCACHE_ENTRIES-1)


/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif

/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*#define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */

/** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
 * Enables real write monitoring of pages, i.e. mapping them read-only and
 * only making them writable when getting a write access \#PF. */
#define VBOX_WITH_REAL_WRITE_MONITORED_PAGES

/** @} */
146
147
/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED RT_BIT_64(11)
#endif

/** @} */

/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @remarks The relative order of these values is relied upon by the
 *          PGM_WITH_PAGING / PGM_WITH_NX macros below.
 * @{ */
#define PGM_TYPE_REAL 1
#define PGM_TYPE_PROT 2
#define PGM_TYPE_32BIT 3
#define PGM_TYPE_PAE 4
#define PGM_TYPE_AMD64 5
#define PGM_TYPE_NESTED 6
#define PGM_TYPE_EPT 7
#define PGM_TYPE_MAX PGM_TYPE_EPT
/** @} */

/** Macro for checking if the guest is using paging.
 * @param uGstType PGM_TYPE_*
 * @param uShwType PGM_TYPE_*
 * @remark ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uGstType, uShwType) \
 ( (uGstType) >= PGM_TYPE_32BIT \
 && (uShwType) != PGM_TYPE_NESTED \
 && (uShwType) != PGM_TYPE_EPT)

/** Macro for checking if the guest supports the NX bit.
 * @param uGstType PGM_TYPE_*
 * @param uShwType PGM_TYPE_*
 * @remark ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uGstType, uShwType) \
 ( (uGstType) >= PGM_TYPE_PAE \
 && (uShwType) != PGM_TYPE_NESTED \
 && (uShwType) != PGM_TYPE_EPT)
216
217
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param HCPhys The HC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 * small page window slots employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
 PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
 pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
 MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_HCPHYS_2_PTR_BY_PGM
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param pPGM The PGM instance data.
 * @param HCPhys The HC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 * small page window slots employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
 pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
 PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 * small page window slots employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
 PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
 pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
 PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_BY_PGMCPU
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 * small page window slots employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
 pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
 PGM_GCPHYS_2_PTR(PGMCPU2VM(pPGM), GCPhys, ppv)
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 * small page window slots employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
 PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
 PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif
323
/** @def PGM_INVL_PG
 * Invalidates a page.
 *
 * @param pVCpu The VMCPU handle.
 * @param GCVirt The virtual address of the page to invalidate.
 * @remark The ring-0 and ring-3 variants are identical; only raw-mode (RC)
 *         invalidates via the CPU instruction directly.
 */
#ifdef IN_RC
# define PGM_INVL_PG(pVCpu, GCVirt) ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG(pVCpu, GCVirt) HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG(pVCpu, GCVirt) HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_PG_ALL_VCPU
 * Invalidates a page on all VCPUs.
 *
 * @param pVM The VM handle.
 * @param GCVirt The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry.
 *
 * @param pVCpu The VMCPU handle.
 * @param GCVirt The virtual address within the page directory to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_BIG_PG(pVCpu, GCVirt) ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_BIG_PG(pVCpu, GCVirt) HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_VCPU_TLBS()
 * Invalidates the TLBs of the specified VCPU.
 *
 * @param pVCpu The VMCPU handle.
 */
#ifdef IN_RC
# define PGM_INVL_VCPU_TLBS(pVCpu) ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_VCPU_TLBS(pVCpu) HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_VCPU_TLBS(pVCpu) HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_ALL_VCPU_TLBS()
 * Invalidates the TLBs of all VCPUs.
 *
 * @param pVM The VM handle.
 */
#ifdef IN_RC
# define PGM_INVL_ALL_VCPU_TLBS(pVM) ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_ALL_VCPU_TLBS(pVM) HWACCMFlushTLBOnAllVCpus(pVM)
#else
# define PGM_INVL_ALL_VCPU_TLBS(pVM) HWACCMFlushTLBOnAllVCpus(pVM)
#endif

/** Size of the GCPtrConflict array in PGMMAPPING.
 * @remarks Must be a power of two. */
#define PGMMAPPING_CONFLICT_MAX 8
395
/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked list in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry. */
    R3PTRTYPE(struct PGMMAPPING *)      pNextR3;
    /** Pointer to next entry. */
    R0PTRTYPE(struct PGMMAPPING *)      pNextR0;
    /** Pointer to next entry. */
    RCPTRTYPE(struct PGMMAPPING *)      pNextRC;
    /** Indicate whether this entry is finalized. */
    bool                                fFinalized;
    /** Start Virtual address. */
    RTGCPTR                             GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCPTR                             GCPtrLast;
    /** Range size (bytes). */
    RTGCPTR                             cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE)           pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *)                   pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Last 8 (PGMMAPPING_CONFLICT_MAX) addresses that caused conflicts. */
    RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    /** Number of conflicts for this hypervisor mapping. */
    uint32_t                            cConflicts;
    /** Number of page tables. */
    uint32_t                            cPTs;

    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length (cPTs gives the actual count).
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                        HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                        HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                        HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT)               pPTR3;
        /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
        R3PTRTYPE(PX86PTPAE)            paPaePTsR3;
        /** The RC virtual address of the 32-bit page table. */
        RCPTRTYPE(PX86PT)               pPTRC;
        /** The RC virtual address of the two PAE page tables. */
        RCPTRTYPE(PX86PTPAE)            paPaePTsRC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PX86PT)               pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE)            paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
458
459
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE                  enmType;
    /** Number of pages to update. */
    uint32_t                            cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *)                   pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)      pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *)                   pvUserR0;
    /** Pointer to RC callback function. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)      pfnHandlerRC;
    /** User argument for RC handlers. */
    RCPTRTYPE(void *)                   pvUserRC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
494
495
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t                             offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     * (See the PGMPHYS2VIRTHANDLER_* defines below for the bit/mask values.)
     */
    int32_t                             offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK (~(int32_t)3)
523
524
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Size of the range (in bytes). */
    RTGCPTR                             cb;
    /** Number of cache pages. */
    uint32_t                            cPages;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Pointer to the RC callback function. */
    RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
#if HC_ARCH_BITS == 64
    /** Explicit padding on 64-bit hosts.
     * NOTE(review): presumably keeps the following R3 pointers naturally
     * aligned — confirm before relying on the layout. */
    RTRCPTR                             padding;
#endif
    /** Pointer to the R3 callback function for invalidation. */
    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    /** Pointer to the R3 callback function. */
    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored range. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
561
562
/**
 * Page type.
 *
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks This is used in the saved state, so changes to it requires bumping
 *          the saved state version.
 * @remarks The value ordering matters: the PGMPAGETYPE_IS_* predicates below
 *          use ordered comparisons against ROM / ROM_SHADOW.
 * @todo So, convert to \#defines!
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END <= 7);

/** @name Page type predicates.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type) ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type) ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type) ( (type) == PGMPAGETYPE_MMIO )
/** @} */
601
602
/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon). Which means that for now we'll be
 * using PGM_PAGE_GET_*, PGM_PAGE_IS_ and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and the Page ID.
     * Bits 0-27 hold the Page ID, bits 28+ hold the page frame number
     * (HCPhys >> 12); see PGM_PAGE_GET_HCPHYS / PGM_PAGE_GET_PAGEID. */
    RTHCPHYS    HCPhysAndPageID;
    /** Combination of:
     * - [0-7]: u2HandlerPhysStateY - the physical handler state
     *   (PGM_PAGE_HNDL_PHYS_STATE_*).
     * - [8-9]: u2HandlerVirtStateY - the virtual handler state
     *   (PGM_PAGE_HNDL_VIRT_STATE_*).
     * - [10-12]: 3 unused bits.
     * - [13-14]: u2PDEType - paging structure needed to map the page
     *   (PGM_PAGE_PDE_TYPE_*; see PGM_PAGE_SET_PDE_TYPE).
     * - [15]: fWrittenToY - flag indicating that a write monitored page was
     *   written to when set (see PGM_PAGE_SET_WRITTEN_TO).
     * @remarks Warning! All accesses to the bits are hardcoded.
     *
     * @todo Change this to a union with both bitfields, u8 and u accessors.
     *       That'll help deal with some of the hardcoded accesses.
     *
     * @todo Include uStateY and uTypeY as well so it becomes 32-bit. This
     *       will make it possible to turn some of the 16-bit accesses into
     *       32-bit ones, which may be efficient (stalls).
     */
    RTUINT16U   u16MiscY;
    /** The page state (PGM_PAGE_STATE_*).
     * Only 2 bits are really needed for this. */
    uint8_t     uStateY;
    /** The page type (PGMPAGETYPE).
     * Only 3 bits are really needed for this. */
    uint8_t     uTypeY;
    /** Usage tracking (page pool). */
    uint16_t    u16TrackingY;
    /** The number of read locks on this page. */
    uint8_t     cReadLocksY;
    /** The number of write locks on this page. */
    uint8_t     cWriteLocksY;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;
655
656
/**
 * Clears the page structure.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
 do { \
 (pPage)->HCPhysAndPageID = 0; \
 (pPage)->uStateY = 0; \
 (pPage)->uTypeY = 0; \
 (pPage)->u16MiscY.u = 0; \
 (pPage)->u16TrackingY = 0; \
 (pPage)->cReadLocksY = 0; \
 (pPage)->cWriteLocksY = 0; \
 } while (0)

/**
 * Initializes the page structure.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _HCPhys The host physical address of the page (page aligned;
 *                the low 12 bits must be zero, asserted below).
 * @param _idPage The page ID (NIL_GMM_PAGEID for the ZERO page).
 * @param _uType The page type (PGMPAGETYPE).
 * @param _uState The page state (PGM_PAGE_STATE_*).
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
 do { \
 RTHCPHYS SetHCPhysTmp = (_HCPhys); \
 AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
 /* (HCPhys << 16) == ((HCPhys >> 12) << 28) since the low 12 bits are zero. */ \
 (pPage)->HCPhysAndPageID = (SetHCPhysTmp << (28-12)) | ((_idPage) & UINT32_C(0x0fffffff)); \
 (pPage)->uStateY = (_uState); \
 (pPage)->uTypeY = (_uType); \
 (pPage)->u16MiscY.u = 0; \
 (pPage)->u16TrackingY = 0; \
 (pPage)->cReadLocksY = 0; \
 (pPage)->cWriteLocksY = 0; \
 } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param pVM The VM handle (for getting the zero page address).
 * @param uType The page type (PGMPAGETYPE).
 */
#define PGM_PAGE_INIT_ZERO(pPage, pVM, uType) \
 PGM_PAGE_INIT((pPage), (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (uType), PGM_PAGE_STATE_ZERO)


/** @name The Page state, PGMPAGE::uStateY.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO 0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED 1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page remapped as read-write and the state
 * moved back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED 2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED 3
/** @} */
718
719
/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage) ( (pPage)->uStateY )

/**
 * Sets the page state.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _uState The new page state.
 */
#define PGM_PAGE_SET_STATE(pPage, _uState) do { (pPage)->uStateY = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * @returns host physical address (RTHCPHYS).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage) ( ((pPage)->HCPhysAndPageID >> 28) << 12 )

/**
 * Sets the host physical address of the guest page.
 * Preserves the Page ID in the low 28 bits.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _HCPhys The new host physical address (page aligned; low 12 bits
 *                must be zero, asserted below).
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
 do { \
 RTHCPHYS SetHCPhysTmp = (_HCPhys); \
 AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
 (pPage)->HCPhysAndPageID = ((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) \
 | (SetHCPhysTmp << (28-12)); \
 } while (0)

/**
 * Get the Page ID.
 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage) ( (uint32_t)((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) )

/**
 * Sets the Page ID.
 * Preserves the host physical address in the upper bits.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _idPage The new Page ID (only the low 28 bits are used).
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) \
 do { \
 (pPage)->HCPhysAndPageID = (((pPage)->HCPhysAndPageID) & UINT64_C(0xfffffffff0000000)) \
 | ((_idPage) & UINT32_C(0x0fffffff)); \
 } while (0)

/**
 * Get the Chunk ID.
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage) ( PGM_PAGE_GET_PAGEID(pPage) >> GMM_CHUNKID_SHIFT )

/**
 * Get the index of the page within the allocation chunk.
 * @returns The page index.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhysAndPageID & GMM_PAGEID_IDX_MASK) )

/**
 * Gets the page type.
 * @returns The page type (PGMPAGETYPE).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TYPE(pPage) (pPage)->uTypeY

/**
 * Sets the page type.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _enmType The new page type (PGMPAGETYPE).
 */
#define PGM_PAGE_SET_TYPE(pPage, _enmType) do { (pPage)->uTypeY = (_enmType); } while (0)

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_MMIO(pPage) ( (pPage)->uTypeY == PGMPAGETYPE_MMIO )

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage) ( (pPage)->uStateY == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage) ( (pPage)->uStateY == PGM_PAGE_STATE_SHARED )


/**
 * Marks the page as written to (for GMM change monitoring).
 * Sets bit 7 of u16MiscY.au8[1], i.e. bit 15 of u16MiscY.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage) do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x80); } while (0)

/**
 * Clears the written-to indicator.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0x7f); } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage) ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) )
840
/** @name PT usage values (PGMPAGE::u2PDEType).
 *
 * @{ */
/** Either as a PT or PDE. */
#define PGM_PAGE_PDE_TYPE_DONTCARE 0
/** Must use a page table to map the range. */
#define PGM_PAGE_PDE_TYPE_PT 1
/** Can use a page directory entry to map the continuous range. */
#define PGM_PAGE_PDE_TYPE_PDE 2
/** Can use a page directory entry to map the continuous range - temporarily disabled (by page monitoring). */
#define PGM_PAGE_PDE_TYPE_PDE_DISABLED 3
/** @} */

/**
 * Set the PDE type of the page.
 * Stored in bits 5-6 of u16MiscY.au8[1] (bits 13-14 of u16MiscY).
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param uType PGM_PAGE_PDE_TYPE_*
 */
#define PGM_PAGE_SET_PDE_TYPE(pPage, uType) \
 do { \
 (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0x9f)) \
 | (((uType) & UINT8_C(0x03)) << 5); \
 } while (0)

/**
 * Gets the PDE type of the page.
 * @returns PGM_PAGE_PDE_TYPE_* value.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PDE_TYPE(pPage) ( ((pPage)->u16MiscY.au8[1] & UINT8_C(0x60)) >> 5)

/** Enabled optimized access handler tests.
 * These optimizations makes ASSUMPTIONS about the state values and the u16MiscY
 * layout. When enabled, the compiler should normally generate more compact
 * code.
 */
#define PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS 1

/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateY).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 * the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
/** Monitoring is temporarily disabled. */
#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED 1
/** Write access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_WRITE 2
/** All access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_ALL 3
/** @} */

/**
 * Gets the physical access handler state of a page.
 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) \
 ( (pPage)->u16MiscY.au8[0] )

/**
 * Sets the physical access handler state of a page.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _uState The new state value (PGM_PAGE_HNDL_PHYS_STATE_*).
 */
#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
 do { (pPage)->u16MiscY.au8[0] = (_uState); } while (0)

/**
 * Checks if the page has any physical access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) \
 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )

/**
 * Checks if the page has any active physical access handlers.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) \
 ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
925
926
/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateY).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE           0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE          2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL            3
/** @} */

/**
 * Gets the virtual access handler state of a page.
 *
 * The state lives in the low two bits of u16MiscY.au8[1] (the upper bits hold
 * the PDE type, see PGM_PAGE_SET_PDE_TYPE).
 *
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ((uint8_t)( (pPage)->u16MiscY.au8[1] & UINT8_C(0x03) ))

/**
 * Sets the virtual access handler state of a page.
 *
 * Only the low two bits of u16MiscY.au8[1] are touched; the PDE type bits are
 * preserved by the 0xfc mask.
 *
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value (PGM_PAGE_HNDL_VIRT_STATE_*).
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    do { \
        (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0xfc)) \
                                 | ((_uState)         & UINT8_C(0x03)); \
    } while (0)

/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
 * virtual handlers (there is no DISABLED state for them, value 1 is reserved).
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) \
    PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
975
976
/**
 * Checks if the page has any access handlers, including temporarily disabled ones.
 *
 * The optimized variant tests both state fields in one go: 0x0303 masks the
 * low two bits of au8[0] (physical state) and au8[1] (virtual state)
 * simultaneously.  NONE is 0 for both, so any set bit means a handler exists.
 *
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    ( ((pPage)->u16MiscY.u & UINT16_C(0x0303)) != 0 )
#else
# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
#endif

/**
 * Checks if the page has any active access handlers.
 *
 * The optimized variant exploits the state encoding: states are 0..3 and the
 * active ones (WRITE=2, ALL=3) are exactly those with bit 1 set, so masking
 * with 0x0202 tests "state >= WRITE" for both bytes at once.
 *
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    ( ((pPage)->u16MiscY.u & UINT16_C(0x0202)) != 0 )
#else
# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )
#endif

/**
 * Checks if the page has any active access handlers catching all accesses.
 *
 * The optimized variant ORs the two state bytes and masks to two bits; the
 * result equals ALL (3) only when at least one of the states is ALL (the other
 * states cannot OR up to 3 because value 1 is DISABLED/reserved, i.e. the
 * combinations in use are 0, 2 and 3).
 *
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   ( ((pPage)->u16MiscY.au8[0] | (pPage)->u16MiscY.au8[1]) & UINT8_C(0x3) ) \
     == PGM_PAGE_HNDL_PHYS_STATE_ALL )
#else
# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL )
#endif
1019
1020
/** @def PGM_PAGE_GET_TRACKING
 * Gets the packed shadow page pool tracking data associated with a guest page.
 * @returns uint16_t containing the data.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TRACKING(pPage)        ( (pPage)->u16TrackingY )

/** @def PGM_PAGE_SET_TRACKING
 * Sets the packed shadow page pool tracking data associated with a guest page.
 * @param   pPage               Pointer to the physical guest page tracking structure.
 * @param   u16TrackingData     The tracking data to store.
 */
#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
    do { (pPage)->u16TrackingY = (u16TrackingData); } while (0)

/** @def PGM_PAGE_GET_TD_CREFS
 * Gets the @a cRefs tracking data member out of the packed tracking word.
 * @returns cRefs.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_CREFS(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)

/** @def PGM_PAGE_GET_TD_IDX
 * Gets the @a idx (shadow page pool index) tracking data member out of the
 * packed tracking word.
 * @returns idx.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_IDX(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT)   & PGMPOOL_TD_IDX_MASK)
1051
1052
/** Max number of locks on a page. */
#define PGM_PAGE_MAX_LOCKS                  UINT8_C(254)

/** Get the read lock count.
 * @returns count.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_READ_LOCKS(pPage)      ( (pPage)->cReadLocksY )

/** Get the write lock count.
 * @returns count.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_WRITE_LOCKS(pPage)     ( (pPage)->cWriteLocksY )

/** Decrement the read lock counter.
 * @note    No underflow checking here; callers are expected to enforce the
 *          PGM_PAGE_MAX_LOCKS limits.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_DEC_READ_LOCKS(pPage)      do { --(pPage)->cReadLocksY; } while (0)

/** Decrement the write lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_DEC_WRITE_LOCKS(pPage)     do { --(pPage)->cWriteLocksY; } while (0)

/** Increment the read lock counter.
 * @note    No overflow checking here; callers are expected to enforce the
 *          PGM_PAGE_MAX_LOCKS limits.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INC_READ_LOCKS(pPage)      do { ++(pPage)->cReadLocksY; } while (0)

/** Increment the write lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INC_WRITE_LOCKS(pPage)     do { ++(pPage)->cWriteLocksY; } while (0)
1087
1088
#if 0
/** Enables sanity checking of write monitoring using CRC-32. */
# define PGMLIVESAVERAMPAGE_WITH_CRC32
#endif

/**
 * Per page live save tracking data.
 *
 * The bit fields below add up to exactly 32 bits; the compile-time size
 * asserts following the structure depend on that.
 */
typedef struct PGMLIVESAVERAMPAGE
{
    /** Number of times it has been dirtied. */
    uint32_t    cDirtied : 24;
    /** Whether it is currently dirty. */
    uint32_t    fDirty : 1;
    /** Ignore the page.
     *  This is used for pages that have been MMIO, MMIO2 or ROM pages once.  We will
     *  deal with these after pausing the VM and DevPCI have said its bit about
     *  remappings. */
    uint32_t    fIgnore : 1;
    /** Was a ZERO page last time around. */
    uint32_t    fZero : 1;
    /** Was a SHARED page last time around. */
    uint32_t    fShared : 1;
    /** Whether the page is/was write monitored in a previous pass. */
    uint32_t    fWriteMonitored : 1;
    /** Whether the page is/was write monitored earlier in this pass. */
    uint32_t    fWriteMonitoredJustNow : 1;
    /** Bits reserved for future use. */
    uint32_t    u2Reserved : 2;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    /** CRC-32 for the page. This is for internal consistency checks. */
    uint32_t    u32Crc;
#endif
} PGMLIVESAVERAMPAGE;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
AssertCompileSize(PGMLIVESAVERAMPAGE, 8);
#else
AssertCompileSize(PGMLIVESAVERAMPAGE, 4);
#endif
/** Pointer to the per page live save tracking data. */
typedef PGMLIVESAVERAMPAGE *PPGMLIVESAVERAMPAGE;

/** The max value of PGMLIVESAVERAMPAGE::cDirtied.
 * @note The identifier is missing an 'E' (PGMLIVSAVE...) but is kept as-is
 *       since other code references it by this exact name. */
#define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
1133
1134
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC (hence the
 * per-context next/self pointers below).
 */
typedef struct PGMRAMRANGE
{
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for RC. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    /** PGM_RAM_RANGE_FLAGS_* flags. */
    uint32_t                            fFlags;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvR3;
    /** Live save per page tracking data. */
    R3PTRTYPE(PPGMLIVESAVERAMPAGE)      paLSPages;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Pointer to self - R0 pointer. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pSelfR0;
    /** Pointer to self - RC pointer. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
    uint32_t                            au32Alignment2[HC_ARCH_BITS == 32 ? 1 : 3];
    /** Array of physical guest page tracking structures.
     * Variable sized; allocated with enough room for (cb >> PAGE_SHIFT) entries. */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;

/** @name PGMRAMRANGE::fFlags
 * @{ */
/** The RAM range is floating around as an independent guest mapping. */
#define PGM_RAM_RANGE_FLAGS_FLOATING        RT_BIT(20)
/** Ad hoc RAM range for an ROM mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM      RT_BIT(21)
/** Ad hoc RAM range for an MMIO mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO     RT_BIT(22)
/** Ad hoc RAM range for an MMIO2 mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2    RT_BIT(23)
/** @} */

/** Tests if a RAM range is an ad hoc one or not (i.e. any of the three
 * AD_HOC flags is set).
 * @returns true/false.
 * @param   pRam    The RAM range.
 */
#define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
    (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
1195
1196
/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have two pages
 * backing it. This structure contains the PGMPAGE for both while
 * PGMRAMRANGE have a copy of the active one. It is important that these
 * aren't out of sync in any regard other than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page. */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page. */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Live save status information. Makes use of unused alignment space. */
    struct
    {
        /** The previous protection value. */
        uint8_t u8Prot;
        /** Written to flag set by the handler. */
        bool    fWrittenTo;
        /** Whether the shadow page is dirty or not. */
        bool    fDirty;
        /** Whether it was dirtied recently. */
        bool    fDirtiedRecently;
    } LiveSave;
} PGMROMPAGE;
AssertCompileSizeAlignment(PGMROMPAGE, 8);
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;
1229
1230
/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM image since they generally intrude
 * into a PGMRAMRANGE.  It also keeps track of additional info like the
 * two page sets (read-only virgin and read-write shadow), the current
 * state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges needs to be accessible and in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - RC. */
    RCPTRTYPE(struct PGMROMRANGE *)     pNextRC;
    /** Pointer alignment */
    RTRCPTR                             RCPtrAlignment;
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAGS_*). */
    uint32_t                            fFlags;
    /** The saved state range ID. */
    uint8_t                             idSavedState;
    /** Alignment padding. */
    uint8_t                             au8Alignment[3];
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned.
     * @note The identifier misspells "Alignment"; kept as-is since it is
     *       padding only and renaming would churn any code referencing it. */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 6 : 2];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;
1276
1277
/**
 * Live save per page data for an MMIO2 page.
 *
 * Not using PGMLIVESAVERAMPAGE here because we cannot use normal write monitoring
 * of MMIO2 pages.  The current approach is using some optimistic SHA-1 +
 * CRC-32 for detecting changes as well as special handling of zero pages.  This
 * is a TEMPORARY measure which isn't perfect, but hopefully it is good enough
 * for speeding things up.  (We're using SHA-1 and not SHA-256 or SHA-512
 * because of speed (2.5x and 6x slower).)
 *
 * @todo Implement dirty MMIO2 page reporting that can be enabled during live
 *       save but normally is disabled.  Since we can write monitor guest
 *       accesses on our own, we only need this for host accesses.  Shouldn't be
 *       too difficult for DevVGA, VMMDev might be doable, the planned
 *       networking fun will be fun since it involves ring-0.
 */
typedef struct PGMLIVESAVEMMIO2PAGE
{
    /** Set if the page is considered dirty. */
    bool        fDirty;
    /** The number of scans this page has remained unchanged for.
     * Only updated for dirty pages. */
    uint8_t     cUnchangedScans;
    /** Whether this page was zero at the last scan. */
    bool        fZero;
    /** Alignment padding. */
    bool        fReserved;
    /** CRC-32 for the first half of the page.
     * This is used together with u32CrcH2 to quickly detect changes in the page
     * during the non-final passes.  */
    uint32_t    u32CrcH1;
    /** CRC-32 for the second half of the page. */
    uint32_t    u32CrcH2;
    /** SHA-1 for the saved page.
     * This is used in the final pass to skip pages without changes. */
    uint8_t     abSha1Saved[RTSHA1_HASH_SIZE];
} PGMLIVESAVEMMIO2PAGE;
/** Pointer to a live save status data for an MMIO2 page. */
typedef PGMLIVESAVEMMIO2PAGE *PPGMLIVESAVEMMIO2PAGE;
1317
/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reason why we need to keep track of these
 * registrations.  One of them is the deregistration & cleanup stuff,
 * while another is that the PGMRAMRANGE associated with such a region may
 * have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all.  The pages
 * in the existing RAM range must not be ROM nor MMIO.  A guru meditation
 * will be raised if a partial overlap or an overlap of ROM pages is
 * encountered.  On an overlap we will free all the existing RAM pages and
 * put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range. (a device) */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** The saved state range ID. */
    uint8_t                             idSavedState;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary.
     * NOTE(review): both arms of the ternary are 12, so the expression is
     * currently a constant; presumably kept in this form in case the 32/64-bit
     * padding requirements diverge - confirm before simplifying. */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 12 : 12];
    /** Live save per page tracking data. */
    R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE)    paLSPages;
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1359
1360
1361
1362
/**
 * PGMPhysRead/Write cache entry
 */
typedef struct PGMPHYSCACHEENTRY
{
    /** R3 pointer to physical page. */
    R3PTRTYPE(uint8_t *)                pbR3;
    /** GC Physical address for cache entry */
    RTGCPHYS                            GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS                            u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHEENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (bit N corresponds to Entry[N]). */
    uint64_t                            aEntries;
    /** Cache entries */
    PGMPHYSCACHEENTRY                   Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
1387
1388
/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE                      Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE                     AgeCore;
    /** The current age thingy. */
    uint32_t                            iAge;
    /** The current reference count. */
    uint32_t volatile                   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile                   cPermRefs;
    /** The mapping address. */
    void                               *pv;
} PGMCHUNKR3MAP;
1415
/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
#endif
} PGMCHUNKR3MAPTLBE;
/** Pointer to the an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark  Must be a power of two value (see PGM_CHUNKR3MAPTLB_IDX). */
#define PGM_CHUNKR3MAPTLB_ENTRIES   64

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing.  However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want.  This isn't fast
 *          compared to a TLB hit.  There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to the new inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)  ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
1468
1469
/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMPAGE) volatile        pPage;
#else
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
#endif
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
#endif
    /** The address */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) volatile          pv;
#else
    R3R0PTRTYPE(void *) volatile        pv;
#endif
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two (see PGM_PAGER3MAPTLB_IDX). */
#define PGM_PAGER3MAPTLB_ENTRIES 256

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE            aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
1526
1527
/**
 * Mapping cache usage set entry.
 *
 * @remarks 16-bit ints was chosen as the set is not expected to be used beyond
 *          the dynamic ring-0 and (to some extent) raw-mode context mapping
 *          cache.  If it's extended to include ring-3, well, then something will
 *          have be changed here...
 */
typedef struct PGMMAPSETENTRY
{
    /** The mapping cache index. */
    uint16_t                    iPage;
    /** The number of references.
     * The max is UINT16_MAX - 1. */
    uint16_t                    cRefs;
#if HC_ARCH_BITS == 64
    uint32_t                    alignment;
#endif
    /** Pointer to the page. */
    RTR0PTR                     pvPage;
    /** The physical address for this entry. */
    RTHCPHYS                    HCPhys;
} PGMMAPSETENTRY;
/** Pointer to a mapping cache usage set entry. */
typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;

/**
 * Mapping cache usage set.
 *
 * This is used in ring-0 and the raw-mode context to track dynamic mappings
 * done during exits / traps.  (The original comment ends abruptly here; the
 * fields below describe the set's mechanics.)
 */
typedef struct PGMMAPSET
{
    /** The number of occupied entries.
     * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
     * dynamic mappings. */
    uint32_t                    cEntries;
    /** The start of the current subset.
     * This is UINT32_MAX if no subset is currently open. */
    uint32_t                    iSubset;
    /** The index of the current CPU, only valid if the set is open. */
    int32_t                     iCpu;
    uint32_t                    alignment;
    /** The entries. */
    PGMMAPSETENTRY              aEntries[64];
    /** HCPhys -> iEntry fast lookup table.
     * Use PGMMAPSET_HASH for hashing.
     * The entries may or may not be valid, check against cEntries. */
    uint8_t                     aiHashTable[128];
} PGMMAPSET;
AssertCompileSizeAlignment(PGMMAPSET, 8);
/** Pointer to the mapping cache set. */
typedef PGMMAPSET *PPGMMAPSET;

/** PGMMAPSET::cEntries value for a closed set. */
#define PGMMAPSET_CLOSED            UINT32_C(0xdeadc0fe)

/** Hash function for aiHashTable. */
#define PGMMAPSET_HASH(HCPhys)      (((HCPhys) >> PAGE_SHIFT) & 127)

/** The max fill size (strict builds). */
#define PGMMAPSET_MAX_FILL          (64U * 80U / 100U)
1591
1592
/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication parts of the GCxxx->CCPtr
 * code is written in a kind of context neutral way.  Time will show whether
 * this actually makes sense or not...
 *
 * @todo this needs to be reconsidered and dropped/redone since the ring-0
 *       context ends up using a global mapping cache on some platforms
 *       (darwin).
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGM_PAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys      The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_RC
// typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                           PPGMPAGEMAP;
 typedef void **                          PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES          PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys)      PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB                 PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE                PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE               *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP                   PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP                  PPPGMPAGEMAP;
#endif
/** @} */
1646
1647
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX                 0
/** The first special index (the special indexes are 1..4, see below). */
#define PGMPOOL_IDX_FIRST_SPECIAL       1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD                  1
/** Page Directory Pointer Table (PAE root). */
#define PGMPOOL_IDX_PDPT                2
/** AMD64 CR3 level index.*/
#define PGMPOOL_IDX_AMD64_CR3           3
/** Nested paging root.*/
#define PGMPOOL_IDX_NESTED_ROOT         4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST               5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST                0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX          ((uint16_t)0xffff)
#define NIL_PGMPOOL_PRESENT_INDEX       ((uint16_t)0xffff)

/**
 * Node in the chain linking a shadowed page to it's parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint32_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()


/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX       ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 * @todo Calling this an 'extent' is not quite right, find a better name.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** The user page indexes (up to three references per node). */
    uint16_t            aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
1707
1708
/**
 * The kind of page that's being shadowed.
 *
 * Naming convention: "Shw" describes the shadow (host-side) paging structure,
 * "Gst" describes the guest structure it shadows.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table;     Gst: no paging  */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table;     Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table;     Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: no paging  */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table;        Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table;        Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table;        Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
    PGMPOOLKIND_32BIT_PD,
    /** Shw: 32-bit page directory. Gst: no paging. */
    PGMPOOLKIND_32BIT_PD_PHYS,
    /** Shw: PAE page directory 0;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    /** Shw: PAE page directory 1;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    /** Shw: PAE page directory 2;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    /** Shw: PAE page directory 3;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    /** Shw: PAE page directory;    Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    /** Shw: PAE page directory;    Gst: no paging. */
    PGMPOOLKIND_PAE_PD_PHYS,

    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst 32 bits paging. */
    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst PAE PDPT. */
    PGMPOOLKIND_PAE_PDPT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: no paging. */
    PGMPOOLKIND_PAE_PDPT_PHYS,

    /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table;   Gst: no paging  */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table;           Gst: no paging  */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */

    /** Shw: 64-bit PML4;           Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4,

    /** Shw: EPT page directory pointer table;  Gst: no paging  */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table;          Gst: no paging  */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table;                    Gst: no paging  */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

    /** Shw: Root Nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;
1785
/**
 * The access attributes of the page; only applies to big pages.
 * (RW = read/write, R = read-only, NX = no-execute.)
 */
typedef enum
{
    /** No particular access requirement; the default. */
    PGMPOOLACCESS_DONTCARE = 0,
    /** User-mode, read/write. */
    PGMPOOLACCESS_USER_RW,
    /** User-mode, read-only. */
    PGMPOOLACCESS_USER_R,
    /** User-mode, read/write, no-execute. */
    PGMPOOLACCESS_USER_RW_NX,
    /** User-mode, read-only, no-execute. */
    PGMPOOLACCESS_USER_R_NX,
    /** Supervisor-mode, read/write. */
    PGMPOOLACCESS_SUPERVISOR_RW,
    /** Supervisor-mode, read-only. */
    PGMPOOLACCESS_SUPERVISOR_R,
    /** Supervisor-mode, read/write, no-execute. */
    PGMPOOLACCESS_SUPERVISOR_RW_NX,
    /** Supervisor-mode, read-only, no-execute. */
    PGMPOOLACCESS_SUPERVISOR_R_NX
} PGMPOOLACCESS;
1801
/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node code with the (R3) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** Pointer to the R3 mapping of the page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *)   pvPageR3;
#else
    R3R0PTRTYPE(void *) pvPageR3;
#endif
    /** The guest physical address. */
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t            Alignment0; /**< Keeps GCPhys 64-bit aligned on 32-bit hosts. */
#endif
    RTGCPHYS            GCPhys;

    /** @name Access handler statistics to determine whether the guest is (re)initializing a page table.
     * @{ */
    /** RIP of the last access-handler fault. */
    RTGCPTR             pvLastAccessHandlerRip;
    /** Fault address of the last access-handler fault. */
    RTGCPTR             pvLastAccessHandlerFault;
    /** Number of hits on the last (RIP, fault address) combination. */
    uint64_t            cLastAccessHandlerCount;
    /** @} */

    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
    uint8_t             enmAccess;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
    /** Used to mark the page as dirty (write monitoring is temporarily off). */
    bool                fDirty;

    /** Used to indicate that this page can't be flushed. Important for cr3 root pages or shadow pae pd pages. */
    uint32_t            cLocked;
    /** Index into PGMPOOL::aIdxDirtyPages when fDirty is set. */
    uint32_t            idxDirty;
    /** The fault address that caused the page to be marked dirty. */
    RTGCPTR             pvDirtyFault;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
/** Pointer to a const pool page. */
typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
1877
1878
/** The hash table size. */
# define PGMPOOL_HASH_SIZE 0x40
/** The hash function; maps a guest physical page frame number to a hash table slot. */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
1883
1884
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follows immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - R3 Ptr. */
    PVMR3                       pVMR3;
    /** The VM handle - R0 Ptr. */
    PVMR0                       pVMR0;
    /** The VM handle - RC Ptr. */
    PVMRC                       pVMRC;
    /** The max pool size. This includes the special IDs.  */
    uint16_t                    cMaxPages;
    /** The current pool size. */
    uint16_t                    cCurPages;
    /** The head of the free page list. */
    uint16_t                    iFreeHead;
    /** Padding. */
    uint16_t                    u16Padding;
    /** Head of the chain of free user nodes. */
    uint16_t                    iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t                    cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t                    cPresent;
    /** Pointer to the array of user nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLUSER)     paUsersRC;
    /** Pointer to the array of user nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLUSER)     paUsersR3;
    /** Pointer to the array of user nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLUSER)     paUsersR0;
    /** Head of the chain of free phys ext nodes. */
    uint16_t                    iPhysExtFreeHead;
    /** The number of phys ext nodes we've allocated. */
    uint16_t                    cMaxPhysExts;
    /** Pointer to the array of physical xref extent - RC pointer. */
    RCPTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsRC;
    /** Pointer to the array of physical xref extent nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR3;
    /** Pointer to the array of physical xref extent nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR0;
    /** Hash table for GCPhys addresses. */
    uint16_t                    aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t                    iAgeHead;
    /** The tail of the age list. */
    uint16_t                    iAgeTail;
    /** Set if the cache is enabled. */
    bool                        fCacheEnabled;
    /** Alignment padding. */
    bool                        afPadding1[3];
    /** Head of the list of modified pages. */
    uint16_t                    iModifiedHead;
    /** The current number of modified pages. */
    uint16_t                    cModifiedPages;
    /** Access handler, RC. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnAccessHandlerRC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    /** The access handler description (R3 ptr). */
    R3PTRTYPE(const char *)     pszAccessHandler;
# if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t                    u32Padding2;
# endif
    /** Next available slot (in aIdxDirtyPages). */
    uint32_t                    idxFreeDirtyPage;
    /** Number of active dirty pages. */
    uint32_t                    cDirtyPages;
    /** Array of current dirty pgm pool page indices. */
    uint16_t                    aIdxDirtyPages[16];
    /** Copies of the dirty pages' contents, one 4K page (512 qwords) per slot. */
    uint64_t                    aDirtyPages[16][512];
    /** The number of pages currently in use. */
    uint16_t                    cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t                    cUsedPagesHigh;
    uint32_t                    Alignment1;         /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV              StatAlloc;
    /** Profiling pgmR3PoolClearDoIt(). */
    STAMPROFILE                 StatClearAll;
    /** Profiling pgmR3PoolReset(). */
    STAMPROFILE                 StatR3Reset;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE                 StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE                 StatFree;
    /** Counting explicit flushes by PGMPoolFlushPage(). */
    STAMCOUNTER                 StatForceFlushPage;
    /** Counting explicit flushes of dirty pages by PGMPoolFlushPage(). */
    STAMCOUNTER                 StatForceFlushDirtyPage;
    /** Counting flushes for reused pages. */
    STAMCOUNTER                 StatForceFlushReused;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE                 StatZeroPage;
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE                 StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE                 StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE                 StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE                 StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER                 StatTrackFreeUpOneUser;
    /** Nr of flushed entries. */
    STAMCOUNTER                 StatTrackFlushEntry;
    /** Nr of updated entries. */
    STAMCOUNTER                 StatTrackFlushEntryKeep;
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE                 StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER                 StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER                 StamTrackPhysExtAllocFailures;
    /** Profiling the RC/R0 access handler. */
    STAMPROFILE                 StatMonitorRZ;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER                 StatMonitorRZEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
    STAMPROFILE                 StatMonitorRZFlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER                 StatMonitorRZFlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER                 StatMonitorRZFlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER                 StatMonitorRZFork;
    /** Profiling the RC/R0 access we've handled (except REP STOSD). */
    STAMPROFILE                 StatMonitorRZHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER                 StatMonitorRZIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER                 StatMonitorRZIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER                 StatMonitorRZRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE                 StatMonitorRZRepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER                 StatMonitorRZFaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER                 StatMonitorRZFaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER                 StatMonitorRZFaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER                 StatMonitorRZFaultPML4;

    /** Profiling the R3 access handler. */
    STAMPROFILE                 StatMonitorR3;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER                 StatMonitorR3EmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
    STAMPROFILE                 StatMonitorR3FlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER                 StatMonitorR3FlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER                 StatMonitorR3FlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER                 StatMonitorR3Fork;
    /** Profiling the R3 access we've handled (except REP STOSD). */
    STAMPROFILE                 StatMonitorR3Handled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER                 StatMonitorR3RepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE                 StatMonitorR3RepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER                 StatMonitorR3FaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER                 StatMonitorR3FaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER                 StatMonitorR3FaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER                 StatMonitorR3FaultPML4;
    /** The number of times we're called in an async thread an need to flush. */
    STAMCOUNTER                 StatMonitorR3Async;
    /** Times we've called pgmPoolResetDirtyPages (and there were dirty page). */
    STAMCOUNTER                 StatResetDirtyPages;
    /** Times we've called pgmPoolAddDirtyPage. */
    STAMCOUNTER                 StatDirtyPage;
    /** Times we've had to flush duplicates for dirty page management. */
    STAMCOUNTER                 StatDirtyPageDupFlush;
    /** Times we've had to flush because of overflow. */
    STAMCOUNTER                 StatDirtyPageOverFlowFlush;

    /** The high water mark for cModifiedPages. */
    uint16_t                    cModifiedPagesHigh;
    uint16_t                    Alignment2[3];      /**< Align the next member on a 64-bit boundary. */

    /** The number of cache hits. */
    STAMCOUNTER                 StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER                 StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER                 StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER                 StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER                 StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER                 StatCacheUncacheable;
#else
    uint32_t                    Alignment3;         /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE              HCPhysTree;
    uint32_t                    Alignment4;         /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE                 aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
AssertCompileMemberAlignment(PGMPOOL, iModifiedHead, 8);
AssertCompileMemberAlignment(PGMPOOL, aDirtyPages, 8);
AssertCompileMemberAlignment(PGMPOOL, cUsedPages, 8);
#ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(PGMPOOL, StatAlloc, 8);
#endif
AssertCompileMemberAlignment(PGMPOOL, aPages, 8);
2109
2110
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the mapping of the pool page.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_STRICT)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPageStrict(pPage)
/** Strict-build helper: asserts that the page has an R3 mapping before returning it. */
DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
{
    Assert(pPage && pPage->pvPageR3);
    return pPage->pvPageR3;
}
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     ((pPage)->pvPageR3)
#endif
2136
/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
 * Maps a pool page into the current context, taking a PGM pointer.
 *
 * @returns Pointer to the mapping of the pool page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)     pgmPoolMapPageInlined(pPGM, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)     pgmPoolMapPageInlined(pPGM, (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)     PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
#endif
2155
/** @def PGMPOOL_PAGE_2_PTR_BY_PGMCPU
 * Maps a pool page into the current context, taking a PGMCPU pointer.
 *
 * @returns Pointer to the mapping of the pool page.
 * @param   pPGM    Pointer to the PGMCPU instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  PGMPOOL_PAGE_2_PTR(PGMCPU2VM(pPGM), pPage)
#endif
2174
2175
/** @name Per guest page tracking data.
 * This is currently as a 16-bit word in the PGMPAGE structure, the idea though
 * is to use more bits for it and split it up later on. But for now we'll play
 * safe and change as little as possible.
 *
 * The 16-bit word has two parts:
 *
 * The first 14-bit forms the @a idx field. It is either the index of a page in
 * the shadow page pool, or an index into the extent list.
 *
 * The 2 topmost bits makes up the @a cRefs field, which counts the number of
 * shadow page pool references to the page. If cRefs equals
 * PGMPOOL_CREFS_PHYSEXT, then the @a idx field is an index into the extent
 * (misnomer) table and not the shadow page pool.
 *
 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
 * the 16-bit word.
 *
 * @{ */
/** The shift count for getting to the cRefs part. */
#define PGMPOOL_TD_CREFS_SHIFT          14
/** The mask applied after shifting the tracking data down by
 * PGMPOOL_TD_CREFS_SHIFT. */
#define PGMPOOL_TD_CREFS_MASK           0x3
/** The cRef value used to indicate that the idx is the head of a
 * physical cross reference list. */
#define PGMPOOL_TD_CREFS_PHYSEXT        PGMPOOL_TD_CREFS_MASK
/** The shift used to get idx. */
#define PGMPOOL_TD_IDX_SHIFT            0
/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
#define PGMPOOL_TD_IDX_MASK             0x3fff
/** The idx value when we're out of PGMPOOLPHYSEXT entries or/and there are
 * simply too many mappings of this page. */
#define PGMPOOL_TD_IDX_OVERFLOWED       PGMPOOL_TD_IDX_MASK

/** @def PGMPOOL_TD_MAKE
 * Makes a 16-bit tracking data word.
 *
 * @returns tracking data.
 * @param   cRefs   The @a cRefs field. Must be within bounds!
 * @param   idx     The @a idx field. Must also be within bounds! */
#define PGMPOOL_TD_MAKE(cRefs, idx)     ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | (idx) )

/** @def PGMPOOL_TD_GET_CREFS
 * Get the @a cRefs field from a tracking data word.
 *
 * @returns The @a cRefs field
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_CREFS(u16)       ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )

/** @def PGMPOOL_TD_GET_IDX
 * Get the @a idx field from a tracking data word.
 *
 * @returns The @a idx field
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_IDX(u16)         ( ((u16) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK )
/** @} */
2233
2234
/**
 * Trees are using self relative offsets as pointers.
 * So, all its data, including the root pointer, must be in the heap for HC and GC
 * to have the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE  VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE  HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
2253
2254
/** @name Paging mode macros
 * Token-pasting helpers that build the context (R3/R0/RC) and mode specific
 * names of the shadow (Shw), guest (Gst) and combined (Bth) paging functions.
 * @{ */
#ifdef IN_RC
# define PGM_CTX(a,b)                   a##RC##b
# define PGM_CTX_STR(a,b)               a "GC" b
# define PGM_CTX_DECL(type)             VMMRCDECL(type)
#else
# ifdef IN_RING3
#  define PGM_CTX(a,b)                  a##R3##b
#  define PGM_CTX_STR(a,b)              a "R3" b
#  define PGM_CTX_DECL(type)            DECLCALLBACK(type)
# else
#  define PGM_CTX(a,b)                  a##R0##b
#  define PGM_CTX_STR(a,b)              a "R0" b
#  define PGM_CTX_DECL(type)            VMMDECL(type)
# endif
#endif

#define PGM_GST_NAME_REAL(name)         PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_RC_REAL_STR(name)  "pgmRCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name)  "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name)         PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_RC_PROT_STR(name)  "pgmRCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name)  "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name)        PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name)          PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_RC_PAE_STR(name)   "pgmRCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name)   "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name)        PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
#define PGM_GST_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name)        PGM_CTX_DECL(type) PGM_GST_NAME(name)

#define PGM_SHW_NAME_32BIT(name)        PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name)          PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_RC_PAE_STR(name)   "pgmRCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name)   "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name)        PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_NAME_NESTED(name)       PGM_CTX(pgm,ShwNested##name)
#define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
#define PGM_SHW_NAME_EPT(name)          PGM_CTX(pgm,ShwEPT##name)
#define PGM_SHW_NAME_RC_EPT_STR(name)   "pgmRCShwEPT" #name
#define PGM_SHW_NAME_R0_EPT_STR(name)   "pgmR0ShwEPT" #name
#define PGM_SHW_DECL(type, name)        PGM_CTX_DECL(type) PGM_SHW_NAME(name)
#define PGM_SHW_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Shw##name))

/* Shw_Gst */
#define PGM_BTH_NAME_32BIT_REAL(name)   PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name)   PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name)  PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name)     PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name)     PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name)    PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name)      PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_PROT(name)   PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name)  PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_NESTED_REAL(name)  PGM_CTX(pgm,BthNestedReal##name)
#define PGM_BTH_NAME_NESTED_PROT(name)  PGM_CTX(pgm,BthNestedProt##name)
#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
#define PGM_BTH_NAME_NESTED_PAE(name)   PGM_CTX(pgm,BthNestedPAE##name)
#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
#define PGM_BTH_NAME_EPT_REAL(name)     PGM_CTX(pgm,BthEPTReal##name)
#define PGM_BTH_NAME_EPT_PROT(name)     PGM_CTX(pgm,BthEPTProt##name)
#define PGM_BTH_NAME_EPT_32BIT(name)    PGM_CTX(pgm,BthEPT32Bit##name)
#define PGM_BTH_NAME_EPT_PAE(name)      PGM_CTX(pgm,BthEPTPAE##name)
#define PGM_BTH_NAME_EPT_AMD64(name)    PGM_CTX(pgm,BthEPTAMD64##name)

#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name)    "pgmRCBth32BitReal" #name
#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name)    "pgmRCBth32BitProt" #name
#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name)   "pgmRCBth32Bit32Bit" #name
#define PGM_BTH_NAME_RC_PAE_REAL_STR(name)      "pgmRCBthPAEReal" #name
#define PGM_BTH_NAME_RC_PAE_PROT_STR(name)      "pgmRCBthPAEProt" #name
#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name)     "pgmRCBthPAE32Bit" #name
#define PGM_BTH_NAME_RC_PAE_PAE_STR(name)       "pgmRCBthPAEPAE" #name
#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name)   "pgmRCBthAMD64AMD64" #name
#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name)   "pgmRCBthNestedReal" #name
#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name)   "pgmRCBthNestedProt" #name
#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name)  "pgmRCBthNested32Bit" #name
#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name)    "pgmRCBthNestedPAE" #name
#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name)  "pgmRCBthNestedAMD64" #name
#define PGM_BTH_NAME_RC_EPT_REAL_STR(name)      "pgmRCBthEPTReal" #name
#define PGM_BTH_NAME_RC_EPT_PROT_STR(name)      "pgmRCBthEPTProt" #name
#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name)     "pgmRCBthEPT32Bit" #name
#define PGM_BTH_NAME_RC_EPT_PAE_STR(name)       "pgmRCBthEPTPAE" #name
#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)     "pgmRCBthEPTAMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)    "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name)   "pgmR0BthNestedReal" #name
#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name)   "pgmR0BthNestedProt" #name
#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)  "pgmR0BthNested32Bit" #name
#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name)    "pgmR0BthNestedPAE" #name
#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)  "pgmR0BthNestedAMD64" #name
#define PGM_BTH_NAME_R0_EPT_REAL_STR(name)      "pgmR0BthEPTReal" #name
#define PGM_BTH_NAME_R0_EPT_PROT_STR(name)      "pgmR0BthEPTProt" #name
#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name)     "pgmR0BthEPT32Bit" #name
#define PGM_BTH_NAME_R0_EPT_PAE_STR(name)       "pgmR0BthEPTPAE" #name
#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)     "pgmR0BthEPTAMD64" #name

#define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
#define PGM_BTH_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
2371
/**
 * Data for each paging mode.
 *
 * One entry per supported (guest, shadow) paging-mode combination; holds the
 * per-context (R3/R0/RC) function pointers that are copied into PGMCPU when
 * the mode is switched.
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t uGstType;
    /** The shadow mode type. */
    uint32_t uShwType;

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int,  pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int,  pfnR3ShwExit,(PVMCPU pVCpu));
    DECLR3CALLBACKMEMBER(int,  pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int,  pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLRCCALLBACKMEMBER(int,  pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLRCCALLBACKMEMBER(int,  pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int,  pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int,  pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int,  pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int,  pfnR3GstExit,(PVMCPU pVCpu));
    DECLR3CALLBACKMEMBER(int,  pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int,  pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int,  pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    DECLRCCALLBACKMEMBER(int,  pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLRCCALLBACKMEMBER(int,  pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLRCCALLBACKMEMBER(int,  pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int,  pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int,  pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int,  pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int,  pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    /* no pfnR3BthTrap0eHandler */
    DECLR3CALLBACKMEMBER(int,  pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int,  pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int,  pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int,  pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int,  pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR3CALLBACKMEMBER(int,  pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int,  pfnR3BthUnmapCR3,(PVMCPU pVCpu));

    DECLRCCALLBACKMEMBER(int,  pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
    DECLRCCALLBACKMEMBER(int,  pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int,  pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLRCCALLBACKMEMBER(int,  pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLRCCALLBACKMEMBER(int,  pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int,  pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLRCCALLBACKMEMBER(int,  pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int,  pfnRCBthUnmapCR3,(PVMCPU pVCpu));

    DECLR0CALLBACKMEMBER(int,  pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
    DECLR0CALLBACKMEMBER(int,  pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int,  pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int,  pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int,  pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int,  pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR0CALLBACKMEMBER(int,  pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int,  pfnR0BthUnmapCR3,(PVMCPU pVCpu));
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;
2454
2455
2456
/**
 * Converts a PGM pointer into a VM pointer.
 *
 * @returns Pointer to the VM structure the PGM is part of.
 * @param   pPGM    Pointer to PGM instance data.
 *
 * @note    The argument and member access are parenthesized so the macro
 *          expands correctly for any pointer-valued expression (CERT PRE01-C).
 */
#define PGM2VM(pPGM)  ( (PVM)((char *)(pPGM) - (pPGM)->offVM) )
2463
/**
 * PGM Data (part of VM)
 */
typedef struct PGM
{
    /** Offset to the VM structure. */
    RTINT                           offVM;
    /** Offset of the PGMCPU structure relative to VMCPU. */
    RTINT                           offVCpuPGM;

    /** @cfgm{RamPreAlloc, boolean, false}
     * Indicates whether the base RAM should all be allocated before starting
     * the VM (default), or if it should be allocated when first written to.
     */
    bool                            fRamPreAlloc;
    /** Indicates whether write monitoring is currently in use.
     * This is used to prevent conflicts between live saving and page sharing
     * detection. */
    bool                            fPhysWriteMonitoringEngaged;
    /** Alignment padding. */
    bool                            afAlignment0[2];

    /*
     * This will be redefined at least two more times before we're done, I'm sure.
     * The current code is only to get on with the coding.
     *   - 2004-06-10: initial version, bird.
     *   - 2004-07-02: 1st time, bird.
     *   - 2004-10-18: 2nd time, bird.
     *   - 2005-07-xx: 3rd time, bird.
     */

    /** The host paging mode. (This is what SUPLib reports.) */
    SUPPAGINGMODE                   enmHostMode;

    /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
    RCPTRTYPE(PX86PTE)              paDynPageMap32BitPTEsGC;
    /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
    RCPTRTYPE(PX86PTEPAE)           paDynPageMapPaePTEsGC;

    /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
    RTGCPHYS                        GCPhys4MBPSEMask;

    /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
     * This is sorted by physical address and contains no overlapping ranges. */
    R3PTRTYPE(PPGMRAMRANGE)         pRamRangesR3;
    /** R0 pointer corresponding to PGM::pRamRangesR3. */
    R0PTRTYPE(PPGMRAMRANGE)         pRamRangesR0;
    /** RC pointer corresponding to PGM::pRamRangesR3. */
    RCPTRTYPE(PPGMRAMRANGE)         pRamRangesRC;
    /** Generation ID for the RAM ranges. This member is incremented every time
     * a RAM range is linked or unlinked. */
    uint32_t volatile               idRamRangesGen;

    /** Pointer to the list of ROM ranges - for R3.
     * This is sorted by physical address and contains no overlapping ranges. */
    R3PTRTYPE(PPGMROMRANGE)         pRomRangesR3;
    /** R0 pointer corresponding to PGM::pRomRangesR3. */
    R0PTRTYPE(PPGMROMRANGE)         pRomRangesR0;
    /** RC pointer corresponding to PGM::pRomRangesR3. */
    RCPTRTYPE(PPGMROMRANGE)         pRomRangesRC;
#if HC_ARCH_BITS == 64
    /** Alignment padding. */
    RTRCPTR                         GCPtrPadding2;
#endif

    /** Pointer to the list of MMIO2 ranges - for R3.
     * Registration order. */
    R3PTRTYPE(PPGMMMIO2RANGE)       pMmio2RangesR3;

    /** PGM offset based trees - R3 Ptr. */
    R3PTRTYPE(PPGMTREES)            pTreesR3;
    /** PGM offset based trees - R0 Ptr. */
    R0PTRTYPE(PPGMTREES)            pTreesR0;
    /** PGM offset based trees - RC Ptr. */
    RCPTRTYPE(PPGMTREES)            pTreesRC;

    /** Linked list of GC mappings - for RC.
     * The list is sorted ascending on address.
     */
    RCPTRTYPE(PPGMMAPPING)          pMappingsRC;
    /** Linked list of GC mappings - for HC.
     * The list is sorted ascending on address.
     */
    R3PTRTYPE(PPGMMAPPING)          pMappingsR3;
    /** Linked list of GC mappings - for R0.
     * The list is sorted ascending on address.
     */
    R0PTRTYPE(PPGMMAPPING)          pMappingsR0;

    /** Pointer to the 5 page CR3 content mapping.
     * The first page is always the CR3 (in some form) while the 4 other pages
     * are used for the PDs in PAE mode. */
    RTGCPTR                         GCPtrCR3Mapping;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    uint32_t                        u32Alignment1;
#endif

    /** Indicates that PGMR3FinalizeMappings has been called and that further
     * PGMR3MapIntermediate calls will be rejected. */
    bool                            fFinalizedMappings;
    /** If set no conflict checks are required. */
    bool                            fMappingsFixed;
    /** If set if restored as fixed but we were unable to re-fixate at the old
     * location because of room or address incompatibilities. */
    bool                            fMappingsFixedRestored;
    /** If set, then no mappings are put into the shadow page table.
     * Use pgmMapAreMappingsEnabled() instead of direct access. */
    bool                            fMappingsDisabled;
    /** Size of fixed mapping.
     * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
    uint32_t                        cbMappingFixed;
    /** Base address (GC) of fixed mapping.
     * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
    RTGCPTR                         GCPtrMappingFixed;
    /** The address of the previous RAM range mapping. */
    RTGCPTR                         GCPtrPrevRamRangeMapping;

    /** @name Intermediate Context
     * @{ */
    /** Pointer to the intermediate page directory - Normal. */
    R3PTRTYPE(PX86PD)               pInterPD;
    /** Pointer to the intermediate page tables - Normal.
     * There are two page tables, one for the identity mapping and one for
     * the host context mapping (of the core code). */
    R3PTRTYPE(PX86PT)               apInterPTs[2];
    /** Pointer to the intermediate page tables - PAE. */
    R3PTRTYPE(PX86PTPAE)            apInterPaePTs[2];
    /** Pointer to the intermediate page directory - PAE. */
    R3PTRTYPE(PX86PDPAE)            apInterPaePDs[4];
    /** Pointer to the intermediate page directory - PAE. */
    R3PTRTYPE(PX86PDPT)             pInterPaePDPT;
    /** Pointer to the intermediate page-map level 4 - AMD64. */
    R3PTRTYPE(PX86PML4)             pInterPaePML4;
    /** Pointer to the intermediate page directory - AMD64. */
    R3PTRTYPE(PX86PDPT)             pInterPaePDPT64;
    /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
    RTHCPHYS                        HCPhysInterPD;
    /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
    RTHCPHYS                        HCPhysInterPaePDPT;
    /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
    RTHCPHYS                        HCPhysInterPaePML4;
    /** @} */

    /** Base address of the dynamic page mapping area.
     * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
     */
    RCPTRTYPE(uint8_t *)            pbDynPageMapBaseGC;
    /** The index of the last entry used in the dynamic page mapping area. */
    RTUINT                          iDynPageMapLast;
    /** Cache containing the last entries in the dynamic page mapping area.
     * The cache size is covering half of the mapping area. */
    RTHCPHYS                        aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
    /** Keep a lock counter for the full (!) mapping area. */
    uint32_t                        aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];

    /** The address of the ring-0 mapping cache if we're making use of it. */
    RTR0PTR                         pvR0DynMapUsed;
#if HC_ARCH_BITS == 32
    /** Alignment padding that makes the next member start on a 8 byte boundary. */
    uint32_t                        u32Alignment2;
#endif

    /** PGM critical section.
     * This protects the physical & virtual access handlers, ram ranges,
     * and the page flag updating (some of it anyway).
     */
    PDMCRITSECT                     CritSect;

    /** Pointer to SHW+GST mode data (function pointers).
     * The index into this table is made up from the shadow and guest paging
     * mode indexes.  (NOTE(review): the original comment was left unfinished
     * here -- verify the exact index composition against the mode data setup
     * code.) */
    R3PTRTYPE(PPGMMODEDATA)         paModeData;

    /** Shadow Page Pool - R3 Ptr. */
    R3PTRTYPE(PPGMPOOL)             pPoolR3;
    /** Shadow Page Pool - R0 Ptr. */
    R0PTRTYPE(PPGMPOOL)             pPoolR0;
    /** Shadow Page Pool - RC Ptr. */
    RCPTRTYPE(PPGMPOOL)             pPoolRC;

    /** We're not in a state which permits writes to guest memory.
     * (Only used in strict builds.) */
    bool                            fNoMorePhysWrites;
    /** Alignment padding that makes the next member start on a 8 byte boundary. */
    bool                            afAlignment3[HC_ARCH_BITS == 32 ? 7: 3];

    /**
     * Data associated with managing the ring-3 mappings of the allocation chunks.
     */
    struct
    {
        /** The chunk tree, ordered by chunk id. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        R3PTRTYPE(PAVLU32NODECORE)  pTree;
#else
        R3R0PTRTYPE(PAVLU32NODECORE) pTree;
#endif
        /** The chunk age tree, ordered by ageing sequence number. */
        R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
        /** The chunk mapping TLB. */
        PGMCHUNKR3MAPTLB            Tlb;
        /** The number of mapped chunks. */
        uint32_t                    c;
        /** The maximum number of mapped chunks.
         * @cfgm    PGM/MaxRing3Chunks */
        uint32_t                    cMax;
        /** The current time. */
        uint32_t                    iNow;
        /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
        uint32_t                    AgeingCountdown;
    } ChunkR3Map;

    /**
     * The page mapping TLB for ring-3 and (for the time being) ring-0.
     */
    PGMPAGER3MAPTLB                 PhysTlbHC;

    /** @name The zero page.
     * @{ */
    /** The host physical address of the zero page. */
    RTHCPHYS                        HCPhysZeroPg;
    /** The ring-3 mapping of the zero page. */
    RTR3PTR                         pvZeroPgR3;
    /** The ring-0 mapping of the zero page. */
    RTR0PTR                         pvZeroPgR0;
    /** The GC mapping of the zero page. */
    RTGCPTR                         pvZeroPgRC;
    /** @}*/

    /** The number of handy pages. */
    uint32_t                        cHandyPages;

    /** The number of large handy pages. */
    uint32_t                        cLargeHandyPages;

    /**
     * Array of handy pages.
     *
     * This array is used in a two way communication between pgmPhysAllocPage
     * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
     * an intermediary.
     *
     * The size of this array is important, see pgmPhysEnsureHandyPage for details.
     * (The current size of 32 pages, means 128 KB of handy memory.)
     */
    GMMPAGEDESC                     aHandyPages[PGM_HANDY_PAGES];

    /**
     * Array of large handy pages. (currently size 1)
     *
     * This array is used in a two way communication between pgmPhysAllocLargePage
     * and GMMR0AllocateLargePage, with PGMR3PhysAllocateLargePage serving as
     * an intermediary.
     */
    GMMPAGEDESC                     aLargeHandyPage[1];

    /**
     * Live save data.
     */
    struct
    {
        /** Per type statistics. */
        struct
        {
            /** The number of ready pages. */
            uint32_t                cReadyPages;
            /** The number of dirty pages. */
            uint32_t                cDirtyPages;
            /** The number of ready zero pages. */
            uint32_t                cZeroPages;
            /** The number of write monitored pages. */
            uint32_t                cMonitoredPages;
        }                           Rom,
                                    Mmio2,
                                    Ram;
        /** The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM). */
        uint32_t                    cIgnoredPages;
        /** Indicates that a live save operation is active. */
        bool                        fActive;
        /** Padding. */
        bool                        afReserved[2];
        /** The next history index. */
        uint8_t                     iDirtyPagesHistory;
        /** History of the total amount of dirty pages. */
        uint32_t                    acDirtyPagesHistory[64];
        /** Short term dirty page average. */
        uint32_t                    cDirtyPagesShort;
        /** Long term dirty page average. */
        uint32_t                    cDirtyPagesLong;
        /** The number of saved pages.  This is used to get some kind of estimate of the
         * link speed so we can decide when we're done.  It is reset after the first
         * 7 passes so the speed estimate doesn't get inflated by the initial set of
         * zero pages. */
        uint64_t                    cSavedPages;
        /** The nanosecond timestamp when cSavedPages was 0. */
        uint64_t                    uSaveStartNS;
        /** Pages per second (for statistics). */
        uint32_t                    cPagesPerSecond;
        uint32_t                    cAlignment;
    } LiveSave;

    /** @name   Error injection.
     * @{ */
    /** Inject handy page allocation errors pretending we're completely out of
     * memory. */
    bool volatile                   fErrInjHandyPages;
    /** Padding. */
    bool                            afReserved[3];
    /** @} */

    /** @name Release Statistics
     * @{ */
    uint32_t                        cAllPages;          /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
    uint32_t                        cPrivatePages;      /**< The number of private pages. */
    uint32_t                        cSharedPages;       /**< The number of shared pages. */
    uint32_t                        cZeroPages;         /**< The number of zero backed pages. */
    uint32_t                        cPureMmioPages;     /**< The number of pure MMIO pages. */
    uint32_t                        cMonitoredPages;    /**< The number of write monitored pages. */
    uint32_t                        cWrittenToPages;    /**< The number of previously write monitored pages. */
    uint32_t                        cWriteLockedPages;  /**< The number of write locked pages. */
    uint32_t                        cReadLockedPages;   /**< The number of read locked pages. */

    /** The number of times we were forced to change the hypervisor region location. */
    STAMCOUNTER                     cRelocations;

    STAMCOUNTER                     StatLargePageAlloc;     /**< The number of large pages we've allocated.*/
    STAMCOUNTER                     StatLargePageReused;    /**< The number of large pages we've reused.*/
    STAMCOUNTER                     StatLargePageRefused;   /**< The number of times we couldn't use a large page.*/
    STAMCOUNTER                     StatLargePageRecheck;   /**< The number of times we rechecked a disabled large page.*/
    /** @} */

#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
    /* R3 only: */
    STAMCOUNTER StatR3DetectedConflicts;            /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
    STAMPROFILE StatR3ResolveConflict;              /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */

    STAMCOUNTER StatRZChunkR3MapTlbHits;            /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
    STAMCOUNTER StatRZChunkR3MapTlbMisses;          /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
    STAMCOUNTER StatRZPageMapTlbHits;               /**< RC/R0: Ring-3/0 page mapper TLB hits. */
    STAMCOUNTER StatRZPageMapTlbMisses;             /**< RC/R0: Ring-3/0 page mapper TLB misses. */
    STAMCOUNTER StatPageMapTlbFlushes;              /**< ALL: Ring-3/0 page mapper TLB flushes. */
    STAMCOUNTER StatPageMapTlbFlushEntry;           /**< ALL: Ring-3/0 page mapper TLB flushes. */
    STAMCOUNTER StatR3ChunkR3MapTlbHits;            /**< R3: Ring-3/0 chunk mapper TLB hits. */
    STAMCOUNTER StatR3ChunkR3MapTlbMisses;          /**< R3: Ring-3/0 chunk mapper TLB misses. */
    STAMCOUNTER StatR3PageMapTlbHits;               /**< R3: Ring-3/0 page mapper TLB hits. */
    STAMCOUNTER StatR3PageMapTlbMisses;             /**< R3: Ring-3/0 page mapper TLB misses. */
    STAMPROFILE StatRZSyncCR3HandlerVirtualReset;   /**< RC/R0: Profiling of the virtual handler resets. */
    STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate;  /**< RC/R0: Profiling of the virtual handler updates. */
    STAMPROFILE StatR3SyncCR3HandlerVirtualReset;   /**< R3: Profiling of the virtual handler resets. */
    STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate;  /**< R3: Profiling of the virtual handler updates. */
    STAMCOUNTER StatR3PhysHandlerReset;             /**< R3: The number of times PGMHandlerPhysicalReset is called. */
    STAMCOUNTER StatRZPhysHandlerReset;             /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
    STAMPROFILE StatRZVirtHandlerSearchByPhys;      /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
    STAMPROFILE StatR3VirtHandlerSearchByPhys;      /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
    STAMCOUNTER StatRZPageReplaceShared;            /**< RC/R0: Times a shared page has been replaced by a private one. */
    STAMCOUNTER StatRZPageReplaceZero;              /**< RC/R0: Times the zero page has been replaced by a private one. */
/// @todo   STAMCOUNTER StatRZPageHandyAllocs;      /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
    STAMCOUNTER StatR3PageReplaceShared;            /**< R3: Times a shared page has been replaced by a private one. */
    STAMCOUNTER StatR3PageReplaceZero;              /**< R3: Times the zero page has been replaced by a private one. */
/// @todo   STAMCOUNTER StatR3PageHandyAllocs;      /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */

    /* RC only: */
    STAMCOUNTER StatRCDynMapCacheMisses;            /**< RC: The number of dynamic page mapping cache misses */
    STAMCOUNTER StatRCDynMapCacheHits;              /**< RC: The number of dynamic page mapping cache hits */
    STAMCOUNTER StatRCInvlPgConflict;               /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
    STAMCOUNTER StatRCInvlPgSyncMonCR3;             /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */

    STAMCOUNTER StatRZPhysRead;
    STAMCOUNTER StatRZPhysReadBytes;
    STAMCOUNTER StatRZPhysWrite;
    STAMCOUNTER StatRZPhysWriteBytes;
    STAMCOUNTER StatR3PhysRead;
    STAMCOUNTER StatR3PhysReadBytes;
    STAMCOUNTER StatR3PhysWrite;
    STAMCOUNTER StatR3PhysWriteBytes;
    STAMCOUNTER StatRCPhysRead;
    STAMCOUNTER StatRCPhysReadBytes;
    STAMCOUNTER StatRCPhysWrite;
    STAMCOUNTER StatRCPhysWriteBytes;

    STAMCOUNTER StatRZPhysSimpleRead;
    STAMCOUNTER StatRZPhysSimpleReadBytes;
    STAMCOUNTER StatRZPhysSimpleWrite;
    STAMCOUNTER StatRZPhysSimpleWriteBytes;
    STAMCOUNTER StatR3PhysSimpleRead;
    STAMCOUNTER StatR3PhysSimpleReadBytes;
    STAMCOUNTER StatR3PhysSimpleWrite;
    STAMCOUNTER StatR3PhysSimpleWriteBytes;
    STAMCOUNTER StatRCPhysSimpleRead;
    STAMCOUNTER StatRCPhysSimpleReadBytes;
    STAMCOUNTER StatRCPhysSimpleWrite;
    STAMCOUNTER StatRCPhysSimpleWriteBytes;

    STAMCOUNTER StatTrackVirgin;                    /**< The number of first time shadowings. */
    STAMCOUNTER StatTrackAliased;                   /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
    STAMCOUNTER StatTrackAliasedMany;               /**< The number of times we're tracking using cRef2. */
    STAMCOUNTER StatTrackAliasedLots;               /**< The number of times we're hitting pages which has overflowed cRef2. */
    STAMCOUNTER StatTrackOverflows;                 /**< The number of times the extent list grows too long. */
    STAMPROFILE StatTrackDeref;                     /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
#endif
} PGM;
#ifndef IN_TSTVMSTRUCTGC /* HACK */
AssertCompileMemberAlignment(PGM, paDynPageMap32BitPTEsGC, 8);
AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
AssertCompileMemberAlignment(PGM, CritSect, 8);
AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
AssertCompileMemberAlignment(PGM, aHandyPages, 8);
AssertCompileMemberAlignment(PGM, cRelocations, 8);
#endif /* !IN_TSTVMSTRUCTGC */
/** Pointer to the PGM instance data. */
typedef PGM *PPGM;
2878
2879
/**
 * Converts a PGMCPU pointer into a VM pointer.
 *
 * @returns Pointer to the VM structure the PGMCPU is part of.
 * @param   pPGM    Pointer to PGMCPU instance data.
 *
 * @note    The argument and member access are parenthesized so the macro
 *          expands correctly for any pointer-valued expression (CERT PRE01-C).
 */
#define PGMCPU2VM(pPGM)  ( (PVM)((char *)(pPGM) - (pPGM)->offVM) )
2886
/**
 * Converts a PGMCPU pointer into a PGM pointer.
 *
 * @returns Pointer to the PGM instance data the PGMCPU is part of.
 * @param   pPGMCpu Pointer to PGMCPU instance data.
 *
 * @note    The argument and member access are parenthesized so the macro
 *          expands correctly for any pointer-valued expression (CERT PRE01-C).
 */
#define PGMCPU2PGM(pPGMCpu)  ( (PPGM)((char *)(pPGMCpu) - (pPGMCpu)->offPGM) )
2893
2894/**
2895 * PGMCPU Data (part of VMCPU).
2896 */
2897typedef struct PGMCPU
2898{
2899 /** Offset to the VM structure. */
2900 RTINT offVM;
2901 /** Offset to the VMCPU structure. */
2902 RTINT offVCpu;
2903 /** Offset of the PGM structure relative to VMCPU. */
2904 RTINT offPGM;
2905 RTINT uPadding0; /**< structure size alignment. */
2906
2907#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2908 /** Automatically tracked physical memory mapping set.
2909 * Ring-0 and strict raw-mode builds. */
2910 PGMMAPSET AutoSet;
2911#endif
2912
2913 /** A20 gate mask.
2914 * Our current approach to A20 emulation is to let REM do it and don't bother
2915 * anywhere else. The interesting Guests will be operating with it enabled anyway.
2916 * But whould need arrise, we'll subject physical addresses to this mask. */
2917 RTGCPHYS GCPhysA20Mask;
2918 /** A20 gate state - boolean! */
2919 bool fA20Enabled;
2920
2921 /** What needs syncing (PGM_SYNC_*).
2922 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2923 * PGMFlushTLB, and PGMR3Load. */
2924 RTUINT fSyncFlags;
2925
2926 /** The shadow paging mode. */
2927 PGMMODE enmShadowMode;
2928 /** The guest paging mode. */
2929 PGMMODE enmGuestMode;
2930
2931 /** The current physical address representing in the guest CR3 register. */
2932 RTGCPHYS GCPhysCR3;
2933
2934 /** @name 32-bit Guest Paging.
2935 * @{ */
2936 /** The guest's page directory, R3 pointer. */
2937 R3PTRTYPE(PX86PD) pGst32BitPdR3;
2938#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2939 /** The guest's page directory, R0 pointer. */
2940 R0PTRTYPE(PX86PD) pGst32BitPdR0;
2941#endif
2942 /** The guest's page directory, static RC mapping. */
2943 RCPTRTYPE(PX86PD) pGst32BitPdRC;
2944 /** @} */
2945
2946 /** @name PAE Guest Paging.
2947 * @{ */
2948 /** The guest's page directory pointer table, static RC mapping. */
2949 RCPTRTYPE(PX86PDPT) pGstPaePdptRC;
2950 /** The guest's page directory pointer table, R3 pointer. */
2951 R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
2952#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2953 /** The guest's page directory pointer table, R0 pointer. */
2954 R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
2955#endif
2956
2957 /** The guest's page directories, R3 pointers.
2958 * These are individual pointers and don't have to be adjecent.
2959 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2960 R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
2961 /** The guest's page directories, R0 pointers.
2962 * Same restrictions as apGstPaePDsR3. */
2963#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2964 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
2965#endif
2966 /** The guest's page directories, static GC mapping.
2967 * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
2968 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2969 RCPTRTYPE(PX86PDPAE) apGstPaePDsRC[4];
2970 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
2971 RTGCPHYS aGCPhysGstPaePDs[4];
2972 /** The physical addresses of the monitored guest page directories (PAE). */
2973 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
2974 /** @} */
2975
2976 /** @name AMD64 Guest Paging.
2977 * @{ */
2978 /** The guest's page directory pointer table, R3 pointer. */
2979 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
2980#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2981 /** The guest's page directory pointer table, R0 pointer. */
2982 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
2983#else
2984 RTR0PTR alignment6b; /**< alignment equalizer. */
2985#endif
2986 /** @} */
2987
2988 /** Pointer to the page of the current active CR3 - R3 Ptr. */
2989 R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
2990 /** Pointer to the page of the current active CR3 - R0 Ptr. */
2991 R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
2992 /** Pointer to the page of the current active CR3 - RC Ptr. */
2993 RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
2994 /* The shadow page pool index of the user table as specified during allocation; useful for freeing root pages */
2995 uint32_t iShwUser;
2996 /* The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
2997 uint32_t iShwUserTable;
2998# if HC_ARCH_BITS == 64
2999 RTRCPTR alignment6; /**< structure size alignment. */
3000# endif
3001 /** @} */
3002
3003 /** @name Function pointers for Shadow paging.
3004 * @{
3005 */
3006 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3007 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
3008 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3009 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3010
3011 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3012 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3013
3014 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
3015 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3016
3017 /** @} */
3018
3019 /** @name Function pointers for Guest paging.
3020 * @{
3021 */
3022 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3023 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
3024 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3025 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3026 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3027 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3028 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3029 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3030#if HC_ARCH_BITS == 64
3031 RTRCPTR alignment3; /**< structure size alignment. */
3032#endif
3033
3034 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
3035 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
3036 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
3037 /** @} */
3038
3039 /** @name Function pointers for Both Shadow and Guest paging.
3040 * @{
3041 */
3042 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
3043 /* no pfnR3BthTrap0eHandler */
3044 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3045 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3046 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3047 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3048 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3049 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3050 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3051 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
3052
3053 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3054 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3055 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3056 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3057 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3058 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3059 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3060 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3061 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
3062
3063 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3064 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3065 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3066 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3067 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3068 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3069 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3070 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3071 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
3072 RTRCPTR alignment2; /**< structure size alignment. */
3073 /** @} */
3074
3075 /** For saving stack space, the disassembler state is allocated here instead of
3076 * on the stack.
3077 * @note The DISCPUSTATE structure is not R3/R0/RZ clean! */
3078 union
3079 {
3080 /** The disassembler scratch space. */
3081 DISCPUSTATE DisState;
3082 /** Padding. */
3083 uint8_t abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
3084 };
3085
3086 /* Count the number of pgm pool access handler calls. */
3087 uint64_t cPoolAccessHandler;
3088
3089 /** @name Release Statistics
3090 * @{ */
3091 /** The number of times the guest has switched mode since last reset or statistics reset. */
3092 STAMCOUNTER cGuestModeChanges;
3093 /** @} */
3094
3095#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
3096 /** @name Statistics
3097 * @{ */
3098 /** RC: Which statistic this \#PF should be attributed to. */
3099 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
3100 RTRCPTR padding0;
3101 /** R0: Which statistic this \#PF should be attributed to. */
3102 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
3103 RTR0PTR padding1;
3104
3105 /* Common */
3106 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
3107 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
3108
3109 /* R0 only: */
3110 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
3111 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
3112 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
3113 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3114 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
3115 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
3116 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
3117 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
3118 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3119 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
3120 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
    STAMCOUNTER StatR0DynMapSetSearchFlushes;       /**< R0: Set search resorting to subset flushes. */
3122 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
3123 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
3124 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
3125 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
3126 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
3127 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
3128 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
3129 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
3130 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
3131 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
3132 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
3133 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
3134 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
3135 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
3136
3137 /* RZ only: */
3138 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
3139 STAMPROFILE StatRZTrap0eTimeCheckPageFault;
3140 STAMPROFILE StatRZTrap0eTimeSyncPT;
3141 STAMPROFILE StatRZTrap0eTimeMapping;
3142 STAMPROFILE StatRZTrap0eTimeOutOfSync;
3143 STAMPROFILE StatRZTrap0eTimeHandlers;
3144 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
3145 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
3146 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
3147 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
3148 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
3149 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
3150 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
3151 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
3152 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
3153 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
3154 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
3155 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
3156 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
3157 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
3158 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
3159 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
3160 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
3161 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
3162 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
3163 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
3164 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
3165 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: \#PF err kind */
3166 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: \#PF err kind */
3167 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: \#PF err kind */
3168 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: \#PF err kind */
3169 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: \#PF err kind */
3170 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: \#PF err kind */
3171 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: \#PF err kind */
3172 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: \#PF err kind */
3173 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: \#PF err kind */
3174 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: \#PF err kind */
3175 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: \#PF err kind */
3176 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest \#PFs. */
3177 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest \#PF ending up at the end of the \#PF code. */
3178 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest \#PF to HMA or other mapping. */
3179 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
3180 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
3181 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the \#PFs. */
3182 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
3183 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
3184 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
3185 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
3186 STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
3187
3188 /* HC - R3 and (maybe) R0: */
3189
3190 /* RZ & R3: */
3191 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
3192 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
3193 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
3194 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
3195 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
3196 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
3197 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
3198 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
3199 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
3200 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
3201 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
3202 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
3203 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
3204 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
3205 STAMCOUNTER StatRZSyncPagePDNAs; /**< RC/R0: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3206 STAMCOUNTER StatRZSyncPagePDOutOfSync; /**< RC/R0: The number of time we've encountered an out-of-sync PD in SyncPage. */
3207 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
    STAMPROFILE StatRZDirtyBitTracking;             /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault(). */
3209 STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3210 STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3211 STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
3212 STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
3213 STAMCOUNTER StatRZDirtyPageStale; /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
3214 STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real pages faults during dirty bit tracking. */
3215 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
3216 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
3217 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
3218 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
3219 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
3220 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
3221 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3222 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3223 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
3224 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
    STAMCOUNTER StatRZInvalidatePageSkipped;        /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
3226 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
    STAMCOUNTER StatRZPageOutOfSyncSupervisor;      /**< RC/R0: The number of times supervisor page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3228 STAMCOUNTER StatRZPageOutOfSyncUserWrite; /**< RC/R0: The number of times user page is out of sync was detected in \#PF. */
    STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times supervisor page is out of sync was detected in \#PF. */
3230 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
3231 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
3232 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3233 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3234 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3235 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3236 STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body */
3237
3238 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
3239 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
3240 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
3241 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
3242 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
3243 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
3244 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
3245 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
3246 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
3247 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
3248 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
3249 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
3250 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
3251 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
3252 STAMCOUNTER StatR3SyncPagePDNAs; /**< R3: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3253 STAMCOUNTER StatR3SyncPagePDOutOfSync; /**< R3: The number of time we've encountered an out-of-sync PD in SyncPage. */
3254 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
3255 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
3256 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3257 STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3258 STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
3259 STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
3260 STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real pages faults during dirty bit tracking. */
3261 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
3262 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
3263 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
3264 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
3265 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
3266 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
3267 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3268 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
3269 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3270 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
    STAMCOUNTER StatR3InvalidatePageSkipped;        /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
3272 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
    STAMCOUNTER StatR3PageOutOfSyncSupervisor;      /**< R3: The number of times supervisor page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3274 STAMCOUNTER StatR3PageOutOfSyncUserWrite; /**< R3: The number of times user page is out of sync was detected in \#PF. */
    STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times supervisor page is out of sync was detected in \#PF. */
3276 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
3277 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
3278 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3279 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3280 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3281 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3282 STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
3283 /** @} */
3284#endif /* VBOX_WITH_STATISTICS */
3285} PGMCPU;
3286/** Pointer to the per-cpu PGM data. */
3287typedef PGMCPU *PPGMCPU;
3288
3289
/** @name PGM::fSyncFlags Flags
 * Flags stored in PGM::fSyncFlags controlling what the next SyncCR3 /
 * CR3 (re)load has to do.
 * @{
 */
/** Updates the virtual access handler state bit in PGMPAGE. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL        RT_BIT(0)
/** Always sync CR3 (i.e. force a full sync on the next SyncCR3 call). */
#define PGM_SYNC_ALWAYS                         RT_BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page.
 * @todo This is obsolete now. Remove after 2.2.0 is branched off. */
#define PGM_SYNC_MONITOR_CR3                    RT_BIT(2)
/** Check guest mapping in SyncCR3. */
#define PGM_SYNC_MAP_CR3                        RT_BIT(3)
/** Clear the page pool (a light weight flush).
 * Bit number, for use with ASMAtomic*Bit style APIs. */
#define PGM_SYNC_CLEAR_PGM_POOL_BIT             8
/** Clear the page pool (a light weight flush). Mask form of
 * PGM_SYNC_CLEAR_PGM_POOL_BIT. */
#define PGM_SYNC_CLEAR_PGM_POOL                 RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
/** @} */
3306
3307
3308RT_C_DECLS_BEGIN
3309
3310int pgmLock(PVM pVM);
3311void pgmUnlock(PVM pVM);
3312
3313int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
3314int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
3315int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
3316PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
3317int pgmMapResolveConflicts(PVM pVM);
3318DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3319
3320void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
3321bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
3322void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
3323int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
3324DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
3325#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
3326void pgmHandlerVirtualDumpPhysPages(PVM pVM);
3327#else
3328# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
3329#endif
3330DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3331int pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
3332
3333int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3334int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
3335int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
3336int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
3337int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3338void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage);
3339int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3340int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3341int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3342int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3343int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
3344int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
3345int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3346int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
3347VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
3348#ifdef IN_RING3
3349void pgmR3PhysRelinkRamRanges(PVM pVM);
3350int pgmR3PhysRamPreAllocate(PVM pVM);
3351int pgmR3PhysRamReset(PVM pVM);
3352int pgmR3PhysRomReset(PVM pVM);
3353int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
3354
3355int pgmR3PoolInit(PVM pVM);
3356void pgmR3PoolRelocate(PVM pVM);
3357void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
3358void pgmR3PoolReset(PVM pVM);
3359void pgmR3PoolClearAll(PVM pVM);
3360DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser);
3361
3362#endif /* IN_RING3 */
3363#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3364int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
3365#endif
3366int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
3367
/**
 * Allocates a page from the shadow page pool without specifying an access
 * type (convenience wrapper that forwards to pgmPoolAllocEx() with
 * PGMPOOLACCESS_DONTCARE).
 *
 * @returns VBox status code (see pgmPoolAllocEx).
 * @param   pVM         The VM handle.
 * @param   GCPhys      The guest physical address backing the shadow page.
 * @param   enmKind     The kind of pool page to allocate.
 * @param   iUser       The shadow page pool index of the user table.
 * @param   iUserTable  The index into the user table.
 * @param   ppPage      Where to store the pointer to the allocated pool page.
 * @param   fLockPage   Whether to lock the page; defaults to false.
 */
DECLINLINE(int) pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false)
{
    return pgmPoolAllocEx(pVM, GCPhys, enmKind, PGMPOOLACCESS_DONTCARE, iUser, iUserTable, ppPage, fLockPage);
}
3372
3373void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
3374void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
3375int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOWN WHAT YOU'RE DOING!! */);
3376void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys);
3377PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
3378int pgmPoolSyncCR3(PVMCPU pVCpu);
3379bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys);
3380int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
3381void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT);
/**
 * Flushes all shadow references to a guest physical page (convenience
 * wrapper that forwards to pgmPoolTrackUpdateGCPhys() with fFlushPTEs set).
 *
 * @returns VBox status code (see pgmPoolTrackUpdateGCPhys).
 * @param   pVM         The VM handle.
 * @param   GCPhysPage  The guest physical address of the page.
 * @param   pPhysPage   The tracked physical page (PGMPAGE) entry.
 * @param   pfFlushTLBs Where to return whether the TLBs need flushing
 *                      afterwards.
 */
DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
{
    return pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPhysPage, true /* flush PTEs */, pfFlushTLBs);
}
3386
3387uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
3388void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
3389void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
3390int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3391void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3392
3393void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3394void pgmPoolResetDirtyPages(PVM pVM);
3395
3396int pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
3397int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);
3398
3399void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
3400void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
3401int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3402int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3403
3404int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3405#ifndef IN_RC
3406int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3407#endif
3408int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
3409
3410PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM);
3411PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
3412PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
3413PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM);
3414
3415RT_C_DECLS_END
3416
3417/** @} */
3418
3419#endif
3420
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette