VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 23

Last change on this file since 23 was 23, checked in by vboxsync, 18 years ago

string.h & stdio.h + header cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 109.5 KB
Line 
1/* $Id: PGMInternal.h 23 2007-01-15 14:08:28Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22#ifndef __PGMInternal_h__
23#define __PGMInternal_h__
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdm.h>
33#include <iprt/avl.h>
34#include <iprt/assert.h>
35#include <iprt/critsect.h>
36
/*
 * Sanity check: this header is internal to PGM and may only be included by
 * PGM source files, which define IN_PGM_R3, IN_PGM_R0 or IN_PGM_GC.
 */
#if !defined(IN_PGM_R3) && !defined(IN_PGM_R0) && !defined(IN_PGM_GC)
# error "Not in PGM! This is an internal header!"
#endif
40
41
42/** @defgroup grp_pgm_int Internals
43 * @ingroup grp_pgm
44 * @internal
45 * @{
46 */
47
48
/** @name PGM Compile Time Config
 * @{
 */

/**
 * Solve page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Virtualize the dirty bit.
 * This also makes a half-hearted attempt at the accessed bit.  For full
 * accessed bit virtualization define PGM_SYNC_ACCESSED_BIT.
 */
#define PGM_SYNC_DIRTY_BIT

/**
 * Fully virtualize the accessed bit.
 * @remark This requires PGM_SYNC_DIRTY_BIT to be defined as well
 *         (enforced by the sanity check below).
 */
#define PGM_SYNC_ACCESSED_BIT

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Sync N pages instead of a whole page table on a page fault.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 */
#define PGM_SYNC_NR_PAGES               8

/**
 * Number of PGMPhysRead/Write cache entries.
 * Must be <= 64 (i.e. <= the bit count of uint64_t) since PGMPHYSCACHE
 * tracks entry validity in a single uint64_t bitmap.
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)

/**
 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword.
 */
#define PGM_PHYSMEMACCESS_CACHING

/*
 * Assert Sanity.
 */
#if defined(PGM_SYNC_ACCESSED_BIT) && !defined(PGM_SYNC_DIRTY_BIT)
# error "PGM_SYNC_ACCESSED_BIT requires PGM_SYNC_DIRTY_BIT!"
#endif

/** @def PGMPOOL_WITH_CACHE
 * Enable aggressive caching using the page pool.
 *
 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
 */
#define PGMPOOL_WITH_CACHE

/** @def PGMPOOL_WITH_MIXED_PT_CR3
 * When defined, we'll deal with 'uncachable' pages.
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MIXED_PT_CR3
#endif

/** @def PGMPOOL_WITH_MONITORING
 * Monitor the guest pages which are shadowed.
 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
 * be enabled as well.
 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MONITORING
#endif

/** @def PGMPOOL_WITH_GCPHYS_TRACKING
 * Tracking of the shadow pages mapping guest physical pages.
 *
 * This is very expensive, the current cache prototype is trying to figure out
 * whether it will be acceptable with an aggressive caching policy.
 */
#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_GCPHYS_TRACKING
#endif

/** @def PGMPOOL_WITH_USER_TRACKING
 * Tracking users of shadow pages. This is required for the linking of shadow page
 * tables and physical guest addresses.
 */
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_USER_TRACKING
#endif

/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go (256 KB worth).
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif
/** @} */
162
163
/** @name PDPTR and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPTR and PML4 entries.
 * @{ */
/** The entry is a permanent one and must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT      BIT64(10)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING         BIT64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY     BIT64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY     BIT64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED  BIT64(11)
#endif
/** @} */

/** @name Defines used to indicate the shadow and guest paging modes in the
 * paging templates.
 * @{ */
#define PGM_TYPE_REAL   1
#define PGM_TYPE_PROT   2
#define PGM_TYPE_32BIT  3
#define PGM_TYPE_PAE    4
#define PGM_TYPE_AMD64  5
/** @} */
206
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page-mapping windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMGCDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page-mapping windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMGCDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page-mapping windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMGCDynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_INVL_PG
 * Invalidates a page when in GC; does nothing in HC.
 *
 * @param   GCVirt  The virtual address of the page to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_PG(GCVirt)        ASMInvalidatePage((void *)(GCVirt))
#else
# define PGM_INVL_PG(GCVirt)        ((void)0)
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry when in GC (by reloading CR3,
 * which flushes the whole TLB); does nothing in HC.
 *
 * @param   GCVirt  The virtual address within the page directory to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_BIG_PG(GCVirt)    ASMReloadCR3()
#else
# define PGM_INVL_BIG_PG(GCVirt)    ((void)0)
#endif

/** @def PGM_INVL_GUEST_TLBS()
 * Invalidates all guest TLBs (GC only; does nothing in HC).
 */
#ifdef IN_GC
# define PGM_INVL_GUEST_TLBS()      ASMReloadCR3()
#else
# define PGM_INVL_GUEST_TLBS()      ((void)0)
#endif
291
292
/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry (HC list). */
    HCPTRTYPE(struct PGMMAPPING *)  pNextHC;
    /** Pointer to next entry (GC list). */
    GCPTRTYPE(struct PGMMAPPING *)  pNextGC;
    /** Start Virtual address. */
    RTGCUINTPTR                     GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCUINTPTR                     GCPtrLast;
    /** Range size (bytes). */
    RTGCUINTPTR                     cb;
    /** Pointer to relocation callback function. */
    HCPTRTYPE(PFNPGMRELOCATE)       pfnRelocate;
    /** User argument to the callback. */
    HCPTRTYPE(void *)               pvUser;
    /** Mapping description / name. For easing debugging. */
    HCPTRTYPE(const char *)         pszDesc;
    /** Number of page tables. */
    RTUINT                          cPTs;
#if HC_ARCH_BITS != GC_ARCH_BITS
    RTUINT                          uPadding0; /**< Alignment padding. */
#endif
    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length (allocated to hold cPTs entries).
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        HCPTRTYPE(PVBOXPT)      pPTHC;
        /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
        HCPTRTYPE(PX86PTPAE)    paPaePTsHC;
        /** The GC virtual address of the 32-bit page table. */
        GCPTRTYPE(PVBOXPT)      pPTGC;
        /** The GC virtual address of the two PAE page tables. */
        GCPTRTYPE(PX86PTPAE)    paPaePTsGC;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
345
346
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE             Core;
    /** Alignment padding. */
    uint32_t                        u32Padding;
    /** Access type. */
    PGMPHYSHANDLERTYPE              enmType;
    /** Number of pages to update. */
    uint32_t                        cPages;
    /** Pointer to R3 callback function. */
    HCPTRTYPE(PFNPGMR3PHYSHANDLER)  pfnHandlerR3;
    /** User argument for R3 handlers. */
    HCPTRTYPE(void *)               pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnHandlerR0;
    /** User argument for R0 handlers.
     * NOTE(review): declared HCPTRTYPE while pfnHandlerR0 above is R0PTRTYPE;
     * presumably fine while HC and R0 share an address space -- confirm. */
    HCPTRTYPE(void *)               pvUserR0;
    /** Pointer to GC callback function. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER)  pfnHandlerGC;
    /** User argument for GC handlers. */
    GCPTRTYPE(void *)               pvUserGC;
    /** Description / Name. For easing debugging. */
    HCPTRTYPE(const char *)         pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                     Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
383
384
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE     Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    RTGCINTPTR              offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     */
    int32_t                 offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)
412
413
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Alignment padding. */
    uint32_t                            u32Padding;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Number of cache pages. */
    uint32_t                            cPages;

/** @todo The next two members are redundant. It adds some readability though. */
    /** Start of the range. */
    RTGCPTR                             GCPtr;
    /** End of the range (exclusive).
     * NOTE(review): named 'Last' but documented exclusive, unlike
     * PGMMAPPING::GCPtrLast which is inclusive -- confirm against users. */
    RTGCPTR                             GCPtrLast;
    /** Size of the range (in bytes). */
    RTGCUINTPTR                         cb;
    /** Pointer to the GC callback function. */
    GCPTRTYPE(PFNPGMGCVIRTHANDLER)      pfnHandlerGC;
    /** Pointer to the HC callback function for invalidation. */
    HCPTRTYPE(PFNPGMHCVIRTINVALIDATE)   pfnInvalidateHC;
    /** Pointer to the HC callback function. */
    HCPTRTYPE(PFNPGMHCVIRTHANDLER)      pfnHandlerHC;
    /** Description / Name. For easing debugging. */
    HCPTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored ranged. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[1];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
455
456
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Pointer to the next RAM range - for HC. */
    HCPTRTYPE(struct PGMRAMRANGE *) pNextHC;
    /** Pointer to the next RAM range - for GC. */
    GCPTRTYPE(struct PGMRAMRANGE *) pNextGC;
    /** Start of the range. Page aligned. */
    RTGCPHYS                        GCPhys;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                        GCPhysLast;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                        cb;
    /** MM_RAM_* flags */
    uint32_t                        fFlags;

    /** HC virtual lookup ranges for 4 MB chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    GCPTRTYPE(void **)              pvHCChunkGC; /**< @todo r=bird: ditto */
    /** HC virtual lookup ranges for 4 MB chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    HCPTRTYPE(void **)              pvHCChunkHC; /**< @todo r=bird: rename to papvHCChunkHC */

    /** Start of the HC mapping of the range.
     * For pure MMIO and dynamically allocated ranges this is NULL, while for
     * all other ranges it is a valid pointer. */
    HCPTRTYPE(void *)               pvHC;

    /** Array of the flags and HC physical addresses corresponding to the range.
     * The index is the page number in the range. The size is cb >> PAGE_SHIFT.
     *
     * The 12 lower bits of the physical address are flags and must be masked
     * off to get the correct physical address.
     *
     * For pure MMIO ranges only the flags are valid.
     */
    RTHCPHYS                        aHCPhys[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;

/** Return the HC pointer corresponding to the ram range and physical offset.
 * For MM_RAM_FLAGS_DYNAMIC_ALLOC ranges the 4 MB chunk containing 'off' is
 * looked up via the per-context chunk table; otherwise 'off' is added to pvHC.
 * NOTE(review): the expansion ends with ';' (must be used as a statement
 * tail) and evaluates 'off' more than once without parenthesizing it -- only
 * pass side-effect-free expressions. */
#define PGMRAMRANGE_GETHCPTR(pRam, off) \
    (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) ? (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pvHCChunk)[(off >> PGM_DYNAMIC_CHUNK_SHIFT)] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
        : (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
506
/** @todo r=bird: fix typename. */
/**
 * PGMPhysRead/Write cache entry.
 */
typedef struct PGMPHYSCACHE_ENTRY
{
    /** HC pointer to physical page. */
    HCPTRTYPE(uint8_t *)    pbHC;
    /** GC Physical address for cache entry. */
    RTGCPHYS                GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS                u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHE_ENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead.
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (one bit per element of Entry). */
    uint64_t            aEntries;
    /** Cache entries. */
    PGMPHYSCACHE_ENTRY  Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
532
533
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX         0
/** The first special index (the 32-bit page directory root). */
#define PGMPOOL_IDX_FIRST_SPECIAL 1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD          1
/** The extended PAE page directory (2048 entries, works as root currently). */
#define PGMPOOL_IDX_PAE_PD      2
/** Page Directory Pointer Table (PAE root, not currently used). */
#define PGMPOOL_IDX_PDPTR       3
/** Page Map Level-4 (64-bit root). */
#define PGMPOOL_IDX_PML4        4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST       5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST        0x3fff
/** @} */

/** The NIL index for the parent (user) chain. */
#define NIL_PGMPOOL_USER_INDEX  ((uint16_t)0xffff)
557
/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint16_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
573
574
/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX   ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** The user page indices (three slots per extent node). */
    uint16_t            aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
591
592
/**
 * The kind of page that's being shadowed.
 * (Stored in PGMPOOLPAGE::enmKind as a uint8_t.)
 */
typedef enum PGMPOOLKIND
{
    /** The ritual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table; Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table; Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table; Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table; Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: PAE page directory; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD_FOR_32BIT_PD,
    /** Shw: PAE page directory; Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,

    /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPTR_FOR_64BIT_PDPTR,

    /** Shw: Root 32-bit page directory. */
    PGMPOOLKIND_ROOT_32BIT_PD,
    /** Shw: Root PAE page directory. */
    PGMPOOLKIND_ROOT_PAE_PD,
    /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    PGMPOOLKIND_ROOT_PDPTR,
    /** Shw: Root page map level-4 table. */
    PGMPOOLKIND_ROOT_PML4,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_PML4
} PGMPOOLKIND;
636
637
/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node core with the (HC) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** The guest physical address. */
    RTGCPHYS            GCPhys;
    /** Pointer to the HC mapping of the page. */
    HCPTRTYPE(void *)   pvPageHC;
    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    /** Alignment padding. */
    uint8_t             bPadding;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
#endif
#ifdef PGMPOOL_WITH_CACHE
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
/** @todo add more from PGMCache.h when merging with it. */
#endif /* PGMPOOL_WITH_CACHE */
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
    /** Used to indicate that the guest page being shadowed is also used as a CR3.
     * In these cases the access handler acts differently and will check
     * for mapping conflicts like the normal CR3 handler.
     * @todo When we change the CR3 shadowing to use pool pages, this flag can be
     * replaced by a list of pages which share access handler.
     */
    bool                fCR3Mix;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
704
705
#ifdef PGMPOOL_WITH_CACHE
/** The hash table size (must be a power of two for the mask below to work). */
# define PGMPOOL_HASH_SIZE      0x40
/** The hash function: guest page frame number modulo the table size. */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
#endif
712
713
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follow immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - HC Ptr. */
    HCPTRTYPE(PVM)      pVMHC;
    /** The VM handle - GC Ptr. */
    GCPTRTYPE(PVM)      pVMGC;
    /** The max pool size. This includes the special IDs. */
    uint16_t            cMaxPages;
    /** The current pool size. */
    uint16_t            cCurPages;
    /** The head of the free page list. */
    uint16_t            iFreeHead;
    /* Padding. */
    uint16_t            u16Padding;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the chain of free user nodes. */
    uint16_t            iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t            cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t            cPresent;
    /** Pointer to the array of user nodes - HC pointer. */
    HCPTRTYPE(PPGMPOOLUSER) paUsersHC;
    /** Pointer to the array of user nodes - GC pointer. */
    GCPTRTYPE(PPGMPOOLUSER) paUsersGC;
#endif /* PGMPOOL_WITH_USER_TRACKING */
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Head of the chain of free phys ext nodes. */
    uint16_t            iPhysExtFreeHead;
    /** The number of phys ext nodes we've allocated. */
    uint16_t            cMaxPhysExts;
    /** Pointer to the array of physical xref extent nodes - HC pointer. */
    HCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsHC;
    /** Pointer to the array of physical xref extent nodes - GC pointer. */
    GCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsGC;
#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
#ifdef PGMPOOL_WITH_CACHE
    /** Hash table for GCPhys addresses. */
    uint16_t            aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t            iAgeHead;
    /** The tail of the age list. */
    uint16_t            iAgeTail;
    /** Set if the cache is enabled. */
    bool                fCacheEnabled;
#endif /* PGMPOOL_WITH_CACHE */
#ifdef PGMPOOL_WITH_MONITORING
    /** Access handler, GC. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER)  pfnAccessHandlerGC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    /** Access handler, R3. */
    HCPTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    /** The access handler description (HC ptr). */
    HCPTRTYPE(const char *)         pszAccessHandler;
    /** Head of the list of modified pages. */
    uint16_t            iModifiedHead;
    /** The current number of modified pages. */
    uint16_t            cModifiedPages;
#endif /* PGMPOOL_WITH_MONITORING */
    /** The number of pages currently in use. */
    uint16_t            cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t            cUsedPagesHigh;
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV      StatAlloc;
    /** Profiling pgmPoolClearAll(). */
    STAMPROFILE         StatClearAll;
    /** Profiling pgmPoolFlushAllInt(). */
    STAMPROFILE         StatFlushAllInt;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE         StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE         StatFree;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE         StatZeroPage;
# ifdef PGMPOOL_WITH_USER_TRACKING
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE         StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE         StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE         StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE         StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER         StatTrackFreeUpOneUser;
# endif
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE         StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER         StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER         StamTrackPhysExtAllocFailures;
# endif
# ifdef PGMPOOL_WITH_MONITORING
    /** Profiling the GC PT access handler. */
    STAMPROFILE         StatMonitorGC;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER         StatMonitorGCEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the GC PT access handler. */
    STAMPROFILE         StatMonitorGCFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER         StatMonitorGCFork;
    /** Profiling the GC access we've handled (except REP STOSD). */
    STAMPROFILE         StatMonitorGCHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER         StatMonitorGCIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER         StatMonitorGCIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER         StatMonitorGCRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE         StatMonitorGCRepStosd;

    /** Profiling the HC PT access handler. */
    STAMPROFILE         StatMonitorHC;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER         StatMonitorHCEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the HC PT access handler. */
    STAMPROFILE         StatMonitorHCFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER         StatMonitorHCFork;
    /** Profiling the HC access we've handled (except REP STOSD). */
    STAMPROFILE         StatMonitorHCHandled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER         StatMonitorHCRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE         StatMonitorHCRepStosd;
    /** The number of times we're called in an async thread and need to flush. */
    STAMCOUNTER         StatMonitorHCAsync;
    /** The high water mark for cModifiedPages. */
    uint16_t            cModifiedPagesHigh;
# endif
# ifdef PGMPOOL_WITH_CACHE
    /** The number of cache hits. */
    STAMCOUNTER         StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER         StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER         StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER         StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER         StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER         StatCacheUncacheable;
# endif
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE      HCPhysTree;
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE         aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
879
880
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the mapping of the page (in HC this is simply
 *          PGMPOOLPAGE::pvPageHC; in GC it is the result of
 *          pgmGCPoolMapPage() -- presumably a pointer too, confirm).
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In GC this uses pgmGCPoolMapPage(), which may consume one of the
 *          small page-mapping windows. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmGCPoolMapPage((pVM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     ((pPage)->pvPageHC)
#endif
897
898
/**
 * PGM lookup trees.
 *
 * The trees use self-relative offsets as pointers, so all tree data,
 * including the root pointers, must live in the heap for HC and GC to
 * see the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Alignment padding. */
    uint32_t auPadding[1];
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
916
917
/** @name Paging mode macros
 *
 * PGM_CTX pastes the current-context infix (GC, R3 or R0) into an
 * identifier, PGM_CTX_STR does the same inside a string literal, and
 * PGM_CTX_DECL picks the matching linkage/calling-convention declaration
 * macro for the context being compiled.
 * @{ */
#ifdef IN_GC
# define PGM_CTX(a,b) a##GC##b
# define PGM_CTX_STR(a,b) a "GC" b
# define PGM_CTX_DECL(type) PGMGCDECL(type)
#else
# ifdef IN_RING3
# define PGM_CTX(a,b) a##R3##b
# define PGM_CTX_STR(a,b) a "R3" b
# define PGM_CTX_DECL(type) DECLCALLBACK(type)
# else
# define PGM_CTX(a,b) a##R0##b
# define PGM_CTX_STR(a,b) a "R0" b
# define PGM_CTX_DECL(type) PGMDECL(type)
# endif
#endif
935
/* Gst: name-mangling macros, one set per guest paging mode
 * (Real, Prot, 32Bit, PAE, AMD64).  The *_GC_*_STR / *_R0_*_STR forms
 * produce the corresponding symbol-name strings for the GC/R0 modules. */
#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_GC_REAL_STR(name) "pgmGCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_GC_PROT_STR(name) "pgmGCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_GC_32BIT_STR(name) "pgmGCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_GC_PAE_STR(name) "pgmGCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_GC_AMD64_STR(name) "pgmGCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
/* Resolves the guest-mode function pointer member for the current context. */
#define PGM_GST_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
/* NOTE(review): PGM_GST_NAME (without a mode suffix) is not defined in this
 * header; it is presumably defined to one of the PGM_GST_NAME_* variants by
 * the including mode template before PGM_GST_DECL is used - confirm. */
#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)
953
/* Shw: name-mangling macros, one set per shadow paging mode
 * (32Bit, PAE, AMD64), mirroring the PGM_GST_NAME_* family above. */
#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_GC_32BIT_STR(name) "pgmGCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_GC_PAE_STR(name) "pgmGCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_GC_AMD64_STR(name) "pgmGCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
/* NOTE(review): PGM_SHW_NAME is not defined in this header; presumably the
 * including shadow-mode template defines it before using PGM_SHW_DECL - confirm. */
#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
/* Resolves the shadow-mode function pointer member for the current context. */
#define PGM_SHW_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))
965
/* Shw_Gst: combined shadow+guest ("both") mode macros, one per valid
 * shadow/guest paging mode pair. */
#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_REAL(name) PGM_CTX(pgm,BthAMD64Real##name)
#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
/* Symbol-name strings for the GC module. */
#define PGM_BTH_NAME_GC_32BIT_REAL_STR(name) "pgmGCBth32BitReal" #name
#define PGM_BTH_NAME_GC_32BIT_PROT_STR(name) "pgmGCBth32BitProt" #name
#define PGM_BTH_NAME_GC_32BIT_32BIT_STR(name) "pgmGCBth32Bit32Bit" #name
#define PGM_BTH_NAME_GC_PAE_REAL_STR(name) "pgmGCBthPAEReal" #name
#define PGM_BTH_NAME_GC_PAE_PROT_STR(name) "pgmGCBthPAEProt" #name
#define PGM_BTH_NAME_GC_PAE_32BIT_STR(name) "pgmGCBthPAE32Bit" #name
#define PGM_BTH_NAME_GC_PAE_PAE_STR(name) "pgmGCBthPAEPAE" #name
#define PGM_BTH_NAME_GC_AMD64_REAL_STR(name) "pgmGCBthAMD64Real" #name
#define PGM_BTH_NAME_GC_AMD64_PROT_STR(name) "pgmGCBthAMD64Prot" #name
#define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name) "pgmGCBthAMD64AMD64" #name
/* Symbol-name strings for the R0 module. */
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_REAL_STR(name) "pgmR0BthAMD64Real" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
/* NOTE(review): PGM_BTH_NAME is not defined in this header; presumably the
 * including both-mode template defines it before using PGM_BTH_DECL - confirm. */
#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
/* Resolves the both-mode function pointer member for the current context. */
#define PGM_BTH_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
1000
/**
 * Data for each paging mode.
 *
 * One instance of this structure exists per shadow/guest paging mode pair
 * (identified by uShwType / uGstType).  It bundles the mode specific worker
 * functions for all three contexts: ring-3 (R3), ring-0 (R0) and guest
 * context (GC).
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t uGstType;
    /** The shadow mode type. */
    uint32_t uShwType;

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));

    DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLGCCALLBACKMEMBER(int, pfnGCShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
    DECLGCCALLBACKMEMBER(int, pfnGCShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
    DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
    /** HC handler for accesses to the guest CR3 page.
     * NOTE(review): inferred from the name/type - confirm against registration code. */
    HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHCGstWriteHandlerCR3;
    /** Name/description string for the CR3 write handler (psz = zero-terminated string). */
    HCPTRTYPE(const char *) pszHCGstWriteHandlerCR3;

    DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
    DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
    /** GC counterpart of pfnHCGstWriteHandlerCR3 (see note above on that member). */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;

    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
    /** R0 counterpart of pfnHCGstWriteHandlerCR3. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnR0GstWriteHandlerCR3;
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    /* Strict builds only: CR3 consistency assertion worker. */
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif

    DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
    DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif

    DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;
1104
1105
1106
/**
 * Converts a PGM pointer into a VM pointer.
 *
 * The PGM instance data is embedded in the VM structure at offset offVM,
 * so subtracting that (self-describing) offset recovers the VM pointer.
 *
 * @returns Pointer to the VM structure the PGM is part of.
 * @param   pPGM    Pointer to PGM instance data.
 */
/* The parameter is parenthesized so the macro stays correct when invoked
 * with a non-primary expression (e.g. PGM2VM(p + i)) - CERT PRE01-C. */
#define PGM2VM(pPGM)  ( (PVM)((char *)(pPGM) - (pPGM)->offVM) )
1113
1114/**
1115 * PGM Data (part of VM)
1116 */
1117typedef struct PGM
1118{
1119 /** Offset to the VM structure. */
1120 RTINT offVM;
1121
1122 /*
1123 * This will be redefined at least two more times before we're done, I'm sure.
1124 * The current code is only to get on with the coding.
1125 * - 2004-06-10: initial version, bird.
1126 * - 2004-07-02: 1st time, bird.
1127 * - 2004-10-18: 2nd time, bird.
1128 * - 2005-07-xx: 3rd time, bird.
1129 */
1130
1131 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1132 GCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
1133 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1134 GCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
1135
1136 /** The host paging mode. (This is what SUPLib reports.) */
1137 SUPPAGINGMODE enmHostMode;
1138 /** The shadow paging mode. */
1139 PGMMODE enmShadowMode;
1140 /** The guest paging mode. */
1141 PGMMODE enmGuestMode;
1142
1143 /** The current physical address representing in the guest CR3 register. */
1144 RTGCPHYS GCPhysCR3;
1145 /** Pointer to the 5 page CR3 content mapping.
1146 * The first page is always the CR3 (in some form) while the 4 other pages
1147 * are used of the PDs in PAE mode. */
1148 RTGCPTR GCPtrCR3Mapping;
1149 /** The physical address of the currently monitored guest CR3 page.
1150 * When this value is NIL_RTGCPHYS no page is being monitored. */
1151 RTGCPHYS GCPhysGstCR3Monitored;
1152#if HC_ARCH_BITS == 64 || GC_ARCH_BITS == 64
1153 RTGCPHYS GCPhysPadding0; /**< alignment padding. */
1154#endif
1155
1156 /** @name 32-bit Guest Paging.
1157 * @{ */
1158 /** The guest's page directory, HC pointer. */
1159 HCPTRTYPE(PVBOXPD) pGuestPDHC;
1160 /** The guest's page directory, static GC mapping. */
1161 GCPTRTYPE(PVBOXPD) pGuestPDGC;
1162 /** @} */
1163
1164 /** @name PAE Guest Paging.
1165 * @{ */
1166 /** The guest's page directory pointer table, static GC mapping. */
1167 GCPTRTYPE(PX86PDPTR) pGstPaePDPTRGC;
1168 /** The guest's page directory pointer table, HC pointer. */
1169 HCPTRTYPE(PX86PDPTR) pGstPaePDPTRHC;
1170 /** The guest's page directories, HC pointers.
1171 * These are individual pointers and doesn't have to be adjecent.
1172 * These doesn't have to be update to date - use pgmGstGetPaePD() to access them. */
1173 HCPTRTYPE(PX86PDPAE) apGstPaePDsHC[4];
1174 /** The guest's page directories, static GC mapping.
1175 * Unlike the HC array the first entry can be accessed as a 2048 entry PD.
1176 * These doesn't have to be update to date - use pgmGstGetPaePD() to access them. */
1177 GCPTRTYPE(PX86PDPAE) apGstPaePDsGC[4];
1178 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
1179 RTGCPHYS aGCPhysGstPaePDs[4];
1180 /** The physical addresses of the monitored guest page directories (PAE). */
1181 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
1182 /** @} */
1183
1184
1185 /** @name 32-bit Shadow Paging
1186 * @{ */
1187 /** The 32-Bit PD - HC Ptr. */
1188 HCPTRTYPE(PX86PD) pHC32BitPD;
1189 /** The 32-Bit PD - GC Ptr. */
1190 GCPTRTYPE(PX86PD) pGC32BitPD;
1191#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1192 uint32_t u32Padding1; /**< alignment padding. */
1193#endif
1194 /** The Physical Address (HC) of the 32-Bit PD. */
1195 RTHCPHYS HCPhys32BitPD;
1196 /** @} */
1197
1198 /** @name PAE Shadow Paging
1199 * @{ */
1200 /** The four PDs for the low 4GB - HC Ptr.
1201 * Even though these are 4 pointers, what they point at is a single table.
1202 * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
1203 HCPTRTYPE(PX86PDPAE) apHCPaePDs[4];
1204 /** The four PDs for the low 4GB - GC Ptr.
1205 * Same kind of mapping as apHCPaePDs. */
1206 GCPTRTYPE(PX86PDPAE) apGCPaePDs[4];
1207 /** The Physical Address (HC) of the four PDs for the low 4GB.
1208 * These are *NOT* 4 contiguous pages. */
1209 RTHCPHYS aHCPhysPaePDs[4];
1210 /** The PAE PDPTR - HC Ptr. */
1211 HCPTRTYPE(PX86PDPTR) pHCPaePDPTR;
1212 /** The Physical Address (HC) of the PAE PDPTR. */
1213 RTHCPHYS HCPhysPaePDPTR;
1214 /** The PAE PDPTR - GC Ptr. */
1215 GCPTRTYPE(PX86PDPTR) pGCPaePDPTR;
1216 /** @} */
1217
1218 /** @name AMD64 Shadow Paging
1219 * Extends PAE Paging.
1220 * @{ */
1221 /** The Page Map Level 4 table - HC Ptr. */
1222 GCPTRTYPE(PX86PML4) pGCPaePML4;
1223 /** The Page Map Level 4 table - GC Ptr. */
1224 HCPTRTYPE(PX86PML4) pHCPaePML4;
1225 /** The Physical Address (HC) of the Page Map Level 4 table. */
1226 RTHCPHYS HCPhysPaePML4;
1227 /** @}*/
1228
1229 /** @name Function pointers for Shadow paging.
1230 * @{
1231 */
1232 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1233 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
1234 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1235 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1236 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1237 DECLR3CALLBACKMEMBER(int, pfnR3ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1238 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1239
1240 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1241 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1242 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1243 DECLGCCALLBACKMEMBER(int, pfnGCShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1244 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1245
1246 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1247 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1248 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1249 DECLR0CALLBACKMEMBER(int, pfnR0ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1250 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1251
1252 /** @} */
1253
1254 /** @name Function pointers for Guest paging.
1255 * @{
1256 */
1257 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1258 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
1259 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1260 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1261 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1262 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1263 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
1264 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1265 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
1266 HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHCGstWriteHandlerCR3;
1267 HCPTRTYPE(const char *) pszHCGstWriteHandlerCR3;
1268
1269 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1270 DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1271 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1272 DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1273 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
1274 DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1275 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
1276 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
1277
1278 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1279 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1280 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1281 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1282 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
1283 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1284 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
1285 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnR0GstWriteHandlerCR3;
1286 /** @} */
1287
1288 /** @name Function pointers for Both Shadow and Guest paging.
1289 * @{
1290 */
1291 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1292 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1293 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1294 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1295 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1296 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1297 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1298 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1299
1300 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1301 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1302 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1303 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1304 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1305 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1306 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1307
1308 DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1309 DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1310 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1311 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1312 DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1313 DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1314 DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1315 /** @} */
1316
1317 /** Pointer to SHW+GST mode data (function pointers).
1318 * The index into this table is made up from */
1319 R3PTRTYPE(PPGMMODEDATA) paModeData;
1320
1321
1322 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for HC.
1323 * This is sorted by physical address and contains no overlaps.
1324 * The memory locks and other conversions are managed by MM at the moment.
1325 */
1326 HCPTRTYPE(PPGMRAMRANGE) pRamRangesHC;
1327 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for GC.
1328 * This is sorted by physical address and contains no overlaps.
1329 * The memory locks and other conversions are managed by MM at the moment.
1330 */
1331 GCPTRTYPE(PPGMRAMRANGE) pRamRangesGC;
1332 /** The configured RAM size. */
1333 RTUINT cbRamSize;
1334
1335 /** PGM offset based trees - HC Ptr. */
1336 HCPTRTYPE(PPGMTREES) pTreesHC;
1337 /** PGM offset based trees - GC Ptr. */
1338 GCPTRTYPE(PPGMTREES) pTreesGC;
1339
1340 /** Linked list of GC mappings - for GC.
1341 * The list is sorted ascending on address.
1342 */
1343 GCPTRTYPE(PPGMMAPPING) pMappingsGC;
1344 /** Linked list of GC mappings - for HC.
1345 * The list is sorted ascending on address.
1346 */
1347 HCPTRTYPE(PPGMMAPPING) pMappingsHC;
1348
1349 /** If set no conflict checks are required. (boolean) */
1350 bool fMappingsFixed;
1351 /** If set, then no mappings are put into the shadow page table. (boolean) */
1352 bool fDisableMappings;
1353 /** Size of fixed mapping */
1354 uint32_t cbMappingFixed;
1355 /** Base address (GC) of fixed mapping */
1356 RTGCPTR GCPtrMappingFixed;
1357#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1358 uint32_t u32Padding0; /**< alignment padding. */
1359#endif
1360
1361
1362 /** @name Intermediate Context
1363 * @{ */
1364 /** Pointer to the intermediate page directory - Normal. */
1365 HCPTRTYPE(PX86PD) pInterPD;
1366 /** Pointer to the intermedate page tables - Normal.
1367 * There are two page tables, one for the identity mapping and one for
1368 * the host context mapping (of the core code). */
1369 HCPTRTYPE(PX86PT) apInterPTs[2];
1370 /** Pointer to the intermedate page tables - PAE. */
1371 HCPTRTYPE(PX86PTPAE) apInterPaePTs[2];
1372 /** Pointer to the intermedate page directory - PAE. */
1373 HCPTRTYPE(PX86PDPAE) apInterPaePDs[4];
1374 /** Pointer to the intermedate page directory - PAE. */
1375 HCPTRTYPE(PX86PDPTR) pInterPaePDPTR;
1376 /** Pointer to the intermedate page-map level 4 - AMD64. */
1377 HCPTRTYPE(PX86PML4) pInterPaePML4;
1378 /** Pointer to the intermedate page directory - AMD64. */
1379 HCPTRTYPE(PX86PDPTR) pInterPaePDPTR64;
1380 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
1381 RTHCPHYS HCPhysInterPD;
1382 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
1383 RTHCPHYS HCPhysInterPaePDPTR;
1384 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
1385 RTHCPHYS HCPhysInterPaePML4;
1386 /** @} */
1387
1388 /** Base address of the dynamic page mapping area.
1389 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
1390 */
1391 GCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
1392 /** The index of the last entry used in the dynamic page mapping area. */
1393 RTUINT iDynPageMapLast;
1394 /** Cache containing the last entries in the dynamic page mapping area.
1395 * The cache size is covering half of the mapping area. */
1396 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
1397
1398 /** A20 gate mask.
1399 * Our current approach to A20 emulation is to let REM do it and don't bother
1400 * anywhere else. The interesting Guests will be operating with it enabled anyway.
1401 * But whould need arrise, we'll subject physical addresses to this mask. */
1402 RTGCPHYS GCPhysA20Mask;
1403 /** A20 gate state - boolean! */
1404 RTUINT fA20Enabled;
1405
1406 /** What needs syncing (PGM_SYNC_*).
1407 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
1408 * PGMFlushTLB, and PGMR3Load. */
1409 RTUINT fSyncFlags;
1410
1411#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1412 RTUINT uPadding3; /**< alignment padding. */
1413#endif
1414 /** PGM critical section.
1415 * This protects the physical & virtual access handlers, ram ranges,
1416 * and the page flag updating (some of it anyway).
1417 */
1418 PDMCRITSECT CritSect;
1419
1420 /** Shadow Page Pool - HC Ptr. */
1421 HCPTRTYPE(PPGMPOOL) pPoolHC;
1422 /** Shadow Page Pool - GC Ptr. */
1423 GCPTRTYPE(PPGMPOOL) pPoolGC;
1424
1425 /** Flush the cache on the next access. */
1426 bool fPhysCacheFlushPending;
1427/** @todo r=bird: Fix member names!*/
1428 /** PGMPhysRead cache */
1429 PGMPHYSCACHE pgmphysreadcache;
1430 /** PGMPhysWrite cache */
1431 PGMPHYSCACHE pgmphyswritecache;
1432
1433 /** @name Release Statistics
1434 * @{ */
1435 /** The number of times the guest has switched mode since last reset or statistics reset. */
1436 STAMCOUNTER cGuestModeChanges;
1437 /** @} */
1438
1439#ifdef VBOX_WITH_STATISTICS
1440 /** GC: Which statistic this \#PF should be attributed to. */
1441 GCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionGC;
1442 RTGCPTR padding0;
1443 /** HC: Which statistic this \#PF should be attributed to. */
1444 HCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionHC;
1445 RTHCPTR padding1;
1446 STAMPROFILE StatGCTrap0e; /**< GC: PGMGCTrap0eHandler() profiling. */
1447 STAMPROFILE StatTrap0eCSAM; /**< Profiling of the Trap0eHandler body when the cause is CSAM. */
1448 STAMPROFILE StatTrap0eDirtyAndAccessedBits; /**< Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
1449 STAMPROFILE StatTrap0eGuestTrap; /**< Profiling of the Trap0eHandler body when the cause is a guest trap. */
1450 STAMPROFILE StatTrap0eHndPhys; /**< Profiling of the Trap0eHandler body when the cause is a physical handler. */
1451 STAMPROFILE StatTrap0eHndVirt; /**< Profiling of the Trap0eHandler body when the cause is a virtual handler. */
1452 STAMPROFILE StatTrap0eHndUnhandled; /**< Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
1453 STAMPROFILE StatTrap0eMisc; /**< Profiling of the Trap0eHandler body when the cause is not known. */
1454 STAMPROFILE StatTrap0eOutOfSync; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
1455 STAMPROFILE StatTrap0eOutOfSyncHndPhys; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
1456 STAMPROFILE StatTrap0eOutOfSyncHndVirt; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
1457 STAMPROFILE StatTrap0eOutOfSyncObsHnd; /**< Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
1458 STAMPROFILE StatTrap0eSyncPT; /**< Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
1459
1460 STAMCOUNTER StatTrap0eMapHandler; /**< Number of traps due to access handlers in mappings. */
1461 STAMCOUNTER StatGCTrap0eConflicts; /**< GC: The number of times \#PF was caused by an undetected conflict. */
1462
1463 STAMCOUNTER StatGCTrap0eUSNotPresentRead;
1464 STAMCOUNTER StatGCTrap0eUSNotPresentWrite;
1465 STAMCOUNTER StatGCTrap0eUSWrite;
1466 STAMCOUNTER StatGCTrap0eUSReserved;
1467 STAMCOUNTER StatGCTrap0eUSRead;
1468
1469 STAMCOUNTER StatGCTrap0eSVNotPresentRead;
1470 STAMCOUNTER StatGCTrap0eSVNotPresentWrite;
1471 STAMCOUNTER StatGCTrap0eSVWrite;
1472 STAMCOUNTER StatGCTrap0eSVReserved;
1473
1474 STAMCOUNTER StatGCTrap0eUnhandled;
1475 STAMCOUNTER StatGCTrap0eMap;
1476
1477 /** GC: PGMSyncPT() profiling. */
1478 STAMPROFILE StatGCSyncPT;
1479 /** GC: The number of times PGMSyncPT() needed to allocate page tables. */
1480 STAMCOUNTER StatGCSyncPTAlloc;
1481 /** GC: The number of times PGMSyncPT() detected conflicts. */
1482 STAMCOUNTER StatGCSyncPTConflict;
1483 /** GC: The number of times PGMSyncPT() failed. */
1484 STAMCOUNTER StatGCSyncPTFailed;
1485 /** GC: PGMGCInvalidatePage() profiling. */
1486 STAMPROFILE StatGCInvalidatePage;
1487 /** GC: The number of times PGMGCInvalidatePage() was called for a 4KB page. */
1488 STAMCOUNTER StatGCInvalidatePage4KBPages;
1489 /** GC: The number of times PGMGCInvalidatePage() was called for a 4MB page. */
1490 STAMCOUNTER StatGCInvalidatePage4MBPages;
1491 /** GC: The number of times PGMGCInvalidatePage() skipped a 4MB page. */
1492 STAMCOUNTER StatGCInvalidatePage4MBPagesSkip;
1493 /** GC: The number of times PGMGCInvalidatePage() was called for a not accessed page directory. */
1494 STAMCOUNTER StatGCInvalidatePagePDNAs;
1495 /** GC: The number of times PGMGCInvalidatePage() was called for a not present page directory. */
1496 STAMCOUNTER StatGCInvalidatePagePDNPs;
1497 /** GC: The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict). */
1498 STAMCOUNTER StatGCInvalidatePagePDMappings;
1499 /** GC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
1500 STAMCOUNTER StatGCInvalidatePagePDOutOfSync;
    /** GC: The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
1502 STAMCOUNTER StatGCInvalidatePageSkipped;
1503 /** GC: The number of times user page is out of sync was detected in GC. */
1504 STAMCOUNTER StatGCPageOutOfSyncUser;
1505 /** GC: The number of times supervisor page is out of sync was detected in GC. */
1506 STAMCOUNTER StatGCPageOutOfSyncSupervisor;
    /** GC: The number of dynamic page mapping cache misses */
1508 STAMCOUNTER StatDynMapCacheMisses;
    /** GC: The number of dynamic page mapping cache hits */
1510 STAMCOUNTER StatDynMapCacheHits;
1511 /** GC: The number of times pgmGCGuestPDWriteHandler() was successfully called. */
1512 STAMCOUNTER StatGCGuestCR3WriteHandled;
1513 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and we had to fall back to the recompiler. */
1514 STAMCOUNTER StatGCGuestCR3WriteUnhandled;
1515 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and a conflict was detected. */
1516 STAMCOUNTER StatGCGuestCR3WriteConflict;
1517 /** GC: Number of out-of-sync handled pages. */
1518 STAMCOUNTER StatHandlersOutOfSync;
1519 /** GC: Number of traps due to physical access handlers. */
1520 STAMCOUNTER StatHandlersPhysical;
1521 /** GC: Number of traps due to virtual access handlers. */
1522 STAMCOUNTER StatHandlersVirtual;
1523 /** GC: Number of traps due to virtual access handlers found by physical address. */
1524 STAMCOUNTER StatHandlersVirtualByPhys;
1525 /** GC: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
1526 STAMCOUNTER StatHandlersVirtualUnmarked;
1527 /** GC: Number of traps due to access outside range of monitored page(s). */
1528 STAMCOUNTER StatHandlersUnhandled;
1529
1530 /** GC: The number of times pgmGCGuestROMWriteHandler() was successfully called. */
1531 STAMCOUNTER StatGCGuestROMWriteHandled;
1532 /** GC: The number of times pgmGCGuestROMWriteHandler() was called and we had to fall back to the recompiler */
1533 STAMCOUNTER StatGCGuestROMWriteUnhandled;
1534
1535 /** HC: PGMR3InvalidatePage() profiling. */
1536 STAMPROFILE StatHCInvalidatePage;
1537 /** HC: The number of times PGMR3InvalidatePage() was called for a 4KB page. */
1538 STAMCOUNTER StatHCInvalidatePage4KBPages;
1539 /** HC: The number of times PGMR3InvalidatePage() was called for a 4MB page. */
1540 STAMCOUNTER StatHCInvalidatePage4MBPages;
1541 /** HC: The number of times PGMR3InvalidatePage() skipped a 4MB page. */
1542 STAMCOUNTER StatHCInvalidatePage4MBPagesSkip;
1543 /** HC: The number of times PGMR3InvalidatePage() was called for a not accessed page directory. */
1544 STAMCOUNTER StatHCInvalidatePagePDNAs;
1545 /** HC: The number of times PGMR3InvalidatePage() was called for a not present page directory. */
1546 STAMCOUNTER StatHCInvalidatePagePDNPs;
1547 /** HC: The number of times PGMR3InvalidatePage() was called for a page directory containing mappings (no conflict). */
1548 STAMCOUNTER StatHCInvalidatePagePDMappings;
    /** HC: The number of times PGMR3InvalidatePage() was called for an out of sync page directory. */
1550 STAMCOUNTER StatHCInvalidatePagePDOutOfSync;
    /** HC: The number of times PGMR3InvalidatePage() was skipped due to not present shw or pending SyncCR3. */
1552 STAMCOUNTER StatHCInvalidatePageSkipped;
1553 /** HC: PGMR3SyncPT() profiling. */
1554 STAMPROFILE StatHCSyncPT;
1555 /** HC: pgmr3SyncPTResolveConflict() profiling (includes the entire relocation). */
1556 STAMPROFILE StatHCResolveConflict;
1557 /** HC: Number of times PGMR3CheckMappingConflicts() detected a conflict. */
1558 STAMCOUNTER StatHCDetectedConflicts;
1559 /** HC: The total number of times pgmHCGuestPDWriteHandler() was called. */
1560 STAMCOUNTER StatHCGuestPDWrite;
1561 /** HC: The number of times pgmHCGuestPDWriteHandler() detected a conflict */
1562 STAMCOUNTER StatHCGuestPDWriteConflict;
1563
1564 /** HC: The number of pages marked not present for accessed bit emulation. */
1565 STAMCOUNTER StatHCAccessedPage;
1566 /** HC: The number of pages marked read-only for dirty bit tracking. */
1567 STAMCOUNTER StatHCDirtyPage;
1568 /** HC: The number of pages marked read-only for dirty bit tracking. */
1569 STAMCOUNTER StatHCDirtyPageBig;
1570 /** HC: The number of traps generated for dirty bit tracking. */
1571 STAMCOUNTER StatHCDirtyPageTrap;
1572 /** HC: The number of pages already dirty or readonly. */
1573 STAMCOUNTER StatHCDirtyPageSkipped;
1574
1575 /** GC: The number of pages marked not present for accessed bit emulation. */
1576 STAMCOUNTER StatGCAccessedPage;
1577 /** GC: The number of pages marked read-only for dirty bit tracking. */
1578 STAMCOUNTER StatGCDirtyPage;
1579 /** GC: The number of pages marked read-only for dirty bit tracking. */
1580 STAMCOUNTER StatGCDirtyPageBig;
1581 /** GC: The number of traps generated for dirty bit tracking. */
1582 STAMCOUNTER StatGCDirtyPageTrap;
1583 /** GC: The number of pages already dirty or readonly. */
1584 STAMCOUNTER StatGCDirtyPageSkipped;
1585 /** GC: The number of pages marked dirty because of write accesses. */
1586 STAMCOUNTER StatGCDirtiedPage;
1587 /** GC: The number of pages already marked dirty because of write accesses. */
1588 STAMCOUNTER StatGCPageAlreadyDirty;
1589 /** GC: The number of real pages faults during dirty bit tracking. */
1590 STAMCOUNTER StatGCDirtyTrackRealPF;
1591
1592 /** GC: Profiling of the PGMTrackDirtyBit() body */
1593 STAMPROFILE StatGCDirtyBitTracking;
1594 /** HC: Profiling of the PGMTrackDirtyBit() body */
1595 STAMPROFILE StatHCDirtyBitTracking;
1596
1597 /** GC: Profiling of the PGMGstModifyPage() body */
1598 STAMPROFILE StatGCGstModifyPage;
1599 /** HC: Profiling of the PGMGstModifyPage() body */
1600 STAMPROFILE StatHCGstModifyPage;
1601
1602 /** GC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
1603 STAMCOUNTER StatGCSyncPagePDNAs;
1604 /** GC: The number of time we've encountered an out-of-sync PD in SyncPage. */
1605 STAMCOUNTER StatGCSyncPagePDOutOfSync;
1606 /** HC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
1607 STAMCOUNTER StatHCSyncPagePDNAs;
1608 /** HC: The number of time we've encountered an out-of-sync PD in SyncPage. */
1609 STAMCOUNTER StatHCSyncPagePDOutOfSync;
1610
1611 STAMCOUNTER StatSynPT4kGC;
1612 STAMCOUNTER StatSynPT4kHC;
1613 STAMCOUNTER StatSynPT4MGC;
1614 STAMCOUNTER StatSynPT4MHC;
1615
1616 /** Profiling of the PGMFlushTLB() body. */
1617 STAMPROFILE StatFlushTLB;
1618 /** The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
1619 STAMCOUNTER StatFlushTLBNewCR3;
1620 /** The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
1621 STAMCOUNTER StatFlushTLBNewCR3Global;
1622 /** The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
1623 STAMCOUNTER StatFlushTLBSameCR3;
1624 /** The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
1625 STAMCOUNTER StatFlushTLBSameCR3Global;
1626
1627 STAMPROFILE StatGCSyncCR3; /**< GC: PGMSyncCR3() profiling. */
1628 STAMPROFILE StatGCSyncCR3Handlers; /**< GC: Profiling of the PGMSyncCR3() update handler section. */
1629 STAMPROFILE StatGCSyncCR3HandlerVirtualReset; /**< GC: Profiling of the virtual handler resets. */
1630 STAMPROFILE StatGCSyncCR3HandlerVirtualUpdate; /**< GC: Profiling of the virtual handler updates. */
1631 STAMCOUNTER StatGCSyncCR3Global; /**< GC: The number of global CR3 syncs. */
1632 STAMCOUNTER StatGCSyncCR3NotGlobal; /**< GC: The number of non-global CR3 syncs. */
1633 STAMCOUNTER StatGCSyncCR3DstFreed; /**< GC: The number of times we've had to free a shadow entry. */
1634 STAMCOUNTER StatGCSyncCR3DstFreedSrcNP; /**< GC: The number of times we've had to free a shadow entry for which the source entry was not present. */
1635 STAMCOUNTER StatGCSyncCR3DstNotPresent; /**< GC: The number of times we've encountered a not present shadow entry for a present guest entry. */
1636 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPD; /**< GC: The number of times a global page directory wasn't flushed. */
1637 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPT; /**< GC: The number of times a page table with only global entries wasn't flushed. */
1638 STAMCOUNTER StatGCSyncCR3DstCacheHit; /**< GC: The number of times we got some kind of cache hit on a page table. */
1639
1640 STAMPROFILE StatHCSyncCR3; /**< HC: PGMSyncCR3() profiling. */
1641 STAMPROFILE StatHCSyncCR3Handlers; /**< HC: Profiling of the PGMSyncCR3() update handler section. */
1642 STAMPROFILE StatHCSyncCR3HandlerVirtualReset; /**< HC: Profiling of the virtual handler resets. */
1643 STAMPROFILE StatHCSyncCR3HandlerVirtualUpdate; /**< HC: Profiling of the virtual handler updates. */
1644 STAMCOUNTER StatHCSyncCR3Global; /**< HC: The number of global CR3 syncs. */
1645 STAMCOUNTER StatHCSyncCR3NotGlobal; /**< HC: The number of non-global CR3 syncs. */
1646 STAMCOUNTER StatHCSyncCR3DstFreed; /**< HC: The number of times we've had to free a shadow entry. */
1647 STAMCOUNTER StatHCSyncCR3DstFreedSrcNP; /**< HC: The number of times we've had to free a shadow entry for which the source entry was not present. */
1648 STAMCOUNTER StatHCSyncCR3DstNotPresent; /**< HC: The number of times we've encountered a not present shadow entry for a present guest entry. */
1649 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPD; /**< HC: The number of times a global page directory wasn't flushed. */
1650 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPT; /**< HC: The number of times a page table with only global entries wasn't flushed. */
1651 STAMCOUNTER StatHCSyncCR3DstCacheHit; /**< HC: The number of times we got some kind of cache hit on a page table. */
1652
1653 /** GC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
1654 STAMPROFILE StatVirtHandleSearchByPhysGC;
1655 /** HC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
1656 STAMPROFILE StatVirtHandleSearchByPhysHC;
1657 /** HC: The number of times PGMR3HandlerPhysicalReset is called. */
1658 STAMCOUNTER StatHandlePhysicalReset;
1659
1660 STAMPROFILE StatCheckPageFault;
1661 STAMPROFILE StatLazySyncPT;
1662 STAMPROFILE StatMapping;
1663 STAMPROFILE StatOutOfSync;
1664 STAMPROFILE StatHandlers;
1665 STAMPROFILE StatEIPHandlers;
1666 STAMPROFILE StatHCPrefetch;
1667
1668# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1669 /** The number of first time shadowings. */
1670 STAMCOUNTER StatTrackVirgin;
1671 /** The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
1672 STAMCOUNTER StatTrackAliased;
1673 /** The number of times we're tracking using cRef2. */
1674 STAMCOUNTER StatTrackAliasedMany;
1675 /** The number of times we're hitting pages which has overflowed cRef2. */
1676 STAMCOUNTER StatTrackAliasedLots;
    /** The number of times the extent list grows too long. */
1678 STAMCOUNTER StatTrackOverflows;
1679 /** Profiling of SyncPageWorkerTrackDeref (expensive). */
1680 STAMPROFILE StatTrackDeref;
1681# endif
1682
1683 /** Allocated mbs of guest ram */
1684 STAMCOUNTER StatDynRamTotal;
1685 /** Nr of pgmr3PhysGrowRange calls. */
1686 STAMCOUNTER StatDynRamGrow;
1687
1688 STAMCOUNTER StatGCTrap0ePD[X86_PG_ENTRIES];
1689 STAMCOUNTER StatGCSyncPtPD[X86_PG_ENTRIES];
1690 STAMCOUNTER StatGCSyncPagePD[X86_PG_ENTRIES];
1691#endif
1692} PGM, *PPGM;
1693
1694
/** @name PGM::fSyncFlags Flags
 * Bit flags kept in PGM::fSyncFlags; presumably consumed by the CR3
 * sync/reload code (the consumers are outside this part of the file).
 * @{
 */
/** Updates the MM_RAM_FLAGS_VIRTUAL_HANDLER page bit. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL BIT(0)
/** Always sync CR3. */
#define PGM_SYNC_ALWAYS BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page. */
#define PGM_SYNC_MONITOR_CR3 BIT(2)
/** Clear the page pool (a light weight flush). */
#define PGM_SYNC_CLEAR_PGM_POOL BIT(8)
/** @} */
1707
1708
1709__BEGIN_DECLS
1710
1711PGMGCDECL(int) pgmGCGuestPDWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
1712PGMDECL(int) pgmGuestROMWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
1713PGMGCDECL(int) pgmCachePTWriteGC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
1714int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PVBOXPD pPDSrc, int iPDOld);
1715PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
1716void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, int iPDOld, int iPDNew);
1717int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode);
1718int pgmLock(PVM pVM);
1719void pgmUnlock(PVM pVM);
1720
1721void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
1722int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
1723DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
1724#ifdef VBOX_STRICT
1725void pgmHandlerVirtualDumpPhysPages(PVM pVM);
1726#else
1727# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
1728#endif
1729DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
1730
1731
1732#ifdef IN_RING3
1733int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
1734
1735int pgmR3PoolInit(PVM pVM);
1736void pgmR3PoolRelocate(PVM pVM);
1737void pgmR3PoolReset(PVM pVM);
1738
1739#endif
1740#ifdef IN_GC
1741void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
1742#endif
1743int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint16_t iUserTable, PPPGMPOOLPAGE ppPage);
1744PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
1745void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint16_t iUserTable);
1746void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint16_t iUserTable);
1747int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
1748void pgmPoolFlushAll(PVM pVM);
1749void pgmPoolClearAll(PVM pVM);
1750void pgmPoolTrackFlushGCPhysPT(PVM pVM, PRTHCPHYS pHCPhys, uint16_t iShw, uint16_t cRefs);
1751void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PRTHCPHYS pHCPhys, uint16_t iPhysExt);
1752int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PRTHCPHYS pHCPhys);
1753PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
1754void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
1755void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
1756uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
1757void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PRTHCPHYS pHCPhys);
1758#ifdef PGMPOOL_WITH_MONITORING
1759# ifdef IN_RING3
1760void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTHCPTR pvAddress, PDISCPUSTATE pCpu);
1761# else
1762void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTGCPTR pvAddress, PDISCPUSTATE pCpu);
1763# endif
1764int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
1765void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
1766void pgmPoolMonitorModifiedClearAll(PVM pVM);
1767int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
1768int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
1769#endif
1770
1771__END_DECLS
1772
1773
1774/**
1775 * Convert GC Phys to HC Phys.
1776 *
1777 * @returns VBox status.
1778 * @param pPGM PGM handle.
1779 * @param GCPhys The GC physical address.
1780 * @param pHCPhys Where to store the corresponding HC physical address.
1781 */
1782DECLINLINE(int) PGMRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
1783{
1784 /*
1785 * Walk range list.
1786 */
1787 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1788 while (pRam)
1789 {
1790 RTGCPHYS off = GCPhys - pRam->GCPhys;
1791 if (off < pRam->cb)
1792 {
1793 unsigned iPage = off >> PAGE_SHIFT;
1794 /* Physical chunk in dynamically allocated range not present? */
1795 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
1796 {
1797#ifdef IN_RING3
1798 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
1799#else
1800 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1801#endif
1802 if (rc != VINF_SUCCESS)
1803 return rc;
1804 }
1805 *pHCPhys = (pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
1806 return VINF_SUCCESS;
1807 }
1808
1809 pRam = CTXSUFF(pRam->pNext);
1810 }
1811 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1812}
1813
1814
1815/**
1816 * Convert GC Phys to HC Virt.
1817 *
1818 * @returns VBox status.
1819 * @param pPGM PGM handle.
1820 * @param GCPhys The GC physical address.
1821 * @param pHCPtr Where to store the corresponding HC virtual address.
1822 */
1823DECLINLINE(int) PGMRamGCPhys2HCPtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
1824{
1825 /*
1826 * Walk range list.
1827 */
1828 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1829 while (pRam)
1830 {
1831 RTGCPHYS off = GCPhys - pRam->GCPhys;
1832 if (off < pRam->cb)
1833 {
1834 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1835 {
1836 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
1837 /* Physical chunk in dynamically allocated range not present? */
1838 if (RT_UNLIKELY(!CTXSUFF(pRam->pvHCChunk)[idx]))
1839 {
1840#ifdef IN_RING3
1841 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
1842#else
1843 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1844#endif
1845 if (rc != VINF_SUCCESS)
1846 return rc;
1847 }
1848 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pvHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1849 return VINF_SUCCESS;
1850 }
1851 if (pRam->pvHC)
1852 {
1853 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
1854 return VINF_SUCCESS;
1855 }
1856 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1857 }
1858
1859 pRam = CTXSUFF(pRam->pNext);
1860 }
1861 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1862}
1863
1864
1865/**
1866 * Convert GC Phys to HC Virt.
1867 *
1868 * @returns VBox status.
1869 * @param PVM VM handle.
1870 * @param pRam Ram range
1871 * @param GCPhys The GC physical address.
1872 * @param pHCPtr Where to store the corresponding HC virtual address.
1873 */
1874DECLINLINE(int) PGMRamGCPhys2HCPtr(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
1875{
1876 RTGCPHYS off = GCPhys - pRam->GCPhys;
1877 Assert(off < pRam->cb);
1878
1879 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1880 {
1881 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
1882 /* Physical chunk in dynamically allocated range not present? */
1883 if (RT_UNLIKELY(!CTXSUFF(pRam->pvHCChunk)[idx]))
1884 {
1885#ifdef IN_RING3
1886 int rc = pgmr3PhysGrowRange(pVM, GCPhys);
1887#else
1888 int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1889#endif
1890 if (rc != VINF_SUCCESS)
1891 return rc;
1892 }
1893 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pvHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1894 return VINF_SUCCESS;
1895 }
1896 if (pRam->pvHC)
1897 {
1898 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
1899 return VINF_SUCCESS;
1900 }
1901 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1902}
1903
1904
1905/**
1906 * Convert GC Phys to HC Virt and HC Phys.
1907 *
1908 * @returns VBox status.
1909 * @param pPGM PGM handle.
1910 * @param GCPhys The GC physical address.
1911 * @param pHCPtr Where to store the corresponding HC virtual address.
1912 * @param pHCPhys Where to store the HC Physical address and its flags.
1913 */
1914DECLINLINE(int) PGMRamGCPhys2HCPtrAndHCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
1915{
1916 /*
1917 * Walk range list.
1918 */
1919 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1920 while (pRam)
1921 {
1922 RTGCPHYS off = GCPhys - pRam->GCPhys;
1923 if (off < pRam->cb)
1924 {
1925 unsigned iPage = off >> PAGE_SHIFT;
1926 /* Physical chunk in dynamically allocated range not present? */
1927 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
1928 {
1929#ifdef IN_RING3
1930 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
1931#else
1932 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1933#endif
1934 if (rc != VINF_SUCCESS)
1935 return rc;
1936 }
1937 *pHCPhys = pRam->aHCPhys[iPage];
1938
1939 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1940 {
1941 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
1942 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pvHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
1943 return VINF_SUCCESS;
1944 }
1945 if (pRam->pvHC)
1946 {
1947 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
1948 return VINF_SUCCESS;
1949 }
1950 *pHCPtr = 0;
1951 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1952 }
1953
1954 pRam = CTXSUFF(pRam->pNext);
1955 }
1956 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1957}
1958
1959
1960/**
1961 * Convert GC Phys page to a page entry pointer.
1962 *
1963 * This is used by code which may have to update the flags.
1964 *
1965 * @returns VBox status.
1966 * @param pPGM PGM handle.
1967 * @param GCPhys The GC physical address.
1968 * @param ppHCPhys Where to store the pointer to the page entry.
1969 */
1970DECLINLINE(int) PGMRamGCPhys2PagePtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS *ppHCPhys)
1971{
1972 /*
1973 * Walk range list.
1974 */
1975 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1976 while (pRam)
1977 {
1978 RTGCPHYS off = GCPhys - pRam->GCPhys;
1979 if (off < pRam->cb)
1980 {
1981 unsigned iPage = off >> PAGE_SHIFT;
1982 /* Physical chunk in dynamically allocated range not present? */
1983 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
1984 {
1985#ifdef IN_RING3
1986 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
1987#else
1988 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1989#endif
1990 if (rc != VINF_SUCCESS)
1991 return rc;
1992 }
1993 *ppHCPhys = &pRam->aHCPhys[iPage];
1994 return VINF_SUCCESS;
1995 }
1996
1997 pRam = CTXSUFF(pRam->pNext);
1998 }
1999 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2000}
2001
2002
2003/**
2004 * Convert GC Phys page to HC Phys page and flags.
2005 *
2006 * @returns VBox status.
2007 * @param pPGM PGM handle.
2008 * @param GCPhys The GC physical address.
2009 * @param pHCPhys Where to store the corresponding HC physical address of the page
2010 * and the page flags.
2011 */
2012DECLINLINE(int) PGMRamGCPhys2HCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
2013{
2014 /*
2015 * Walk range list.
2016 */
2017 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2018 while (pRam)
2019 {
2020 RTGCPHYS off = GCPhys - pRam->GCPhys;
2021 if (off < pRam->cb)
2022 {
2023 unsigned iPage = off >> PAGE_SHIFT;
2024 /* Physical chunk in dynamically allocated range not present? */
2025 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2026 {
2027#ifdef IN_RING3
2028 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2029#else
2030 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2031#endif
2032 if (rc != VINF_SUCCESS)
2033 return rc;
2034 }
2035 *pHCPhys = pRam->aHCPhys[iPage];
2036 return VINF_SUCCESS;
2037 }
2038
2039 pRam = CTXSUFF(pRam->pNext);
2040 }
2041 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2042}
2043
2044
2045/**
2046 * Clears flags associated with a RAM address.
2047 *
2048 * @returns VBox status code.
2049 * @param pPGM PGM handle.
2050 * @param GCPhys Guest context physical address.
2051 * @param fFlags fFlags to clear. (Bits 0-11.)
2052 */
2053DECLINLINE(int) PGMRamFlagsClearByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
2054{
2055 /*
2056 * Walk range list.
2057 */
2058 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2059 while (pRam)
2060 {
2061 RTGCPHYS off = GCPhys - pRam->GCPhys;
2062 if (off < pRam->cb)
2063 {
2064 unsigned iPage = off >> PAGE_SHIFT;
2065 /* Physical chunk in dynamically allocated range not present? */
2066 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2067 {
2068#ifdef IN_RING3
2069 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2070#else
2071 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2072#endif
2073 if (rc != VINF_SUCCESS)
2074 return rc;
2075 }
2076 fFlags &= ~X86_PTE_PAE_PG_MASK;
2077 pRam->aHCPhys[iPage] &= ~(RTHCPHYS)fFlags;
2078 return VINF_SUCCESS;
2079 }
2080
2081 pRam = CTXSUFF(pRam->pNext);
2082 }
2083 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2084}
2085
2086
2087/**
2088 * Clears flags associated with a RAM address.
2089 *
2090 * @returns VBox status code.
2091 * @param pPGM PGM handle.
2092 * @param GCPhys Guest context physical address.
2093 * @param fFlags fFlags to clear. (Bits 0-11.)
2094 * @param ppRamHint Where to read and store the ram list hint.
2095 * The caller initializes this to NULL before the call.
2096 */
2097DECLINLINE(int) PGMRamFlagsClearByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
2098{
2099 /*
2100 * Check the hint.
2101 */
2102 PPGMRAMRANGE pRam = *ppRamHint;
2103 if (pRam)
2104 {
2105 RTGCPHYS off = GCPhys - pRam->GCPhys;
2106 if (off < pRam->cb)
2107 {
2108 unsigned iPage = off >> PAGE_SHIFT;
2109 /* Physical chunk in dynamically allocated range not present? */
2110 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2111 {
2112#ifdef IN_RING3
2113 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2114#else
2115 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2116#endif
2117 if (rc != VINF_SUCCESS)
2118 return rc;
2119 }
2120 fFlags &= ~X86_PTE_PAE_PG_MASK;
2121 pRam->aHCPhys[iPage] &= ~(RTHCPHYS)fFlags;
2122 return VINF_SUCCESS;
2123 }
2124 }
2125
2126 /*
2127 * Walk range list.
2128 */
2129 pRam = CTXSUFF(pPGM->pRamRanges);
2130 while (pRam)
2131 {
2132 RTGCPHYS off = GCPhys - pRam->GCPhys;
2133 if (off < pRam->cb)
2134 {
2135 unsigned iPage = off >> PAGE_SHIFT;
2136 /* Physical chunk in dynamically allocated range not present? */
2137 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2138 {
2139#ifdef IN_RING3
2140 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2141#else
2142 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2143#endif
2144 if (rc != VINF_SUCCESS)
2145 return rc;
2146 }
2147 fFlags &= ~X86_PTE_PAE_PG_MASK;
2148 pRam->aHCPhys[iPage] &= ~(RTHCPHYS)fFlags;
2149 *ppRamHint = pRam;
2150 return VINF_SUCCESS;
2151 }
2152
2153 pRam = CTXSUFF(pRam->pNext);
2154 }
2155 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2156}
2157
2158/**
2159 * Sets (bitwise OR) flags associated with a RAM address.
2160 *
2161 * @returns VBox status code.
2162 * @param pPGM PGM handle.
2163 * @param GCPhys Guest context physical address.
2164 * @param fFlags fFlags to set clear. (Bits 0-11.)
2165 */
2166DECLINLINE(int) PGMRamFlagsSetByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
2167{
2168 /*
2169 * Walk range list.
2170 */
2171 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2172 while (pRam)
2173 {
2174 RTGCPHYS off = GCPhys - pRam->GCPhys;
2175 if (off < pRam->cb)
2176 {
2177 unsigned iPage = off >> PAGE_SHIFT;
2178 /* Physical chunk in dynamically allocated range not present? */
2179 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2180 {
2181#ifdef IN_RING3
2182 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2183#else
2184 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2185#endif
2186 if (rc != VINF_SUCCESS)
2187 return rc;
2188 }
2189 fFlags &= ~X86_PTE_PAE_PG_MASK;
2190 pRam->aHCPhys[iPage] |= fFlags;
2191 return VINF_SUCCESS;
2192 }
2193
2194 pRam = CTXSUFF(pRam->pNext);
2195 }
2196 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2197}
2198
2199/**
2200 * Sets (bitwise OR) flags associated with a RAM address.
2201 *
2202 * @returns VBox status code.
2203 * @param pPGM PGM handle.
2204 * @param GCPhys Guest context physical address.
2205 * @param fFlags fFlags to set clear. (Bits 0-11.)
2206 * @param ppRamHint Where to read and store the ram list hint.
2207 * The caller initializes this to NULL before the call.
2208 */
2209DECLINLINE(int) PGMRamFlagsSetByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
2210{
2211 /*
2212 * Check the hint.
2213 */
2214 PPGMRAMRANGE pRam = *ppRamHint;
2215 if (pRam)
2216 {
2217 RTGCPHYS off = GCPhys - pRam->GCPhys;
2218 if (off < pRam->cb)
2219 {
2220 unsigned iPage = off >> PAGE_SHIFT;
2221 /* Physical chunk in dynamically allocated range not present? */
2222 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2223 {
2224#ifdef IN_RING3
2225 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2226#else
2227 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2228#endif
2229 if (rc != VINF_SUCCESS)
2230 return rc;
2231 }
2232 fFlags &= ~X86_PTE_PAE_PG_MASK;
2233 pRam->aHCPhys[iPage] |= fFlags;
2234 return VINF_SUCCESS;
2235 }
2236 }
2237
2238 /*
2239 * Walk range list.
2240 */
2241 pRam = CTXSUFF(pPGM->pRamRanges);
2242 while (pRam)
2243 {
2244 RTGCPHYS off = GCPhys - pRam->GCPhys;
2245 if (off < pRam->cb)
2246 {
2247 unsigned iPage = off >> PAGE_SHIFT;
2248 /* Physical chunk in dynamically allocated range not present? */
2249 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2250 {
2251#ifdef IN_RING3
2252 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2253#else
2254 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2255#endif
2256 if (rc != VINF_SUCCESS)
2257 return rc;
2258 }
2259 fFlags &= ~X86_PTE_PAE_PG_MASK;
2260 pRam->aHCPhys[iPage] |= fFlags;
2261 *ppRamHint = pRam;
2262 return VINF_SUCCESS;
2263 }
2264
2265 pRam = CTXSUFF(pRam->pNext);
2266 }
2267 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2268}
2269
2270
2271/**
2272 * Gets the page directory for the specified address.
2273 *
2274 * @returns Pointer to the page directory in question.
2275 * @returns NULL if the page directory is not present or on an invalid page.
2276 * @param pPGM Pointer to the PGM instance data.
2277 * @param GCPtr The address.
2278 */
2279DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCUINTPTR GCPtr)
2280{
2281 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2282 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2283 {
2284 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2285 return CTXSUFF(pPGM->apGstPaePDs)[iPdPtr];
2286
2287 /* cache is out-of-sync. */
2288 PX86PDPAE pPD;
2289 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2290 if (VBOX_SUCCESS(rc))
2291 return pPD;
2292 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2293 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emualted as all 0s. */
2294 }
2295 return NULL;
2296}
2297
2298
2299/**
2300 * Gets the page directory entry for the specified address.
2301 *
2302 * @returns Pointer to the page directory entry in question.
2303 * @returns NULL if the page directory is not present or on an invalid page.
2304 * @param pPGM Pointer to the PGM instance data.
2305 * @param GCPtr The address.
2306 */
2307DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCUINTPTR GCPtr)
2308{
2309 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2310 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2311 {
2312 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
2313 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2314 return &CTXSUFF(pPGM->apGstPaePDs)[iPdPtr]->a[iPD];
2315
2316 /* cache is out-of-sync. */
2317 PX86PDPAE pPD;
2318 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2319 if (VBOX_SUCCESS(rc))
2320 return &pPD->a[iPD];
2321 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2322 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. */
2323 }
2324 return NULL;
2325}
2326
2327
2328/**
2329 * Gets the page directory entry for the specified address.
2330 *
2331 * @returns The page directory entry in question.
2332 * @returns A non-present entry if the page directory is not present or on an invalid page.
2333 * @param pPGM Pointer to the PGM instance data.
2334 * @param GCPtr The address.
2335 */
2336DECLINLINE(uint64_t) pgmGstGetPaePDE(PPGM pPGM, RTGCUINTPTR GCPtr)
2337{
2338 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2339 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2340 {
2341 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
2342 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2343 return CTXSUFF(pPGM->apGstPaePDs)[iPdPtr]->a[iPD].u;
2344
2345 /* cache is out-of-sync. */
2346 PX86PDPAE pPD;
2347 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2348 if (VBOX_SUCCESS(rc))
2349 return pPD->a[iPD].u;
2350 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2351 }
2352 return 0;
2353}
2354
2355
2356/**
2357 * Checks if any of the specified page flags are set for the given page.
2358 *
2359 * @returns true if any of the flags are set.
2360 * @returns false if all the flags are clear.
2361 * @param pPGM PGM handle.
2362 * @param GCPhys The GC physical address.
2363 * @param fFlags The flags to check for.
2364 */
2365DECLINLINE(bool) PGMRamTestFlags(PPGM pPGM, RTGCPHYS GCPhys, uint64_t fFlags)
2366{
2367 /*
2368 * Walk range list.
2369 */
2370 for (PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2371 pRam;
2372 pRam = CTXSUFF(pRam->pNext))
2373 {
2374 RTGCPHYS off = GCPhys - pRam->GCPhys;
2375 if (off < pRam->cb)
2376 return (pRam->aHCPhys[off >> PAGE_SHIFT] & fFlags) != 0;
2377 }
2378 return false;
2379}
2380
2381
2382/**
2383 * Gets the ram flags for a handler.
2384 *
2385 * @returns The ram flags.
2386 * @param pCur The physical handler in question.
2387 */
2388DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PPGMPHYSHANDLER pCur)
2389{
2390 switch (pCur->enmType)
2391 {
2392 case PGMPHYSHANDLERTYPE_PHYSICAL:
2393 return MM_RAM_FLAGS_PHYSICAL_HANDLER;
2394
2395 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
2396 return MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE;
2397
2398 case PGMPHYSHANDLERTYPE_MMIO:
2399 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
2400 return MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL;
2401
2402 default:
2403 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
2404 }
2405}
2406
2407
/**
 * Clears one physical page of a virtual handler.
 *
 * Removes the page's phys-to-virt node from the lookup tree (or from its
 * alias chain when several handlers overlap the same physical page) and
 * clears the virtual-handler ram flags for that page.
 *
 * @param   pPGM    Pointer to the PGM instance.
 * @param   pCur    Virtual handler structure.
 * @param   iPage   Physical page index within pCur->aPhysToVirt.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        /* Only the chain head lives in the AVL tree; pull it out. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree. */
            /* offNextAlias low bits are a byte delta to the next alias node. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            /* The successor becomes the new chain head and takes our tree slot. */
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        /* The tree lookup by our key returns the chain head; walk from there. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%VGp-%VGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK; /* we were the tail; zero prev's offset. */
                else
                {
                    /* Re-point prev's delta at our successor, preserving prev's flag bits. */
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            /* A node whose delta is 0 points at itself: end of chain without finding us. */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %VGp-%VGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, HCSTRING(pCur->pszDesc)));
    /* Mark the node as out of the tree/chain and force reinsertion next time. */
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    int rc = PGMRamFlagsClearByGCPhys(pPGM, pPhys2Virt->Core.Key,
                                      MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE);
    AssertRC(rc);
}
2508
2509
2510/**
2511 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
2512 *
2513 * @returns Pointer to the shadow page structure.
2514 * @param pPool The pool.
2515 * @param HCPhys The HC physical address of the shadow page.
2516 */
2517DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
2518{
2519 /*
2520 * Look up the page.
2521 */
2522 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
2523 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%VHp pPage=%p type=%d\n", HCPhys, pPage, (pPage) ? pPage->enmKind : 0));
2524 return pPage;
2525}
2526
2527
2528/**
2529 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
2530 *
2531 * @returns Pointer to the shadow page structure.
2532 * @param pPool The pool.
2533 * @param idx The pool page index.
2534 */
2535DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
2536{
2537 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
2538 return &pPool->aPages[idx];
2539}
2540
2541
2542#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
2543/**
2544 * Clear references to guest physical memory.
2545 *
2546 * @param pPool The pool.
2547 * @param pPage The page.
2548 * @param pHCPhys Pointer to the aHCPhys entry in the ram range.
2549 */
2550DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PRTHCPHYS pHCPhys)
2551{
2552 /*
2553 * Just deal with the simple case here.
2554 */
2555#ifdef LOG_ENABLED
2556 const RTHCPHYS HCPhysOrg = *pHCPhys;
2557#endif
2558 const unsigned cRefs = *pHCPhys >> MM_RAM_FLAGS_CREFS_SHIFT;
2559 if (cRefs == 1)
2560 {
2561 Assert(pPage->idx == ((*pHCPhys >> MM_RAM_FLAGS_IDX_SHIFT) & MM_RAM_FLAGS_IDX_MASK));
2562 *pHCPhys = *pHCPhys & MM_RAM_FLAGS_NO_REFS_MASK;
2563 }
2564 else
2565 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPage, pHCPhys);
2566 LogFlow(("pgmTrackDerefGCPhys: *pHCPhys=%RHp -> %RHp\n", HCPhysOrg, *pHCPhys));
2567}
2568#endif
2569
2570
2571#ifdef PGMPOOL_WITH_CACHE
2572/**
2573 * Moves the page to the head of the age list.
2574 *
2575 * This is done when the cached page is used in one way or another.
2576 *
2577 * @param pPool The pool.
2578 * @param pPage The cached page.
2579 * @todo inline in PGMInternal.h!
2580 */
2581DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2582{
2583 /*
2584 * Move to the head of the age list.
2585 */
2586 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
2587 {
2588 /* unlink */
2589 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
2590 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
2591 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
2592 else
2593 pPool->iAgeTail = pPage->iAgePrev;
2594
2595 /* insert at head */
2596 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2597 pPage->iAgeNext = pPool->iAgeHead;
2598 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
2599 pPool->iAgeHead = pPage->idx;
2600 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
2601 }
2602}
2603#endif /* PGMPOOL_WITH_CACHE */
2604
/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns true if mappings are enabled, false if they are disabled.
 * @param   pPGM    Pointer to the PGM instance data.
 */

DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
    return !pPGM->fDisableMappings;
}
2616
2617/** @} */
2618
2619#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette