VirtualBox

source: vbox/trunk/src/VBox/VMM/PGM.cpp@ 3840

Last change on this file since 3840 was 3776, checked in by vboxsync, 17 years ago

Compile fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 158.0 KB
Line 
1/* $Id: PGM.cpp 3776 2007-07-23 09:16:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/** @page pg_pgm PGM - The Page Manager and Monitor
24 *
25 *
26 *
27 * @section sec_pg_modes Paging Modes
28 *
29 * There are three memory contexts: Host Context (HC), Guest Context (GC)
30 * and intermediate context. When talking about paging HC can also be referred to
31 * as "host paging", and GC referred to as "shadow paging".
32 *
33 * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
34 * is defined by the host operating system. The mode used in the shadow paging mode
35 * depends on the host paging mode and what the mode the guest is currently in. The
36 * following relation between the two is defined:
37 *
38 * @verbatim
39 Host > 32-bit | PAE | AMD64 |
40 Guest | | | |
41 ==v================================
42 32-bit 32-bit PAE PAE
43 -------|--------|--------|--------|
44 PAE PAE PAE PAE
45 -------|--------|--------|--------|
46 AMD64 AMD64 AMD64 AMD64
47 -------|--------|--------|--------| @endverbatim
48 *
49 * All configuration except those in the diagonal (upper left) are expected to
50 * require special effort from the switcher (i.e. a bit slower).
51 *
52 *
53 *
54 *
55 * @section sec_pg_shw The Shadow Memory Context
56 *
57 *
58 * [..]
59 *
60 * Because of guest context mappings requires PDPTR and PML4 entries to allow
61 * writing on AMD64, the two upper levels will have fixed flags whatever the
62 * guest is thinking of using there. So, when shadowing the PD level we will
63 * calculate the effective flags of PD and all the higher levels. In legacy
64 * PAE mode this only applies to the PWT and PCD bits (the rest are
65 * ignored/reserved/MBZ). We will ignore those bits for the present.
66 *
67 *
68 *
69 * @section sec_pg_int The Intermediate Memory Context
70 *
71 * The world switch goes thru an intermediate memory context which purpose it is
72 * to provide different mappings of the switcher code. All guest mappings are also
73 * present in this context.
74 *
75 * The switcher code is mapped at the same location as on the host, at an
76 * identity mapped location (physical equals virtual address), and at the
77 * hypervisor location.
78 *
79 * PGM maintain page tables for 32-bit, PAE and AMD64 paging modes. This
80 * simplifies switching guest CPU mode and consistency at the cost of more
81 * code to do the work. All memory use for those page tables is located below
82 * 4GB (this includes page tables for guest context mappings).
83 *
84 *
85 * @subsection subsec_pg_int_gc Guest Context Mappings
86 *
87 * During assignment and relocation of a guest context mapping the intermediate
88 * memory context is used to verify the new location.
89 *
90 * Guest context mappings are currently restricted to below 4GB, for reasons
91 * of simplicity. This may change when we implement AMD64 support.
92 *
93 *
94 *
95 *
96 * @section sec_pg_misc Misc
97 *
98 * @subsection subsec_pg_misc_diff Differences Between Legacy PAE and Long Mode PAE
99 *
100 * The differences between legacy PAE and long mode PAE are:
101 * -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
102 * all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
103 * usual meanings while 6 is ignored (AMD). This means that upon switching to
104 * legacy PAE mode we'll have to clear these bits and when going to long mode
105 * they must be set. This applies to both intermediate and shadow contexts,
106 * however we don't need to do it for the intermediate one since we're
107 * executing with CR0.WP at that time.
108 * -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
109 * a page aligned one is required.
110 */
111
112
113
114/** Saved state data unit version. */
115#define PGM_SAVED_STATE_VERSION 5
116
117/*******************************************************************************
118* Header Files *
119*******************************************************************************/
120#define LOG_GROUP LOG_GROUP_PGM
121#include <VBox/dbgf.h>
122#include <VBox/pgm.h>
123#include <VBox/cpum.h>
124#include <VBox/iom.h>
125#include <VBox/sup.h>
126#include <VBox/mm.h>
127#include <VBox/pdm.h>
128#include <VBox/em.h>
129#include <VBox/stam.h>
130#include <VBox/rem.h>
131#include <VBox/dbgf.h>
132#include <VBox/rem.h>
133#include <VBox/selm.h>
134#include <VBox/ssm.h>
135#include "PGMInternal.h"
136#include <VBox/vm.h>
137#include <VBox/dbg.h>
138#include <VBox/hwaccm.h>
139
140#include <iprt/assert.h>
141#include <iprt/alloc.h>
142#include <iprt/asm.h>
143#include <iprt/thread.h>
144#include <iprt/string.h>
145#include <VBox/param.h>
146#include <VBox/err.h>
147
148
149
150/*******************************************************************************
151* Internal Functions *
152*******************************************************************************/
153static int pgmR3InitPaging(PVM pVM);
154static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
155static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
156static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
157static DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser);
158static DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
159#ifdef VBOX_STRICT
160static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
161#endif
162static DECLCALLBACK(int) pgmR3Save(PVM pVM, PSSMHANDLE pSSM);
163static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
164static int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0);
165static void pgmR3ModeDataSwitch(PVM pVM, PGMMODE enmShw, PGMMODE enmGst);
166static PGMMODE pgmR3CalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
167
168#ifdef VBOX_WITH_STATISTICS
169static void pgmR3InitStats(PVM pVM);
170#endif
171
172#ifdef VBOX_WITH_DEBUGGER
173/** @todo all but the two last commands must be converted to 'info'. */
174static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
175static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
176static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
177static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
178#endif
179
180
181/*******************************************************************************
182* Global Variables *
183*******************************************************************************/
184#ifdef VBOX_WITH_DEBUGGER
185/** Command descriptors.
 * Debugger console commands dispatched to the pgmR3Cmd* callbacks declared
 * above; registered once per process from PGMR3Init via DBGCRegisterCommands. */
186static const DBGCCMD g_aCmds[] =
187{
188 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler pszSyntax, ....pszDescription */
189 { "pgmram", 0, 0, NULL, 0, NULL, 0, pgmR3CmdRam, "", "Display the ram ranges." },
190 { "pgmmap", 0, 0, NULL, 0, NULL, 0, pgmR3CmdMap, "", "Display the mapping ranges." },
191 { "pgmsync", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSync, "", "Sync the CR3 page." },
192 { "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
193};
194#endif
195
196
197
198
199#if 1/// @todo ndef RT_ARCH_AMD64
200/*
201 * Shadow - 32-bit mode
202 */
203#define PGM_SHW_TYPE PGM_TYPE_32BIT
204#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
205#define PGM_SHW_NAME_GC_STR(name) PGM_SHW_NAME_GC_32BIT_STR(name)
206#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_32BIT_STR(name)
207#include "PGMShw.h"
208
209/* Guest - real mode */
210#define PGM_GST_TYPE PGM_TYPE_REAL
211#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
212#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
213#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
214#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
215#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_32BIT_REAL_STR(name)
216#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_REAL_STR(name)
217#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
218#include "PGMGst.h"
219#include "PGMBth.h"
220#undef BTH_PGMPOOLKIND_PT_FOR_PT
221#undef PGM_BTH_NAME
222#undef PGM_BTH_NAME_GC_STR
223#undef PGM_BTH_NAME_R0_STR
224#undef PGM_GST_TYPE
225#undef PGM_GST_NAME
226#undef PGM_GST_NAME_GC_STR
227#undef PGM_GST_NAME_R0_STR
228
229/* Guest - protected mode */
230#define PGM_GST_TYPE PGM_TYPE_PROT
231#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
232#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
233#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
234#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
235#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_32BIT_PROT_STR(name)
236#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_PROT_STR(name)
237#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
238#include "PGMGst.h"
239#include "PGMBth.h"
240#undef BTH_PGMPOOLKIND_PT_FOR_PT
241#undef PGM_BTH_NAME
242#undef PGM_BTH_NAME_GC_STR
243#undef PGM_BTH_NAME_R0_STR
244#undef PGM_GST_TYPE
245#undef PGM_GST_NAME
246#undef PGM_GST_NAME_GC_STR
247#undef PGM_GST_NAME_R0_STR
248
249/* Guest - 32-bit mode */
250#define PGM_GST_TYPE PGM_TYPE_32BIT
251#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
252#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_32BIT_STR(name)
253#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
254#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
255#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_32BIT_32BIT_STR(name)
256#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)
257#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
258#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
259#include "PGMGst.h"
260#include "PGMBth.h"
261#undef BTH_PGMPOOLKIND_PT_FOR_BIG
262#undef BTH_PGMPOOLKIND_PT_FOR_PT
263#undef PGM_BTH_NAME
264#undef PGM_BTH_NAME_GC_STR
265#undef PGM_BTH_NAME_R0_STR
266#undef PGM_GST_TYPE
267#undef PGM_GST_NAME
268#undef PGM_GST_NAME_GC_STR
269#undef PGM_GST_NAME_R0_STR
270
271#undef PGM_SHW_TYPE
272#undef PGM_SHW_NAME
273#undef PGM_SHW_NAME_GC_STR
274#undef PGM_SHW_NAME_R0_STR
275#endif /* !RT_ARCH_AMD64 */
276
277
278/*
279 * Shadow - PAE mode
280 */
281#define PGM_SHW_TYPE PGM_TYPE_PAE
282#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
283#define PGM_SHW_NAME_GC_STR(name) PGM_SHW_NAME_GC_PAE_STR(name)
284#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_PAE_STR(name)
285#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
286#include "PGMShw.h"
287
288/* Guest - real mode */
289#define PGM_GST_TYPE PGM_TYPE_REAL
290#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
291#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
292#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
293#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
294#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_REAL_STR(name)
295#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_REAL_STR(name)
296#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
297#include "PGMBth.h"
298#undef BTH_PGMPOOLKIND_PT_FOR_PT
299#undef PGM_BTH_NAME
300#undef PGM_BTH_NAME_GC_STR
301#undef PGM_BTH_NAME_R0_STR
302#undef PGM_GST_TYPE
303#undef PGM_GST_NAME
304#undef PGM_GST_NAME_GC_STR
305#undef PGM_GST_NAME_R0_STR
306
307/* Guest - protected mode */
308#define PGM_GST_TYPE PGM_TYPE_PROT
309#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
310#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
311#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
312#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
313#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_PROT_STR(name)
314#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_PROT_STR(name)
315#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
316#include "PGMBth.h"
317#undef BTH_PGMPOOLKIND_PT_FOR_PT
318#undef PGM_BTH_NAME
319#undef PGM_BTH_NAME_GC_STR
320#undef PGM_BTH_NAME_R0_STR
321#undef PGM_GST_TYPE
322#undef PGM_GST_NAME
323#undef PGM_GST_NAME_GC_STR
324#undef PGM_GST_NAME_R0_STR
325
326/* Guest - 32-bit mode */
327#define PGM_GST_TYPE PGM_TYPE_32BIT
328#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
329#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_32BIT_STR(name)
330#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
331#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
332#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_32BIT_STR(name)
333#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_32BIT_STR(name)
334#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
335#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
336#include "PGMBth.h"
337#undef BTH_PGMPOOLKIND_PT_FOR_BIG
338#undef BTH_PGMPOOLKIND_PT_FOR_PT
339#undef PGM_BTH_NAME
340#undef PGM_BTH_NAME_GC_STR
341#undef PGM_BTH_NAME_R0_STR
342#undef PGM_GST_TYPE
343#undef PGM_GST_NAME
344#undef PGM_GST_NAME_GC_STR
345#undef PGM_GST_NAME_R0_STR
346
347/* Guest - PAE mode */
348#define PGM_GST_TYPE PGM_TYPE_PAE
349#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
350#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PAE_STR(name)
351#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PAE_STR(name)
352#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
353#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_PAE_STR(name)
354#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_PAE_STR(name)
355#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
356#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
357#include "PGMGst.h"
358#include "PGMBth.h"
359#undef BTH_PGMPOOLKIND_PT_FOR_BIG
360#undef BTH_PGMPOOLKIND_PT_FOR_PT
361#undef PGM_BTH_NAME
362#undef PGM_BTH_NAME_GC_STR
363#undef PGM_BTH_NAME_R0_STR
364#undef PGM_GST_TYPE
365#undef PGM_GST_NAME
366#undef PGM_GST_NAME_GC_STR
367#undef PGM_GST_NAME_R0_STR
368
369#undef PGM_SHW_TYPE
370#undef PGM_SHW_NAME
371#undef PGM_SHW_NAME_GC_STR
372#undef PGM_SHW_NAME_R0_STR
373
374
375/*
376 * Shadow - AMD64 mode
377 */
378#define PGM_SHW_TYPE PGM_TYPE_AMD64
379#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
380#define PGM_SHW_NAME_GC_STR(name) PGM_SHW_NAME_GC_AMD64_STR(name)
381#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_AMD64_STR(name)
382#include "PGMShw.h"
383
384/* Guest - real mode */
385#define PGM_GST_TYPE PGM_TYPE_REAL
386#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
387#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
388#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
389#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_REAL(name)
390#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_REAL_STR(name)
391#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_REAL_STR(name)
392#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
393#include "PGMBth.h"
394#undef BTH_PGMPOOLKIND_PT_FOR_PT
395#undef PGM_BTH_NAME
396#undef PGM_BTH_NAME_GC_STR
397#undef PGM_BTH_NAME_R0_STR
398#undef PGM_GST_TYPE
399#undef PGM_GST_NAME
400#undef PGM_GST_NAME_GC_STR
401#undef PGM_GST_NAME_R0_STR
402
403/* Guest - protected mode */
404#define PGM_GST_TYPE PGM_TYPE_PROT
405#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
406#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
407#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
408#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
409#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_PROT_STR(name)
410#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_PROT_STR(name)
411#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
412#include "PGMBth.h"
413#undef BTH_PGMPOOLKIND_PT_FOR_PT
414#undef PGM_BTH_NAME
415#undef PGM_BTH_NAME_GC_STR
416#undef PGM_BTH_NAME_R0_STR
417#undef PGM_GST_TYPE
418#undef PGM_GST_NAME
419#undef PGM_GST_NAME_GC_STR
420#undef PGM_GST_NAME_R0_STR
421
422/* Guest - AMD64 mode */
423#define PGM_GST_TYPE PGM_TYPE_AMD64
424#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
425#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_AMD64_STR(name)
426#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_AMD64_STR(name)
427#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
428#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_AMD64_STR(name)
429#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)
430#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
431#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
432#include "PGMGst.h"
433#include "PGMBth.h"
434#undef BTH_PGMPOOLKIND_PT_FOR_BIG
435#undef BTH_PGMPOOLKIND_PT_FOR_PT
436#undef PGM_BTH_NAME
437#undef PGM_BTH_NAME_GC_STR
438#undef PGM_BTH_NAME_R0_STR
439#undef PGM_GST_TYPE
440#undef PGM_GST_NAME
441#undef PGM_GST_NAME_GC_STR
442#undef PGM_GST_NAME_R0_STR
443
444#undef PGM_SHW_TYPE
445#undef PGM_SHW_NAME
446#undef PGM_SHW_NAME_GC_STR
447#undef PGM_SHW_NAME_R0_STR
448
449
450/**
451 * Initiates the paging of VM.
452 *
 * Initializes the PGM portion of the VM structure, registers the PGM saved
 * state unit, creates the PGM critical section and the handler trees, then
 * performs paging initialization (pgmR3InitPaging) and page pool setup.
 * On success the debug info handlers, statistics and (when built with
 * VBOX_WITH_DEBUGGER) the debugger commands are registered as well.
 *
453 * @returns VBox status code.
454 * @param pVM Pointer to VM structure.
455 */
456PGMR3DECL(int) PGMR3Init(PVM pVM)
457{
458 LogFlow(("PGMR3Init:\n"));
459
460 /*
461 * Assert alignment and sizes.
462 */
463 AssertRelease(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));
464
465 /*
466 * Init the structure.
467 */
468 pVM->pgm.s.offVM = RT_OFFSETOF(VM, pgm.s);
469 pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
470 pVM->pgm.s.enmGuestMode = PGMMODE_INVALID;
471 pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
472 pVM->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
473 pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
474 pVM->pgm.s.fA20Enabled = true; /* The A20 gate starts out enabled. */
475 pVM->pgm.s.pGstPaePDPTRHC = NULL;
476 pVM->pgm.s.pGstPaePDPTRGC = 0;
477 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apGstPaePDsHC); i++)
478 {
479 pVM->pgm.s.apGstPaePDsHC[i] = NULL;
480 pVM->pgm.s.apGstPaePDsGC[i] = 0;
481 pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
482 }
483
484#ifdef VBOX_STRICT
485 VMR3AtStateRegister(pVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
486#endif
487
488 /*
489 * Get the configured RAM size - to estimate saved state size.
490 */
491 uint64_t cbRam;
492 int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
493 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
494 cbRam = pVM->pgm.s.cbRamSize = 0;
495 else if (VBOX_SUCCESS(rc))
496 {
497 if (cbRam < PAGE_SIZE)
498 cbRam = 0;
499 cbRam = RT_ALIGN_64(cbRam, PAGE_SIZE);
500 pVM->pgm.s.cbRamSize = (RTUINT)cbRam;
501 }
502 else
503 {
504 AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc));
505 return rc;
506 }
507
508 /*
509 * Register saved state data unit.
 * The size estimate passed in covers the RAM plus the PGM structure itself.
510 */
511 rc = SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
512 NULL, pgmR3Save, NULL,
513 NULL, pgmR3Load, NULL);
514 if (VBOX_FAILURE(rc))
515 return rc;
516
517 /* Initialise PGM critical section. */
518 rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, "PGM");
519 AssertRCReturn(rc, rc);
520
521 /*
522 * Trees
 * (Physical/virtual handler trees, allocated in hypervisor memory so they
 * are reachable from both HC and GC - see the HC2GC conversion below.)
523 */
524 rc = MMHyperAlloc(pVM, sizeof(PGMTREES), 0, MM_TAG_PGM, (void **)&pVM->pgm.s.pTreesHC);
525 if (VBOX_SUCCESS(rc))
526 {
527 pVM->pgm.s.pTreesGC = MMHyperHC2GC(pVM, pVM->pgm.s.pTreesHC);
528
529 /*
530 * Init the paging.
531 */
532 rc = pgmR3InitPaging(pVM);
533 }
534 if (VBOX_SUCCESS(rc))
535 {
536 /*
537 * Init the page pool.
538 */
539 rc = pgmR3PoolInit(pVM);
540 }
541 if (VBOX_SUCCESS(rc))
542 {
543 /*
544 * Info & statistics
545 */
546 DBGFR3InfoRegisterInternal(pVM, "mode",
547 "Shows the current paging mode. "
548 "Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
549 pgmR3InfoMode);
550 DBGFR3InfoRegisterInternal(pVM, "pgmcr3",
551 "Dumps all the entries in the top level paging table. No arguments.",
552 pgmR3InfoCr3);
553 DBGFR3InfoRegisterInternal(pVM, "phys",
554 "Dumps all the physical address ranges. No arguments.",
555 pgmR3PhysInfo);
556 DBGFR3InfoRegisterInternal(pVM, "handlers",
557 "Dumps physical and virtual handlers. "
558 "Pass 'phys' or 'virt' as argument if only one kind is wanted.",
559 pgmR3InfoHandlers);
560
561 STAM_REL_REG(pVM, &pVM->pgm.s.cGuestModeChanges, STAMTYPE_COUNTER, "/PGM/cGuestModeChanges", STAMUNIT_OCCURENCES, "Number of guest mode changes.");
562#ifdef VBOX_WITH_STATISTICS
563 pgmR3InitStats(pVM);
564#endif
565#ifdef VBOX_WITH_DEBUGGER
566 /*
567 * Debugger commands.
 * Registered at most once per process; guarded by the static flag below.
568 */
569 static bool fRegisteredCmds = false;
570 if (!fRegisteredCmds)
571 {
572 int rc = DBGCRegisterCommands(&g_aCmds[0], ELEMENTS(g_aCmds));
573 if (VBOX_SUCCESS(rc))
574 fRegisteredCmds = true;
575 }
576#endif
577 return VINF_SUCCESS;
578 }
579 /* No cleanup necessary, MM frees all memory. */
580
581 return rc;
582}
583
584
585/**
586 * Init paging.
587 *
588 * Since we need to check what mode the host is operating in before we can choose
589 * the right paging functions for the host we have to delay this until R0 has
590 * been initialized.
591 *
592 * @returns VBox status code.
593 * @param pVM VM handle.
594 */
595static int pgmR3InitPaging(PVM pVM)
596{
597 /*
598 * Force a recalculation of modes and switcher so everyone gets notified.
599 */
600 pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
601 pVM->pgm.s.enmGuestMode = PGMMODE_INVALID;
602 pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
603
604 /*
605 * Allocate static mapping space for whatever the cr3 register
606 * points to and in the case of PAE mode to the 4 PDs.
607 */
608 int rc = MMR3HyperReserve(pVM, PAGE_SIZE * 5, "CR3 mapping", &pVM->pgm.s.GCPtrCR3Mapping);
609 if (VBOX_FAILURE(rc))
610 {
611 AssertMsgFailed(("Failed to reserve two pages for cr mapping in HMA, rc=%Vrc\n", rc));
612 return rc;
613 }
614 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
615
616 /*
617 * Allocate pages for the three possible intermediate contexts
618 * (AMD64, PAE and plain 32-Bit). We maintain all three contexts
619 * for the sake of simplicity. The AMD64 uses the PAE for the
620 * lower levels, making the total number of pages 11 (3 + 7 + 1).
621 *
622 * We assume that two page tables will be enought for the core code
623 * mappings (HC virtual and identity).
624 */
625 pVM->pgm.s.pInterPD = (PX86PD)MMR3PageAllocLow(pVM);
626 pVM->pgm.s.apInterPTs[0] = (PX86PT)MMR3PageAllocLow(pVM);
627 pVM->pgm.s.apInterPTs[1] = (PX86PT)MMR3PageAllocLow(pVM);
628 pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM);
629 pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM);
630 pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM);
631 pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM);
632 pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM);
633 pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
634 pVM->pgm.s.pInterPaePDPTR = (PX86PDPTR)MMR3PageAllocLow(pVM);
635 pVM->pgm.s.pInterPaePDPTR64 = (PX86PDPTR)MMR3PageAllocLow(pVM);
636 pVM->pgm.s.pInterPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM);
637 if ( !pVM->pgm.s.pInterPD
638 || !pVM->pgm.s.apInterPTs[0]
639 || !pVM->pgm.s.apInterPTs[1]
640 || !pVM->pgm.s.apInterPaePTs[0]
641 || !pVM->pgm.s.apInterPaePTs[1]
642 || !pVM->pgm.s.apInterPaePDs[0]
643 || !pVM->pgm.s.apInterPaePDs[1]
644 || !pVM->pgm.s.apInterPaePDs[2]
645 || !pVM->pgm.s.apInterPaePDs[3]
646 || !pVM->pgm.s.pInterPaePDPTR
647 || !pVM->pgm.s.pInterPaePDPTR64
648 || !pVM->pgm.s.pInterPaePML4)
649 {
650 AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
651 return VERR_NO_PAGE_MEMORY;
652 }
653
654 pVM->pgm.s.HCPhysInterPD = MMPage2Phys(pVM, pVM->pgm.s.pInterPD);
655 AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
656 pVM->pgm.s.HCPhysInterPaePDPTR = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPTR);
657 AssertRelease(pVM->pgm.s.HCPhysInterPaePDPTR != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPTR & PAGE_OFFSET_MASK));
658 pVM->pgm.s.HCPhysInterPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePML4);
659 AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK));
660
661 /*
662 * Initialize the pages, setting up the PML4 and PDPTR for repetitive 4GB action.
663 */
664 ASMMemZeroPage(pVM->pgm.s.pInterPD);
665 ASMMemZeroPage(pVM->pgm.s.apInterPTs[0]);
666 ASMMemZeroPage(pVM->pgm.s.apInterPTs[1]);
667
668 ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[0]);
669 ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[1]);
670
671 ASMMemZeroPage(pVM->pgm.s.pInterPaePDPTR);
672 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apInterPaePDs); i++)
673 {
674 ASMMemZeroPage(pVM->pgm.s.apInterPaePDs[i]);
675 pVM->pgm.s.pInterPaePDPTR->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT
676 | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[i]);
677 }
678
679 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.pInterPaePDPTR64->a); i++)
680 {
681 const unsigned iPD = i % ELEMENTS(pVM->pgm.s.apInterPaePDs);
682 pVM->pgm.s.pInterPaePDPTR64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
683 | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[iPD]);
684 }
685
686 RTHCPHYS HCPhysInterPaePDPTR64 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPTR64);
687 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.pInterPaePML4->a); i++)
688 pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
689 | HCPhysInterPaePDPTR64;
690
691 /*
692 * Allocate pages for the three possible guest contexts (AMD64, PAE and plain 32-Bit).
693 * We allocate pages for all three posibilities to in order to simplify mappings and
694 * avoid resource failure during mode switches. So, we need to cover all levels of the
695 * of the first 4GB down to PD level.
696 * As with the intermediate context, AMD64 uses the PAE PDPTR and PDs.
697 */
698 pVM->pgm.s.pHC32BitPD = (PX86PD)MMR3PageAllocLow(pVM);
699 pVM->pgm.s.apHCPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM);
700 pVM->pgm.s.apHCPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM);
701 AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[0] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[1]);
702 pVM->pgm.s.apHCPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM);
703 AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[1] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[2]);
704 pVM->pgm.s.apHCPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
705 AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[3]);
706 pVM->pgm.s.pHCPaePDPTR = (PX86PDPTR)MMR3PageAllocLow(pVM);
707 pVM->pgm.s.pHCPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM);
708 if ( !pVM->pgm.s.pHC32BitPD
709 || !pVM->pgm.s.apHCPaePDs[0]
710 || !pVM->pgm.s.apHCPaePDs[1]
711 || !pVM->pgm.s.apHCPaePDs[2]
712 || !pVM->pgm.s.apHCPaePDs[3]
713 || !pVM->pgm.s.pHCPaePDPTR
714 || !pVM->pgm.s.pHCPaePML4)
715 {
716 AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
717 return VERR_NO_PAGE_MEMORY;
718 }
719
720 /* get physical addresses. */
721 pVM->pgm.s.HCPhys32BitPD = MMPage2Phys(pVM, pVM->pgm.s.pHC32BitPD);
722 Assert(MMPagePhys2Page(pVM, pVM->pgm.s.HCPhys32BitPD) == pVM->pgm.s.pHC32BitPD);
723 pVM->pgm.s.aHCPhysPaePDs[0] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[0]);
724 pVM->pgm.s.aHCPhysPaePDs[1] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[1]);
725 pVM->pgm.s.aHCPhysPaePDs[2] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[2]);
726 pVM->pgm.s.aHCPhysPaePDs[3] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[3]);
727 pVM->pgm.s.HCPhysPaePDPTR = MMPage2Phys(pVM, pVM->pgm.s.pHCPaePDPTR);
728 pVM->pgm.s.HCPhysPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pHCPaePML4);
729
730 /*
731 * Initialize the pages, setting up the PML4 and PDPTR for action below 4GB.
732 */
733 ASMMemZero32(pVM->pgm.s.pHC32BitPD, PAGE_SIZE);
734
735 ASMMemZero32(pVM->pgm.s.pHCPaePDPTR, PAGE_SIZE);
736 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apHCPaePDs); i++)
737 {
738 ASMMemZero32(pVM->pgm.s.apHCPaePDs[i], PAGE_SIZE);
739 pVM->pgm.s.pHCPaePDPTR->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT | pVM->pgm.s.aHCPhysPaePDs[i];
740 /* The flags will be corrected when entering and leaving long mode. */
741 }
742
743 ASMMemZero32(pVM->pgm.s.pHCPaePML4, PAGE_SIZE);
744 pVM->pgm.s.pHCPaePML4->a[0].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_A
745 | PGM_PLXFLAGS_PERMANENT | pVM->pgm.s.HCPhysPaePDPTR;
746
747 CPUMSetHyperCR3(pVM, (uint32_t)pVM->pgm.s.HCPhys32BitPD);
748
749 /*
750 * Initialize paging workers and mode from current host mode
751 * and the guest running in real mode.
752 */
753 pVM->pgm.s.enmHostMode = SUPGetPagingMode();
754 switch (pVM->pgm.s.enmHostMode)
755 {
756 case SUPPAGINGMODE_32_BIT:
757 case SUPPAGINGMODE_32_BIT_GLOBAL:
758 case SUPPAGINGMODE_PAE:
759 case SUPPAGINGMODE_PAE_GLOBAL:
760 case SUPPAGINGMODE_PAE_NX:
761 case SUPPAGINGMODE_PAE_GLOBAL_NX:
762 break;
763
764 case SUPPAGINGMODE_AMD64:
765 case SUPPAGINGMODE_AMD64_GLOBAL:
766 case SUPPAGINGMODE_AMD64_NX:
767 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
768#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL
769 if (ARCH_BITS != 64)
770 {
771 AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
772 LogRel(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
773 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
774 }
775#endif
776 break;
777 default:
778 AssertMsgFailed(("Host mode %d is not supported\n", pVM->pgm.s.enmHostMode));
779 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
780 }
781 rc = pgmR3ModeDataInit(pVM, false /* don't resolve GC and R0 syms yet */);
782 if (VBOX_SUCCESS(rc))
783 rc = pgmR3ChangeMode(pVM, PGMMODE_REAL);
784 if (VBOX_SUCCESS(rc))
785 {
786 LogFlow(("pgmR3InitPaging: returns successfully\n"));
787#if HC_ARCH_BITS == 64
788LogRel(("Debug: HCPhys32BitPD=%VHp aHCPhysPaePDs={%VHp,%VHp,%VHp,%VHp} HCPhysPaePDPTR=%VHp HCPhysPaePML4=%VHp\n",
789 pVM->pgm.s.HCPhys32BitPD, pVM->pgm.s.aHCPhysPaePDs[0], pVM->pgm.s.aHCPhysPaePDs[1], pVM->pgm.s.aHCPhysPaePDs[2], pVM->pgm.s.aHCPhysPaePDs[3],
790 pVM->pgm.s.HCPhysPaePDPTR, pVM->pgm.s.HCPhysPaePML4));
791LogRel(("Debug: HCPhysInterPD=%VHp HCPhysInterPaePDPTR=%VHp HCPhysInterPaePML4=%VHp\n",
792 pVM->pgm.s.HCPhysInterPD, pVM->pgm.s.HCPhysInterPaePDPTR, pVM->pgm.s.HCPhysInterPaePML4));
793LogRel(("Debug: apInterPTs={%VHp,%VHp} apInterPaePTs={%VHp,%VHp} apInterPaePDs={%VHp,%VHp,%VHp,%VHp} pInterPaePDPTR64=%VHp\n",
794 MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]),
795 MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]),
796 MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
797 MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPTR64)));
798#endif
799
800 return VINF_SUCCESS;
801 }
802
803 LogFlow(("pgmR3InitPaging: returns %Vrc\n", rc));
804 return rc;
805}
806
807
808#ifdef VBOX_WITH_STATISTICS
809/**
810 * Init statistics
811 */
812static void pgmR3InitStats(PVM pVM)
813{
814 PPGM pPGM = &pVM->pgm.s;
815 STAM_REG(pVM, &pPGM->StatGCInvalidatePage, STAMTYPE_PROFILE, "/PGM/GC/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMGCInvalidatePage() profiling.");
816 STAM_REG(pVM, &pPGM->StatGCInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a 4KB page.");
817 STAM_REG(pVM, &pPGM->StatGCInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a 4MB page.");
818 STAM_REG(pVM, &pPGM->StatGCInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() skipped a 4MB page.");
819 STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict).");
820 STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a not accessed page directory.");
821 STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a not present page directory.");
822 STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for an out of sync page directory.");
823 STAM_REG(pVM, &pPGM->StatGCInvalidatePageSkipped, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/Skipped", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
824 STAM_REG(pVM, &pPGM->StatGCSyncPT, STAMTYPE_PROFILE, "/PGM/GC/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGCSyncPT() body.");
825 STAM_REG(pVM, &pPGM->StatGCAccessedPage, STAMTYPE_COUNTER, "/PGM/GC/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
826 STAM_REG(pVM, &pPGM->StatGCDirtyPage, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
827 STAM_REG(pVM, &pPGM->StatGCDirtyPageBig, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
828 STAM_REG(pVM, &pPGM->StatGCDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
829 STAM_REG(pVM, &pPGM->StatGCDirtyPageSkipped, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Skipped", STAMUNIT_OCCURENCES, "The number of pages already dirty or readonly.");
830 STAM_REG(pVM, &pPGM->StatGCDirtiedPage, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/SetDirty", STAMUNIT_OCCURENCES, "The number of pages marked dirty because of write accesses.");
831 STAM_REG(pVM, &pPGM->StatGCDirtyTrackRealPF, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/RealPF", STAMUNIT_OCCURENCES, "The number of real pages faults during dirty bit tracking.");
832 STAM_REG(pVM, &pPGM->StatGCPageAlreadyDirty, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/AlreadySet", STAMUNIT_OCCURENCES, "The number of pages already marked dirty because of write accesses.");
833 STAM_REG(pVM, &pPGM->StatGCDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/GC/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrackDirtyBit() body.");
834 STAM_REG(pVM, &pPGM->StatGCSyncPTAlloc, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Alloc", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() needed to allocate page tables.");
835 STAM_REG(pVM, &pPGM->StatGCSyncPTConflict, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Conflicts", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() detected conflicts.");
836 STAM_REG(pVM, &pPGM->StatGCSyncPTFailed, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Failed", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() failed.");
837
838 STAM_REG(pVM, &pPGM->StatGCTrap0e, STAMTYPE_PROFILE, "/PGM/GC/Trap0e", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGCTrap0eHandler() body.");
839 STAM_REG(pVM, &pPGM->StatCheckPageFault, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/CheckPageFault", STAMUNIT_TICKS_PER_CALL, "Profiling of checking for dirty/access emulation faults.");
840 STAM_REG(pVM, &pPGM->StatLazySyncPT, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of lazy page table syncing.");
841 STAM_REG(pVM, &pPGM->StatMapping, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/Mapping", STAMUNIT_TICKS_PER_CALL, "Profiling of checking virtual mappings.");
842 STAM_REG(pVM, &pPGM->StatOutOfSync, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of out of sync page handling.");
843 STAM_REG(pVM, &pPGM->StatHandlers, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking handlers.");
844 STAM_REG(pVM, &pPGM->StatEIPHandlers, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/EIPHandlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking eip handlers.");
845 STAM_REG(pVM, &pPGM->StatTrap0eCSAM, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/CSAM", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is CSAM.");
846 STAM_REG(pVM, &pPGM->StatTrap0eDirtyAndAccessedBits, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/DirtyAndAccessedBits", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
847 STAM_REG(pVM, &pPGM->StatTrap0eGuestTrap, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/GuestTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a guest trap.");
848 STAM_REG(pVM, &pPGM->StatTrap0eHndPhys, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerPhysical", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a physical handler.");
849 STAM_REG(pVM, &pPGM->StatTrap0eHndVirt, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerVirtual",STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
850 STAM_REG(pVM, &pPGM->StatTrap0eHndUnhandled, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerUnhandled", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
851 STAM_REG(pVM, &pPGM->StatTrap0eMisc, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is not known.");
852 STAM_REG(pVM, &pPGM->StatTrap0eOutOfSync, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
853 STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncHndPhys, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncHndPhys", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
854 STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncHndVirt, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncHndVirt", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
855 STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncObsHnd, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncObsHnd", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
856 STAM_REG(pVM, &pPGM->StatTrap0eSyncPT, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");
857
858 STAM_REG(pVM, &pPGM->StatTrap0eMapHandler, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Mapping", STAMUNIT_OCCURENCES, "Number of traps due to access handlers in mappings.");
859 STAM_REG(pVM, &pPGM->StatHandlersOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/OutOfSync", STAMUNIT_OCCURENCES, "Number of traps due to out-of-sync handled pages.");
860 STAM_REG(pVM, &pPGM->StatHandlersPhysical, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Physical", STAMUNIT_OCCURENCES, "Number of traps due to physical access handlers.");
861 STAM_REG(pVM, &pPGM->StatHandlersVirtual, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Virtual", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers.");
862 STAM_REG(pVM, &pPGM->StatHandlersVirtualByPhys, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/VirtualByPhys", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers by physical address.");
863 STAM_REG(pVM, &pPGM->StatHandlersVirtualUnmarked, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/VirtualUnmarked", STAMUNIT_OCCURENCES,"Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
864 STAM_REG(pVM, &pPGM->StatHandlersUnhandled, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Unhandled", STAMUNIT_OCCURENCES, "Number of traps due to access outside range of monitored page(s).");
865
866 STAM_REG(pVM, &pPGM->StatGCTrap0eConflicts, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Conflicts", STAMUNIT_OCCURENCES, "The number of times #PF was caused by an undetected conflict.");
867 STAM_REG(pVM, &pPGM->StatGCTrap0eUSNotPresentRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/NPRead", STAMUNIT_OCCURENCES, "Number of user mode not present read page faults.");
868 STAM_REG(pVM, &pPGM->StatGCTrap0eUSNotPresentWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/NPWrite", STAMUNIT_OCCURENCES, "Number of user mode not present write page faults.");
869 STAM_REG(pVM, &pPGM->StatGCTrap0eUSWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Write", STAMUNIT_OCCURENCES, "Number of user mode write page faults.");
870 STAM_REG(pVM, &pPGM->StatGCTrap0eUSReserved, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Reserved", STAMUNIT_OCCURENCES, "Number of user mode reserved bit page faults.");
871 STAM_REG(pVM, &pPGM->StatGCTrap0eUSRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Read", STAMUNIT_OCCURENCES, "Number of user mode read page faults.");
872
873 STAM_REG(pVM, &pPGM->StatGCTrap0eSVNotPresentRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/NPRead", STAMUNIT_OCCURENCES, "Number of supervisor mode not present read page faults.");
874 STAM_REG(pVM, &pPGM->StatGCTrap0eSVNotPresentWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/NPWrite", STAMUNIT_OCCURENCES, "Number of supervisor mode not present write page faults.");
875 STAM_REG(pVM, &pPGM->StatGCTrap0eSVWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/Write", STAMUNIT_OCCURENCES, "Number of supervisor mode write page faults.");
876 STAM_REG(pVM, &pPGM->StatGCTrap0eSVReserved, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/Reserved", STAMUNIT_OCCURENCES, "Number of supervisor mode reserved bit page faults.");
877 STAM_REG(pVM, &pPGM->StatGCTrap0eUnhandled, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/GuestPF/Unhandled", STAMUNIT_OCCURENCES, "Number of guest real page faults.");
878 STAM_REG(pVM, &pPGM->StatGCTrap0eMap, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/GuestPF/Map", STAMUNIT_OCCURENCES, "Number of guest page faults due to map accesses.");
879
880
881 STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteHandled, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteInt", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was successfully handled.");
882 STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteUnhandled, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteEmu", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was passed back to the recompiler.");
883 STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteConflict, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteConflict", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 monitoring detected a conflict.");
884
885 STAM_REG(pVM, &pPGM->StatGCPageOutOfSyncSupervisor, STAMTYPE_COUNTER, "/PGM/GC/OutOfSync/SuperVisor", STAMUNIT_OCCURENCES, "Number of traps due to pages out of sync.");
886 STAM_REG(pVM, &pPGM->StatGCPageOutOfSyncUser, STAMTYPE_COUNTER, "/PGM/GC/OutOfSync/User", STAMUNIT_OCCURENCES, "Number of traps due to pages out of sync.");
887
888 STAM_REG(pVM, &pPGM->StatGCGuestROMWriteHandled, STAMTYPE_COUNTER, "/PGM/GC/ROMWriteInt", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was successfully handled.");
889 STAM_REG(pVM, &pPGM->StatGCGuestROMWriteUnhandled, STAMTYPE_COUNTER, "/PGM/GC/ROMWriteEmu", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was passed back to the recompiler.");
890
891 STAM_REG(pVM, &pPGM->StatDynMapCacheHits, STAMTYPE_COUNTER, "/PGM/GC/DynMapCache/Hits" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache hits.");
892 STAM_REG(pVM, &pPGM->StatDynMapCacheMisses, STAMTYPE_COUNTER, "/PGM/GC/DynMapCache/Misses" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache misses.");
893
894 STAM_REG(pVM, &pPGM->StatHCDetectedConflicts, STAMTYPE_COUNTER, "/PGM/HC/DetectedConflicts", STAMUNIT_OCCURENCES, "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
895 STAM_REG(pVM, &pPGM->StatHCGuestPDWrite, STAMTYPE_COUNTER, "/PGM/HC/PDWrite", STAMUNIT_OCCURENCES, "The total number of times pgmHCGuestPDWriteHandler() was called.");
896 STAM_REG(pVM, &pPGM->StatHCGuestPDWriteConflict, STAMTYPE_COUNTER, "/PGM/HC/PDWriteConflict", STAMUNIT_OCCURENCES, "The number of times pgmHCGuestPDWriteHandler() detected a conflict.");
897
898 STAM_REG(pVM, &pPGM->StatHCInvalidatePage, STAMTYPE_PROFILE, "/PGM/HC/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMHCInvalidatePage() profiling.");
899 STAM_REG(pVM, &pPGM->StatHCInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a 4KB page.");
900 STAM_REG(pVM, &pPGM->StatHCInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a 4MB page.");
901 STAM_REG(pVM, &pPGM->StatHCInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() skipped a 4MB page.");
902 STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a page directory containing mappings (no conflict).");
903 STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a not accessed page directory.");
904 STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a not present page directory.");
905 STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for an out of sync page directory.");
906 STAM_REG(pVM, &pPGM->StatHCInvalidatePageSkipped, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/Skipped", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
907 STAM_REG(pVM, &pPGM->StatHCResolveConflict, STAMTYPE_PROFILE, "/PGM/HC/ResolveConflict", STAMUNIT_TICKS_PER_CALL, "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
908 STAM_REG(pVM, &pPGM->StatHCPrefetch, STAMTYPE_PROFILE, "/PGM/HC/Prefetch", STAMUNIT_TICKS_PER_CALL, "PGMR3PrefetchPage profiling.");
909
910 STAM_REG(pVM, &pPGM->StatHCSyncPT, STAMTYPE_PROFILE, "/PGM/HC/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMR3SyncPT() body.");
911 STAM_REG(pVM, &pPGM->StatHCAccessedPage, STAMTYPE_COUNTER, "/PGM/HC/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
912 STAM_REG(pVM, &pPGM->StatHCDirtyPage, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
913 STAM_REG(pVM, &pPGM->StatHCDirtyPageBig, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
914 STAM_REG(pVM, &pPGM->StatHCDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
915 STAM_REG(pVM, &pPGM->StatHCDirtyPageSkipped, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Skipped", STAMUNIT_OCCURENCES, "The number of pages already dirty or readonly.");
916 STAM_REG(pVM, &pPGM->StatHCDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/HC/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrackDirtyBit() body.");
917
918 STAM_REG(pVM, &pPGM->StatGCSyncPagePDNAs, STAMTYPE_COUNTER, "/PGM/GC/SyncPagePDNAs", STAMUNIT_OCCURENCES, "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
919 STAM_REG(pVM, &pPGM->StatGCSyncPagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/SyncPagePDOutOfSync", STAMUNIT_OCCURENCES, "The number of time we've encountered an out-of-sync PD in SyncPage.");
920 STAM_REG(pVM, &pPGM->StatHCSyncPagePDNAs, STAMTYPE_COUNTER, "/PGM/HC/SyncPagePDNAs", STAMUNIT_OCCURENCES, "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
921 STAM_REG(pVM, &pPGM->StatHCSyncPagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/HC/SyncPagePDOutOfSync", STAMUNIT_OCCURENCES, "The number of time we've encountered an out-of-sync PD in SyncPage.");
922
923 STAM_REG(pVM, &pPGM->StatFlushTLB, STAMTYPE_PROFILE, "/PGM/FlushTLB", STAMUNIT_OCCURENCES, "Profiling of the PGMFlushTLB() body.");
924 STAM_REG(pVM, &pPGM->StatFlushTLBNewCR3, STAMTYPE_COUNTER, "/PGM/FlushTLB/NewCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
925 STAM_REG(pVM, &pPGM->StatFlushTLBNewCR3Global, STAMTYPE_COUNTER, "/PGM/FlushTLB/NewCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
926 STAM_REG(pVM, &pPGM->StatFlushTLBSameCR3, STAMTYPE_COUNTER, "/PGM/FlushTLB/SameCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
927 STAM_REG(pVM, &pPGM->StatFlushTLBSameCR3Global, STAMTYPE_COUNTER, "/PGM/FlushTLB/SameCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");
928
929 STAM_REG(pVM, &pPGM->StatGCSyncCR3, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
930 STAM_REG(pVM, &pPGM->StatGCSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
931 STAM_REG(pVM, &pPGM->StatGCSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers/VirtualUpdate",STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
932 STAM_REG(pVM, &pPGM->StatGCSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
933 STAM_REG(pVM, &pPGM->StatGCSyncCR3Global, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
934 STAM_REG(pVM, &pPGM->StatGCSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
935 STAM_REG(pVM, &pPGM->StatGCSyncCR3DstCacheHit, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstChacheHit", STAMUNIT_OCCURENCES, "The number of times we got some kind of a cache hit.");
936 STAM_REG(pVM, &pPGM->StatGCSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
937 STAM_REG(pVM, &pPGM->StatGCSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
938 STAM_REG(pVM, &pPGM->StatGCSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
939 STAM_REG(pVM, &pPGM->StatGCSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
940 STAM_REG(pVM, &pPGM->StatGCSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");
941
942 STAM_REG(pVM, &pPGM->StatHCSyncCR3, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
943 STAM_REG(pVM, &pPGM->StatHCSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
944 STAM_REG(pVM, &pPGM->StatHCSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers/VirtualUpdate",STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
945 STAM_REG(pVM, &pPGM->StatHCSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
946 STAM_REG(pVM, &pPGM->StatHCSyncCR3Global, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
947 STAM_REG(pVM, &pPGM->StatHCSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
948 STAM_REG(pVM, &pPGM->StatHCSyncCR3DstCacheHit, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstChacheHit", STAMUNIT_OCCURENCES, "The number of times we got some kind of a cache hit.");
949 STAM_REG(pVM, &pPGM->StatHCSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
950 STAM_REG(pVM, &pPGM->StatHCSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
951 STAM_REG(pVM, &pPGM->StatHCSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
952 STAM_REG(pVM, &pPGM->StatHCSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
953 STAM_REG(pVM, &pPGM->StatHCSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");
954
955 STAM_REG(pVM, &pPGM->StatVirtHandleSearchByPhysGC, STAMTYPE_PROFILE, "/PGM/VirtHandler/SearchByPhys/GC", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr in GC.");
956 STAM_REG(pVM, &pPGM->StatVirtHandleSearchByPhysHC, STAMTYPE_PROFILE, "/PGM/VirtHandler/SearchByPhys/HC", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr in HC.");
957 STAM_REG(pVM, &pPGM->StatHandlePhysicalReset, STAMTYPE_COUNTER, "/PGM/HC/HandlerPhysicalReset", STAMUNIT_OCCURENCES, "The number of times PGMR3HandlerPhysicalReset is called.");
958
959 STAM_REG(pVM, &pPGM->StatHCGstModifyPage, STAMTYPE_PROFILE, "/PGM/HC/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
960 STAM_REG(pVM, &pPGM->StatGCGstModifyPage, STAMTYPE_PROFILE, "/PGM/GC/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
961
962 STAM_REG(pVM, &pPGM->StatSynPT4kGC, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/4k", STAMUNIT_OCCURENCES, "Nr of 4k PT syncs");
963 STAM_REG(pVM, &pPGM->StatSynPT4kHC, STAMTYPE_COUNTER, "/PGM/HC/SyncPT/4k", STAMUNIT_OCCURENCES, "Nr of 4k PT syncs");
964 STAM_REG(pVM, &pPGM->StatSynPT4MGC, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
965 STAM_REG(pVM, &pPGM->StatSynPT4MHC, STAMTYPE_COUNTER, "/PGM/HC/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
966
967 STAM_REG(pVM, &pPGM->StatDynRamTotal, STAMTYPE_COUNTER, "/PGM/RAM/TotalAlloc", STAMUNIT_MEGABYTES, "Allocated mbs of guest ram.");
968 STAM_REG(pVM, &pPGM->StatDynRamGrow, STAMTYPE_COUNTER, "/PGM/RAM/Grow", STAMUNIT_OCCURENCES, "Nr of pgmr3PhysGrowRange calls.");
969
970#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
971 STAM_REG(pVM, &pPGM->StatTrackVirgin, STAMTYPE_COUNTER, "/PGM/Track/Virgin", STAMUNIT_OCCURENCES, "The number of first time shadowings");
972 STAM_REG(pVM, &pPGM->StatTrackAliased, STAMTYPE_COUNTER, "/PGM/Track/Aliased", STAMUNIT_OCCURENCES, "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
973 STAM_REG(pVM, &pPGM->StatTrackAliasedMany, STAMTYPE_COUNTER, "/PGM/Track/AliasedMany", STAMUNIT_OCCURENCES, "The number of times we're tracking using cRef2.");
974 STAM_REG(pVM, &pPGM->StatTrackAliasedLots, STAMTYPE_COUNTER, "/PGM/Track/AliasedLots", STAMUNIT_OCCURENCES, "The number of times we're hitting pages which has overflowed cRef2");
975 STAM_REG(pVM, &pPGM->StatTrackOverflows, STAMTYPE_COUNTER, "/PGM/Track/Overflows", STAMUNIT_OCCURENCES, "The number of times the extent list grows to long.");
976 STAM_REG(pVM, &pPGM->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Track/Deref", STAMUNIT_OCCURENCES, "Profiling of SyncPageWorkerTrackDeref (expensive).");
977#endif
978
979 for (unsigned i = 0; i < PAGE_ENTRIES; i++)
980 {
981 /** @todo r=bird: We need a STAMR3RegisterF()! */
982 char szName[32];
983
984 RTStrPrintf(szName, sizeof(szName), "/PGM/GC/PD/Trap0e/%04X", i);
985 int rc = STAMR3Register(pVM, &pPGM->StatGCTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of traps in page directory n.");
986 AssertRC(rc);
987
988 RTStrPrintf(szName, sizeof(szName), "/PGM/GC/PD/SyncPt/%04X", i);
989 rc = STAMR3Register(pVM, &pPGM->StatGCSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of syncs per PD n.");
990 AssertRC(rc);
991
992 RTStrPrintf(szName, sizeof(szName), "/PGM/GC/PD/SyncPage/%04X", i);
993 rc = STAMR3Register(pVM, &pPGM->StatGCSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of out of sync pages per page directory n.");
994 AssertRC(rc);
995 }
996}
997#endif /* VBOX_WITH_STATISTICS */
998
999/**
1000 * Init the PGM bits that rely on VMMR0 and MM to be fully initialized.
1001 *
1002 * The dynamic mapping area will also be allocated and initialized at this
1003 * time. We could allocate it during PGMR3Init of course, but the mapping
1004 * wouldn't be allocated at that time preventing us from setting up the
1005 * page table entries with the dummy page.
1006 *
1007 * @returns VBox status code.
1008 * @param pVM VM handle.
1009 */
1010PGMR3DECL(int) PGMR3InitDynMap(PVM pVM)
1011{
1012 /*
1013 * Reserve space for mapping the paging pages into guest context.
1014 */
1015 int rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + ELEMENTS(pVM->pgm.s.apHCPaePDs) + 1 + 2 + 2), "Paging", &pVM->pgm.s.pGC32BitPD);
1016 AssertRCReturn(rc, rc);
1017 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1018
1019 /*
1020 * Reserve space for the dynamic mappings.
1021 */
1022 /** @todo r=bird: Need to verify that the checks for crossing PTs are correct here. They seems to be assuming 4MB PTs.. */
1023 rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping", &pVM->pgm.s.pbDynPageMapBaseGC);
1024 if ( VBOX_SUCCESS(rc)
1025 && (pVM->pgm.s.pbDynPageMapBaseGC >> PGDIR_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> PGDIR_SHIFT))
1026 rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &pVM->pgm.s.pbDynPageMapBaseGC);
1027 if (VBOX_SUCCESS(rc))
1028 {
1029 AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> PGDIR_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> PGDIR_SHIFT));
1030 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1031 }
1032 return rc;
1033}
1034
1035
1036/**
1037 * Ring-3 init finalizing.
1038 *
1039 * @returns VBox status code.
1040 * @param pVM The VM handle.
1041 */
1042PGMR3DECL(int) PGMR3InitFinalize(PVM pVM)
1043{
1044 /*
1045 * Map the paging pages into the guest context.
1046 */
1047 RTGCPTR GCPtr = pVM->pgm.s.pGC32BitPD;
1048 AssertReleaseReturn(GCPtr, VERR_INTERNAL_ERROR);
1049
1050 int rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhys32BitPD, PAGE_SIZE, 0);
1051 AssertRCReturn(rc, rc);
1052 pVM->pgm.s.pGC32BitPD = GCPtr;
1053 GCPtr += PAGE_SIZE;
1054 GCPtr += PAGE_SIZE; /* reserved page */
1055
1056 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apHCPaePDs); i++)
1057 {
1058 rc = PGMMap(pVM, GCPtr, pVM->pgm.s.aHCPhysPaePDs[i], PAGE_SIZE, 0);
1059 AssertRCReturn(rc, rc);
1060 pVM->pgm.s.apGCPaePDs[i] = GCPtr;
1061 GCPtr += PAGE_SIZE;
1062 }
1063 /* A bit of paranoia is justified. */
1064 AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[0] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[1]);
1065 AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[1] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[2]);
1066 AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[2] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[3]);
1067 GCPtr += PAGE_SIZE; /* reserved page */
1068
1069 rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysPaePDPTR, PAGE_SIZE, 0);
1070 AssertRCReturn(rc, rc);
1071 pVM->pgm.s.pGCPaePDPTR = GCPtr;
1072 GCPtr += PAGE_SIZE;
1073 GCPtr += PAGE_SIZE; /* reserved page */
1074
1075 rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysPaePML4, PAGE_SIZE, 0);
1076 AssertRCReturn(rc, rc);
1077 pVM->pgm.s.pGCPaePML4 = GCPtr;
1078 GCPtr += PAGE_SIZE;
1079 GCPtr += PAGE_SIZE; /* reserved page */
1080
1081
1082 /*
1083 * Reserve space for the dynamic mappings.
1084 * Initialize the dynamic mapping pages with dummy pages to simply the cache.
1085 */
1086 /* get the pointer to the page table entries. */
1087 PPGMMAPPING pMapping = pgmGetMapping(pVM, pVM->pgm.s.pbDynPageMapBaseGC);
1088 AssertRelease(pMapping);
1089 const uintptr_t off = pVM->pgm.s.pbDynPageMapBaseGC - pMapping->GCPtr;
1090 const unsigned iPT = off >> X86_PD_SHIFT;
1091 const unsigned iPG = (off >> X86_PT_SHIFT) & X86_PT_MASK;
1092 pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTGC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
1093 pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsGC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
1094
1095 /* init cache */
1096 RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
1097 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); i++)
1098 pVM->pgm.s.aHCPhysDynPageMapCache[i] = HCPhysDummy;
1099
1100 for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE)
1101 {
1102 rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + i, HCPhysDummy, PAGE_SIZE, 0);
1103 AssertRCReturn(rc, rc);
1104 }
1105
1106 return rc;
1107}
1108
1109
1110/**
1111 * Applies relocations to data and code managed by this
1112 * component. This function will be called at init and
1113 * whenever the VMM need to relocate it self inside the GC.
1114 *
1115 * @param pVM The VM.
1116 * @param offDelta Relocation delta relative to old location.
1117 */
1118PGMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1119{
1120 LogFlow(("PGMR3Relocate\n"));
1121
1122 /*
1123 * Paging stuff.
1124 */
1125 pVM->pgm.s.GCPtrCR3Mapping += offDelta;
1126 /** @todo move this into shadow and guest specific relocation functions. */
1127 AssertMsg(pVM->pgm.s.pGC32BitPD, ("Init order, no relocation before paging is initialized!\n"));
1128 pVM->pgm.s.pGC32BitPD += offDelta;
1129 pVM->pgm.s.pGuestPDGC += offDelta;
1130 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apGCPaePDs); i++)
1131 pVM->pgm.s.apGCPaePDs[i] += offDelta;
1132 pVM->pgm.s.pGCPaePDPTR += offDelta;
1133 pVM->pgm.s.pGCPaePML4 += offDelta;
1134
1135 pgmR3ModeDataInit(pVM, true /* resolve GC/R0 symbols */);
1136 pgmR3ModeDataSwitch(pVM, pVM->pgm.s.enmShadowMode, pVM->pgm.s.enmGuestMode);
1137
1138 PGM_SHW_PFN(Relocate, pVM)(pVM, offDelta);
1139 PGM_GST_PFN(Relocate, pVM)(pVM, offDelta);
1140 PGM_BTH_PFN(Relocate, pVM)(pVM, offDelta);
1141
1142 /*
1143 * Trees.
1144 */
1145 pVM->pgm.s.pTreesGC = MMHyperHC2GC(pVM, pVM->pgm.s.pTreesHC);
1146
1147 /*
1148 * Ram ranges.
1149 */
1150 if (pVM->pgm.s.pRamRangesHC)
1151 {
1152 pVM->pgm.s.pRamRangesGC = MMHyperHC2GC(pVM, pVM->pgm.s.pRamRangesHC);
1153 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC; pCur->pNextHC; pCur = pCur->pNextHC)
1154 {
1155 pCur->pNextGC = MMHyperHC2GC(pVM, pCur->pNextHC);
1156 if (pCur->pavHCChunkGC)
1157 pCur->pavHCChunkGC = MMHyperHC2GC(pVM, pCur->pavHCChunkHC);
1158 }
1159 }
1160
1161 /*
1162 * Update the two page directories with all page table mappings.
1163 * (One or more of them have changed, that's why we're here.)
1164 */
1165 pVM->pgm.s.pMappingsGC = MMHyperHC2GC(pVM, pVM->pgm.s.pMappingsR3);
1166 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur->pNextR3; pCur = pCur->pNextR3)
1167 pCur->pNextGC = MMHyperHC2GC(pVM, pCur->pNextR3);
1168
1169 /* Relocate GC addresses of Page Tables. */
1170 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1171 {
1172 for (RTHCUINT i = 0; i < pCur->cPTs; i++)
1173 {
1174 pCur->aPTs[i].pPTGC = MMHyperR3ToGC(pVM, pCur->aPTs[i].pPTR3);
1175 pCur->aPTs[i].paPaePTsGC = MMHyperR3ToGC(pVM, pCur->aPTs[i].paPaePTsR3);
1176 }
1177 }
1178
1179 /*
1180 * Dynamic page mapping area.
1181 */
1182 pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;
1183 pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta;
1184 pVM->pgm.s.pbDynPageMapBaseGC += offDelta;
1185
1186 /*
1187 * Physical and virtual handlers.
1188 */
1189 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
1190 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
1191
1192 /*
1193 * The page pool.
1194 */
1195 pgmR3PoolRelocate(pVM);
1196}
1197
1198
1199/**
1200 * Callback function for relocating a physical access handler.
1201 *
1202 * @returns 0 (continue enum)
1203 * @param pNode Pointer to a PGMPHYSHANDLER node.
1204 * @param pvUser Pointer to the offDelta. This is a pointer to the delta since we're
1205 * not certain the delta will fit in a void pointer for all possible configs.
1206 */
1207static DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1208{
1209 PPGMPHYSHANDLER pHandler = (PPGMPHYSHANDLER)pNode;
1210 RTGCINTPTR offDelta = *(PRTGCINTPTR)pvUser;
1211 if (pHandler->pfnHandlerGC)
1212 pHandler->pfnHandlerGC += offDelta;
1213 if ((RTGCUINTPTR)pHandler->pvUserGC >= 0x10000)
1214 pHandler->pvUserGC += offDelta;
1215 return 0;
1216}
1217
1218
1219/**
1220 * Callback function for relocating a virtual access handler.
1221 *
1222 * @returns 0 (continue enum)
1223 * @param pNode Pointer to a PGMVIRTHANDLER node.
1224 * @param pvUser Pointer to the offDelta. This is a pointer to the delta since we're
1225 * not certain the delta will fit in a void pointer for all possible configs.
1226 */
1227static DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser)
1228{
1229 PPGMVIRTHANDLER pHandler = (PPGMVIRTHANDLER)pNode;
1230 RTGCINTPTR offDelta = *(PRTGCINTPTR)pvUser;
1231 Assert(pHandler->pfnHandlerGC);
1232 pHandler->pfnHandlerGC += offDelta;
1233 return 0;
1234}
1235
1236
1237/**
1238 * The VM is being reset.
1239 *
1240 * For the PGM component this means that any PD write monitors
1241 * needs to be removed.
1242 *
1243 * @param pVM VM handle.
1244 */
1245PGMR3DECL(void) PGMR3Reset(PVM pVM)
1246{
1247 LogFlow(("PGMR3Reset:\n"));
1248 VM_ASSERT_EMT(pVM);
1249
1250 /*
1251 * Unfix any fixed mappings and disable CR3 monitoring.
1252 */
1253 pVM->pgm.s.fMappingsFixed = false;
1254 pVM->pgm.s.GCPtrMappingFixed = 0;
1255 pVM->pgm.s.cbMappingFixed = 0;
1256
1257 int rc = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
1258 AssertRC(rc);
1259#ifdef DEBUG
1260 PGMR3DumpMappings(pVM);
1261#endif
1262
1263 /*
1264 * Reset the shadow page pool.
1265 */
1266 pgmR3PoolReset(pVM);
1267
1268 /*
1269 * Re-init other members.
1270 */
1271 pVM->pgm.s.fA20Enabled = true;
1272
1273 /*
1274 * Clear the FFs PGM owns.
1275 */
1276 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1277 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1278
1279 /*
1280 * Zero memory.
1281 */
1282 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesHC; pRam; pRam = pRam->pNextHC)
1283 {
1284 unsigned iPage = pRam->cb >> PAGE_SHIFT;
1285 while (iPage-- > 0)
1286 {
1287 if (pRam->aHCPhys[iPage] & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2))
1288 {
1289 Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aHCPhys[iPage] & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO)));
1290 continue;
1291 }
1292 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1293 {
1294 unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
1295 if (pRam->pavHCChunkHC[iChunk])
1296 ASMMemZero32((char *)pRam->pavHCChunkHC[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
1297 }
1298 else
1299 ASMMemZero32((char *)pRam->pvHC + (iPage << PAGE_SHIFT), PAGE_SIZE);
1300 }
1301 }
1302
1303 /*
1304 * Switch mode back to real mode.
1305 */
1306 rc = pgmR3ChangeMode(pVM, PGMMODE_REAL);
1307 AssertReleaseRC(rc);
1308 STAM_REL_COUNTER_RESET(&pVM->pgm.s.cGuestModeChanges);
1309}
1310
1311
1312/**
1313 * Terminates the PGM.
1314 *
1315 * @returns VBox status code.
1316 * @param pVM Pointer to VM structure.
1317 */
1318PGMR3DECL(int) PGMR3Term(PVM pVM)
1319{
1320 return PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
1321}
1322
1323
#ifdef VBOX_STRICT
/**
 * VM state change callback for clearing fNoMorePhysWrites after
 * a snapshot has been created.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
{
    /* Once the VM is executing again, physical writes are permitted once more. */
    if (enmState != VMSTATE_RUNNING)
        return;
    pVM->pgm.s.fNoMorePhysWrites = false;
}
#endif
1335
1336
1337/**
1338 * Execute state save operation.
1339 *
1340 * @returns VBox status code.
1341 * @param pVM VM Handle.
1342 * @param pSSM SSM operation handle.
1343 */
1344static DECLCALLBACK(int) pgmR3Save(PVM pVM, PSSMHANDLE pSSM)
1345{
1346 PPGM pPGM = &pVM->pgm.s;
1347
1348 /* No more writes to physical memory after this point! */
1349 pVM->pgm.s.fNoMorePhysWrites = true;
1350
1351 /*
1352 * Save basic data (required / unaffected by relocation).
1353 */
1354#if 1
1355 SSMR3PutBool(pSSM, pPGM->fMappingsFixed);
1356#else
1357 SSMR3PutUInt(pSSM, pPGM->fMappingsFixed);
1358#endif
1359 SSMR3PutGCPtr(pSSM, pPGM->GCPtrMappingFixed);
1360 SSMR3PutU32(pSSM, pPGM->cbMappingFixed);
1361 SSMR3PutUInt(pSSM, pPGM->cbRamSize);
1362 SSMR3PutGCPhys(pSSM, pPGM->GCPhysA20Mask);
1363 SSMR3PutUInt(pSSM, pPGM->fA20Enabled);
1364 SSMR3PutUInt(pSSM, pPGM->fSyncFlags);
1365 SSMR3PutUInt(pSSM, pPGM->enmGuestMode);
1366 SSMR3PutU32(pSSM, ~0); /* Separator. */
1367
1368 /*
1369 * The guest mappings.
1370 */
1371 uint32_t i = 0;
1372 for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
1373 {
1374 SSMR3PutU32(pSSM, i);
1375 SSMR3PutStrZ(pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
1376 SSMR3PutGCPtr(pSSM, pMapping->GCPtr);
1377 SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
1378 /* flags are done by the mapping owners! */
1379 }
1380 SSMR3PutU32(pSSM, ~0); /* terminator. */
1381
1382 /*
1383 * Ram range flags and bits.
1384 */
1385 i = 0;
1386 for (PPGMRAMRANGE pRam = pPGM->pRamRangesHC; pRam; pRam = pRam->pNextHC, i++)
1387 {
1388 /** @todo MMIO ranges may move (PCI reconfig), we currently assume they don't. */
1389
1390 SSMR3PutU32(pSSM, i);
1391 SSMR3PutGCPhys(pSSM, pRam->GCPhys);
1392 SSMR3PutGCPhys(pSSM, pRam->GCPhysLast);
1393 SSMR3PutGCPhys(pSSM, pRam->cb);
1394 SSMR3PutU8(pSSM, !!pRam->pvHC); /* boolean indicating memory or not. */
1395
1396 /* Flags. */
1397 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1398 for (unsigned iPage = 0; iPage < cPages; iPage++)
1399 SSMR3PutU16(pSSM, (uint16_t)(pRam->aHCPhys[iPage] & ~X86_PTE_PAE_PG_MASK));
1400
1401 /* any memory associated with the range. */
1402 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1403 {
1404 for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
1405 {
1406 if (pRam->pavHCChunkHC[iChunk])
1407 {
1408 SSMR3PutU8(pSSM, 1); /* chunk present */
1409 SSMR3PutMem(pSSM, pRam->pavHCChunkHC[iChunk], PGM_DYNAMIC_CHUNK_SIZE);
1410 }
1411 else
1412 SSMR3PutU8(pSSM, 0); /* no chunk present */
1413 }
1414 }
1415 else if (pRam->pvHC)
1416 {
1417 int rc = SSMR3PutMem(pSSM, pRam->pvHC, pRam->cb);
1418 if (VBOX_FAILURE(rc))
1419 {
1420 Log(("pgmR3Save: SSMR3PutMem(, %p, %#x) -> %Vrc\n", pRam->pvHC, pRam->cb, rc));
1421 return rc;
1422 }
1423 }
1424 }
1425 return SSMR3PutU32(pSSM, ~0); /* terminator. */
1426}
1427
1428
1429/**
1430 * Execute state load operation.
1431 *
1432 * @returns VBox status code.
1433 * @param pVM VM Handle.
1434 * @param pSSM SSM operation handle.
1435 * @param u32Version Data layout version.
1436 */
1437static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1438{
1439 /*
1440 * Validate version.
1441 */
1442 if (u32Version != PGM_SAVED_STATE_VERSION)
1443 {
1444 Log(("pgmR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, PGM_SAVED_STATE_VERSION));
1445 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1446 }
1447
1448 /*
1449 * Call the reset function to make sure all the memory is cleared.
1450 */
1451 PGMR3Reset(pVM);
1452
1453 /*
1454 * Load basic data (required / unaffected by relocation).
1455 */
1456 PPGM pPGM = &pVM->pgm.s;
1457#if 1
1458 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
1459#else
1460 uint32_t u;
1461 SSMR3GetU32(pSSM, &u);
1462 pPGM->fMappingsFixed = u;
1463#endif
1464 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
1465 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
1466
1467 RTUINT cbRamSize;
1468 int rc = SSMR3GetU32(pSSM, &cbRamSize);
1469 if (VBOX_FAILURE(rc))
1470 return rc;
1471 if (cbRamSize != pPGM->cbRamSize)
1472 return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
1473 SSMR3GetGCPhys(pSSM, &pPGM->GCPhysA20Mask);
1474 SSMR3GetUInt(pSSM, &pPGM->fA20Enabled);
1475 SSMR3GetUInt(pSSM, &pPGM->fSyncFlags);
1476 RTUINT uGuestMode;
1477 SSMR3GetUInt(pSSM, &uGuestMode);
1478 pPGM->enmGuestMode = (PGMMODE)uGuestMode;
1479
1480 /* check separator. */
1481 uint32_t u32Sep;
1482 SSMR3GetU32(pSSM, &u32Sep);
1483 if (VBOX_FAILURE(rc))
1484 return rc;
1485 if (u32Sep != (uint32_t)~0)
1486 {
1487 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
1488 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1489 }
1490
1491 /*
1492 * The guest mappings.
1493 */
1494 uint32_t i = 0;
1495 for (;; i++)
1496 {
1497 /* Check the seqence number / separator. */
1498 rc = SSMR3GetU32(pSSM, &u32Sep);
1499 if (VBOX_FAILURE(rc))
1500 return rc;
1501 if (u32Sep == ~0U)
1502 break;
1503 if (u32Sep != i)
1504 {
1505 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1506 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1507 }
1508
1509 /* get the mapping details. */
1510 char szDesc[256];
1511 szDesc[0] = '\0';
1512 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
1513 if (VBOX_FAILURE(rc))
1514 return rc;
1515 RTGCPTR GCPtr;
1516 SSMR3GetGCPtr(pSSM, &GCPtr);
1517 RTGCUINTPTR cPTs;
1518 rc = SSMR3GetU32(pSSM, &cPTs);
1519 if (VBOX_FAILURE(rc))
1520 return rc;
1521
1522 /* find matching range. */
1523 PPGMMAPPING pMapping;
1524 for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
1525 if ( pMapping->cPTs == cPTs
1526 && !strcmp(pMapping->pszDesc, szDesc))
1527 break;
1528 if (!pMapping)
1529 {
1530 LogRel(("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%VGv)\n",
1531 cPTs, szDesc, GCPtr));
1532 AssertFailed();
1533 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1534 }
1535
1536 /* relocate it. */
1537 if (pMapping->GCPtr != GCPtr)
1538 {
1539 AssertMsg((GCPtr >> PGDIR_SHIFT << PGDIR_SHIFT) == GCPtr, ("GCPtr=%VGv\n", GCPtr));
1540#if HC_ARCH_BITS == 64
1541LogRel(("Mapping: %VGv -> %VGv %s\n", pMapping->GCPtr, GCPtr, pMapping->pszDesc));
1542#endif
1543 pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr >> PGDIR_SHIFT, GCPtr >> PGDIR_SHIFT);
1544 }
1545 else
1546 Log(("pgmR3Load: '%s' needed no relocation (%VGv)\n", szDesc, GCPtr));
1547 }
1548
1549 /*
1550 * Ram range flags and bits.
1551 */
1552 i = 0;
1553 for (PPGMRAMRANGE pRam = pPGM->pRamRangesHC; pRam; pRam = pRam->pNextHC, i++)
1554 {
1555 /** @todo MMIO ranges may move (PCI reconfig), we currently assume they don't. */
1556 /* Check the seqence number / separator. */
1557 rc = SSMR3GetU32(pSSM, &u32Sep);
1558 if (VBOX_FAILURE(rc))
1559 return rc;
1560 if (u32Sep == ~0U)
1561 break;
1562 if (u32Sep != i)
1563 {
1564 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1565 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1566 }
1567
1568 /* Get the range details. */
1569 RTGCPHYS GCPhys;
1570 SSMR3GetGCPhys(pSSM, &GCPhys);
1571 RTGCPHYS GCPhysLast;
1572 SSMR3GetGCPhys(pSSM, &GCPhysLast);
1573 RTGCPHYS cb;
1574 SSMR3GetGCPhys(pSSM, &cb);
1575 uint8_t fHaveBits;
1576 rc = SSMR3GetU8(pSSM, &fHaveBits);
1577 if (VBOX_FAILURE(rc))
1578 return rc;
1579 if (fHaveBits & ~1)
1580 {
1581 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1582 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1583 }
1584
1585 /* Match it up with the current range. */
1586 if ( GCPhys != pRam->GCPhys
1587 || GCPhysLast != pRam->GCPhysLast
1588 || cb != pRam->cb
1589 || fHaveBits != !!pRam->pvHC)
1590 {
1591 LogRel(("Ram range: %VGp-%VGp %VGp bytes %s\n"
1592 "State : %VGp-%VGp %VGp bytes %s\n",
1593 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvHC ? "bits" : "nobits",
1594 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits"));
1595 AssertFailed();
1596 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1597 }
1598
1599 /* Flags. */
1600 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1601 for (unsigned iPage = 0; iPage < cPages; iPage++)
1602 {
1603 uint16_t u16 = 0;
1604 SSMR3GetU16(pSSM, &u16);
1605 u16 &= PAGE_OFFSET_MASK & ~( MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL
1606 | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL
1607 | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF );
1608 pRam->aHCPhys[iPage] = (pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK) | (RTHCPHYS)u16;
1609 }
1610
1611 /* any memory associated with the range. */
1612 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1613 {
1614 for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
1615 {
1616 uint8_t fValidChunk;
1617
1618 rc = SSMR3GetU8(pSSM, &fValidChunk);
1619 if (VBOX_FAILURE(rc))
1620 return rc;
1621 if (fValidChunk > 1)
1622 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1623
1624 if (fValidChunk)
1625 {
1626 if (!pRam->pavHCChunkHC[iChunk])
1627 {
1628 rc = pgmr3PhysGrowRange(pVM, pRam->GCPhys + iChunk * PGM_DYNAMIC_CHUNK_SIZE);
1629 if (VBOX_FAILURE(rc))
1630 return rc;
1631 }
1632 Assert(pRam->pavHCChunkHC[iChunk]);
1633
1634 SSMR3GetMem(pSSM, pRam->pavHCChunkHC[iChunk], PGM_DYNAMIC_CHUNK_SIZE);
1635 }
1636 /* else nothing to do */
1637 }
1638 }
1639 else if (pRam->pvHC)
1640 {
1641 int rc = SSMR3GetMem(pSSM, pRam->pvHC, pRam->cb);
1642 if (VBOX_FAILURE(rc))
1643 {
1644 Log(("pgmR3Save: SSMR3GetMem(, %p, %#x) -> %Vrc\n", pRam->pvHC, pRam->cb, rc));
1645 return rc;
1646 }
1647 }
1648 }
1649
1650 /*
1651 * We require a full resync now.
1652 */
1653 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1654 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1655 pPGM->fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
1656 pPGM->fPhysCacheFlushPending = true;
1657 pgmR3HandlerPhysicalUpdateAll(pVM);
1658
1659 /*
1660 * Change the paging mode.
1661 */
1662 return pgmR3ChangeMode(pVM, pPGM->enmGuestMode);
1663}
1664
1665
1666/**
1667 * Show paging mode.
1668 *
1669 * @param pVM VM Handle.
1670 * @param pHlp The info helpers.
1671 * @param pszArgs "all" (default), "guest", "shadow" or "host".
1672 */
1673static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1674{
1675 /* digest argument. */
1676 bool fGuest, fShadow, fHost;
1677 if (pszArgs)
1678 pszArgs = RTStrStripL(pszArgs);
1679 if (!pszArgs || !*pszArgs || strstr(pszArgs, "all"))
1680 fShadow = fHost = fGuest = true;
1681 else
1682 {
1683 fShadow = fHost = fGuest = false;
1684 if (strstr(pszArgs, "guest"))
1685 fGuest = true;
1686 if (strstr(pszArgs, "shadow"))
1687 fShadow = true;
1688 if (strstr(pszArgs, "host"))
1689 fHost = true;
1690 }
1691
1692 /* print info. */
1693 if (fGuest)
1694 pHlp->pfnPrintf(pHlp, "Guest paging mode: %s, changed %RU64 times, A20 %s\n",
1695 PGMGetModeName(pVM->pgm.s.enmGuestMode), pVM->pgm.s.cGuestModeChanges.c,
1696 pVM->pgm.s.fA20Enabled ? "enabled" : "disabled");
1697 if (fShadow)
1698 pHlp->pfnPrintf(pHlp, "Shadow paging mode: %s\n", PGMGetModeName(pVM->pgm.s.enmShadowMode));
1699 if (fHost)
1700 {
1701 const char *psz;
1702 switch (pVM->pgm.s.enmHostMode)
1703 {
1704 case SUPPAGINGMODE_INVALID: psz = "invalid"; break;
1705 case SUPPAGINGMODE_32_BIT: psz = "32-bit"; break;
1706 case SUPPAGINGMODE_32_BIT_GLOBAL: psz = "32-bit+G"; break;
1707 case SUPPAGINGMODE_PAE: psz = "PAE"; break;
1708 case SUPPAGINGMODE_PAE_GLOBAL: psz = "PAE+G"; break;
1709 case SUPPAGINGMODE_PAE_NX: psz = "PAE+NX"; break;
1710 case SUPPAGINGMODE_PAE_GLOBAL_NX: psz = "PAE+G+NX"; break;
1711 case SUPPAGINGMODE_AMD64: psz = "AMD64"; break;
1712 case SUPPAGINGMODE_AMD64_GLOBAL: psz = "AMD64+G"; break;
1713 case SUPPAGINGMODE_AMD64_NX: psz = "AMD64+NX"; break;
1714 case SUPPAGINGMODE_AMD64_GLOBAL_NX: psz = "AMD64+G+NX"; break;
1715 default: psz = "unknown"; break;
1716 }
1717 pHlp->pfnPrintf(pHlp, "Host paging mode: %s\n", psz);
1718 }
1719}
1720
1721
1722/**
1723 * Dump registered MMIO ranges to the log.
1724 *
1725 * @param pVM VM Handle.
1726 * @param pHlp The info helpers.
1727 * @param pszArgs Arguments, ignored.
1728 */
1729static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1730{
1731 NOREF(pszArgs);
1732 pHlp->pfnPrintf(pHlp,
1733 "RAM ranges (pVM=%p)\n"
1734 "%.*s %.*s\n",
1735 pVM,
1736 sizeof(RTGCPHYS) * 4 + 1, "GC Phys Range ",
1737 sizeof(RTHCPTR) * 2, "pvHC ");
1738
1739 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC; pCur; pCur = pCur->pNextHC)
1740 pHlp->pfnPrintf(pHlp,
1741 "%VGp-%VGp %VHv\n",
1742 pCur->GCPhys,
1743 pCur->GCPhysLast,
1744 pCur->pvHC);
1745}
1746
1747/**
1748 * Dump the page directory to the log.
1749 *
1750 * @param pVM VM Handle.
1751 * @param pHlp The info helpers.
1752 * @param pszArgs Arguments, ignored.
1753 */
1754static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1755{
1756/** @todo fix this! Convert the PGMR3DumpHierarchyHC functions to do guest stuff. */
1757 /* Big pages supported? */
1758 const bool fPSE = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1759 /* Global pages supported? */
1760 const bool fPGE = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PGE);
1761
1762 NOREF(pszArgs);
1763
1764 /*
1765 * Get page directory addresses.
1766 */
1767 PVBOXPD pPDSrc = pVM->pgm.s.pGuestPDHC;
1768 Assert(pPDSrc);
1769 Assert(MMPhysGCPhys2HCVirt(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
1770
1771 /*
1772 * Iterate the page directory.
1773 */
1774 for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
1775 {
1776 VBOXPDE PdeSrc = pPDSrc->a[iPD];
1777 if (PdeSrc.n.u1Present)
1778 {
1779 if (PdeSrc.b.u1Size && fPSE)
1780 {
1781 pHlp->pfnPrintf(pHlp,
1782 "%04X - %VGp P=%d U=%d RW=%d G=%d - BIG\n",
1783 iPD,
1784 PdeSrc.u & X86_PDE_PG_MASK,
1785 PdeSrc.b.u1Present, PdeSrc.b.u1User, PdeSrc.b.u1Write, PdeSrc.b.u1Global && fPGE);
1786 }
1787 else
1788 {
1789 pHlp->pfnPrintf(pHlp,
1790 "%04X - %VGp P=%d U=%d RW=%d [G=%d]\n",
1791 iPD,
1792 PdeSrc.u & X86_PDE4M_PG_MASK,
1793 PdeSrc.n.u1Present, PdeSrc.n.u1User, PdeSrc.n.u1Write, PdeSrc.b.u1Global && fPGE);
1794 }
1795 }
1796 }
1797}
1798
1799
1800/**
1801 * Serivce a VMMCALLHOST_PGM_LOCK call.
1802 *
1803 * @returns VBox status code.
1804 * @param pVM The VM handle.
1805 */
1806PDMR3DECL(int) PGMR3LockCall(PVM pVM)
1807{
1808 return pgmLock(pVM);
1809}
1810
1811
1812/**
1813 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
1814 *
1815 * @returns PGM_TYPE_*.
1816 * @param pgmMode The mode value to convert.
1817 */
1818DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
1819{
1820 switch (pgmMode)
1821 {
1822 case PGMMODE_REAL: return PGM_TYPE_REAL;
1823 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
1824 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
1825 case PGMMODE_PAE:
1826 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
1827 case PGMMODE_AMD64:
1828 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
1829 default:
1830 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
1831 }
1832}
1833
1834
1835/**
1836 * Gets the index into the paging mode data array of a SHW+GST mode.
1837 *
1838 * @returns PGM::paPagingData index.
1839 * @param uShwType The shadow paging mode type.
1840 * @param uGstType The guest paging mode type.
1841 */
1842DECLINLINE(unsigned) pgmModeDataIndex(unsigned uShwType, unsigned uGstType)
1843{
1844 Assert(uShwType >= PGM_TYPE_32BIT && uShwType <= PGM_TYPE_AMD64);
1845 Assert(uGstType >= PGM_TYPE_REAL && uGstType <= PGM_TYPE_AMD64);
1846 return (uShwType - PGM_TYPE_32BIT) * (PGM_TYPE_AMD64 - PGM_TYPE_32BIT + 1)
1847 + (uGstType - PGM_TYPE_REAL);
1848}
1849
1850
1851/**
1852 * Gets the index into the paging mode data array of a SHW+GST mode.
1853 *
1854 * @returns PGM::paPagingData index.
1855 * @param enmShw The shadow paging mode.
1856 * @param enmGst The guest paging mode.
1857 */
1858DECLINLINE(unsigned) pgmModeDataIndexByMode(PGMMODE enmShw, PGMMODE enmGst)
1859{
1860 Assert(enmShw >= PGMMODE_32_BIT && enmShw <= PGMMODE_MAX);
1861 Assert(enmGst > PGMMODE_INVALID && enmGst < PGMMODE_MAX);
1862 return pgmModeDataIndex(pgmModeToType(enmShw), pgmModeToType(enmGst));
1863}
1864
1865
1866/**
1867 * Calculates the max data index.
1868 * @returns The number of entries in the pagaing data array.
1869 */
1870DECLINLINE(unsigned) pgmModeDataMaxIndex(void)
1871{
1872 return pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64) + 1;
1873}
1874
1875
1876/**
1877 * Initializes the paging mode data kept in PGM::paModeData.
1878 *
1879 * @param pVM The VM handle.
1880 * @param fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
1881 * This is used early in the init process to avoid trouble with PDM
1882 * not being initialized yet.
1883 */
1884static int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0)
1885{
1886 PPGMMODEDATA pModeData;
1887 int rc;
1888
1889 /*
1890 * Allocate the array on the first call.
1891 */
1892 if (!pVM->pgm.s.paModeData)
1893 {
1894 pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
1895 AssertReturn(pVM->pgm.s.paModeData, VERR_NO_MEMORY);
1896 }
1897
1898 /*
1899 * Initialize the array entries.
1900 */
1901 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGM_TYPE_REAL)];
1902 pModeData->uShwType = PGM_TYPE_32BIT;
1903 pModeData->uGstType = PGM_TYPE_REAL;
1904 rc = PGM_SHW_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1905 rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1906 rc = PGM_BTH_NAME_32BIT_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1907
1908 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGMMODE_PROTECTED)];
1909 pModeData->uShwType = PGM_TYPE_32BIT;
1910 pModeData->uGstType = PGM_TYPE_PROT;
1911 rc = PGM_SHW_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1912 rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1913 rc = PGM_BTH_NAME_32BIT_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1914
1915 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGM_TYPE_32BIT)];
1916 pModeData->uShwType = PGM_TYPE_32BIT;
1917 pModeData->uGstType = PGM_TYPE_32BIT;
1918 rc = PGM_SHW_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1919 rc = PGM_GST_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1920 rc = PGM_BTH_NAME_32BIT_32BIT(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1921
1922 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_REAL)];
1923 pModeData->uShwType = PGM_TYPE_PAE;
1924 pModeData->uGstType = PGM_TYPE_REAL;
1925 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1926 rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1927 rc = PGM_BTH_NAME_PAE_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1928
1929 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_PROT)];
1930 pModeData->uShwType = PGM_TYPE_PAE;
1931 pModeData->uGstType = PGM_TYPE_PROT;
1932 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1933 rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1934 rc = PGM_BTH_NAME_PAE_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1935
1936 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_32BIT)];
1937 pModeData->uShwType = PGM_TYPE_PAE;
1938 pModeData->uGstType = PGM_TYPE_32BIT;
1939 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1940 rc = PGM_GST_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1941 rc = PGM_BTH_NAME_PAE_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1942
1943 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_PAE)];
1944 pModeData->uShwType = PGM_TYPE_PAE;
1945 pModeData->uGstType = PGM_TYPE_PAE;
1946 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1947 rc = PGM_GST_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1948 rc = PGM_BTH_NAME_PAE_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1949
1950 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_REAL)];
1951 pModeData->uShwType = PGM_TYPE_AMD64;
1952 pModeData->uGstType = PGM_TYPE_REAL;
1953 rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1954 rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1955 rc = PGM_BTH_NAME_AMD64_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1956
1957 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_PROT)];
1958 pModeData->uShwType = PGM_TYPE_AMD64;
1959 pModeData->uGstType = PGM_TYPE_PROT;
1960 rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1961 rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1962 rc = PGM_BTH_NAME_AMD64_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1963
1964 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64)];
1965 pModeData->uShwType = PGM_TYPE_AMD64;
1966 pModeData->uGstType = PGM_TYPE_AMD64;
1967 rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1968 rc = PGM_GST_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1969 rc = PGM_BTH_NAME_AMD64_AMD64(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1970
1971 return VINF_SUCCESS;
1972}
1973
1974
1975/**
1976 * Swtich to different (or relocated in the relocate case) mode data.
1977 *
1978 * @param pVM The VM handle.
1979 * @param enmShw The the shadow paging mode.
1980 * @param enmGst The the guest paging mode.
1981 */
1982static void pgmR3ModeDataSwitch(PVM pVM, PGMMODE enmShw, PGMMODE enmGst)
1983{
1984 PPGMMODEDATA pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(enmShw, enmGst)];
1985
1986 Assert(pModeData->uGstType == pgmModeToType(enmGst));
1987 Assert(pModeData->uShwType == pgmModeToType(enmShw));
1988
1989 /* shadow */
1990 pVM->pgm.s.pfnR3ShwRelocate = pModeData->pfnR3ShwRelocate;
1991 pVM->pgm.s.pfnR3ShwExit = pModeData->pfnR3ShwExit;
1992 pVM->pgm.s.pfnR3ShwGetPage = pModeData->pfnR3ShwGetPage;
1993 Assert(pVM->pgm.s.pfnR3ShwGetPage);
1994 pVM->pgm.s.pfnR3ShwModifyPage = pModeData->pfnR3ShwModifyPage;
1995 pVM->pgm.s.pfnR3ShwGetPDEByIndex = pModeData->pfnR3ShwGetPDEByIndex;
1996 pVM->pgm.s.pfnR3ShwSetPDEByIndex = pModeData->pfnR3ShwSetPDEByIndex;
1997 pVM->pgm.s.pfnR3ShwModifyPDEByIndex = pModeData->pfnR3ShwModifyPDEByIndex;
1998
1999 pVM->pgm.s.pfnGCShwGetPage = pModeData->pfnGCShwGetPage;
2000 pVM->pgm.s.pfnGCShwModifyPage = pModeData->pfnGCShwModifyPage;
2001 pVM->pgm.s.pfnGCShwGetPDEByIndex = pModeData->pfnGCShwGetPDEByIndex;
2002 pVM->pgm.s.pfnGCShwSetPDEByIndex = pModeData->pfnGCShwSetPDEByIndex;
2003 pVM->pgm.s.pfnGCShwModifyPDEByIndex = pModeData->pfnGCShwModifyPDEByIndex;
2004
2005 pVM->pgm.s.pfnR0ShwGetPage = pModeData->pfnR0ShwGetPage;
2006 pVM->pgm.s.pfnR0ShwModifyPage = pModeData->pfnR0ShwModifyPage;
2007 pVM->pgm.s.pfnR0ShwGetPDEByIndex = pModeData->pfnR0ShwGetPDEByIndex;
2008 pVM->pgm.s.pfnR0ShwSetPDEByIndex = pModeData->pfnR0ShwSetPDEByIndex;
2009 pVM->pgm.s.pfnR0ShwModifyPDEByIndex = pModeData->pfnR0ShwModifyPDEByIndex;
2010
2011
2012 /* guest */
2013 pVM->pgm.s.pfnR3GstRelocate = pModeData->pfnR3GstRelocate;
2014 pVM->pgm.s.pfnR3GstExit = pModeData->pfnR3GstExit;
2015 pVM->pgm.s.pfnR3GstGetPage = pModeData->pfnR3GstGetPage;
2016 Assert(pVM->pgm.s.pfnR3GstGetPage);
2017 pVM->pgm.s.pfnR3GstModifyPage = pModeData->pfnR3GstModifyPage;
2018 pVM->pgm.s.pfnR3GstGetPDE = pModeData->pfnR3GstGetPDE;
2019 pVM->pgm.s.pfnR3GstMonitorCR3 = pModeData->pfnR3GstMonitorCR3;
2020 pVM->pgm.s.pfnR3GstUnmonitorCR3 = pModeData->pfnR3GstUnmonitorCR3;
2021 pVM->pgm.s.pfnR3GstMapCR3 = pModeData->pfnR3GstMapCR3;
2022 pVM->pgm.s.pfnR3GstUnmapCR3 = pModeData->pfnR3GstUnmapCR3;
2023 pVM->pgm.s.pfnHCGstWriteHandlerCR3 = pModeData->pfnHCGstWriteHandlerCR3;
2024 pVM->pgm.s.pszHCGstWriteHandlerCR3 = pModeData->pszHCGstWriteHandlerCR3;
2025
2026 pVM->pgm.s.pfnGCGstGetPage = pModeData->pfnGCGstGetPage;
2027 pVM->pgm.s.pfnGCGstModifyPage = pModeData->pfnGCGstModifyPage;
2028 pVM->pgm.s.pfnGCGstGetPDE = pModeData->pfnGCGstGetPDE;
2029 pVM->pgm.s.pfnGCGstMonitorCR3 = pModeData->pfnGCGstMonitorCR3;
2030 pVM->pgm.s.pfnGCGstUnmonitorCR3 = pModeData->pfnGCGstUnmonitorCR3;
2031 pVM->pgm.s.pfnGCGstMapCR3 = pModeData->pfnGCGstMapCR3;
2032 pVM->pgm.s.pfnGCGstUnmapCR3 = pModeData->pfnGCGstUnmapCR3;
2033 pVM->pgm.s.pfnGCGstWriteHandlerCR3 = pModeData->pfnGCGstWriteHandlerCR3;
2034
2035 pVM->pgm.s.pfnR0GstGetPage = pModeData->pfnR0GstGetPage;
2036 pVM->pgm.s.pfnR0GstModifyPage = pModeData->pfnR0GstModifyPage;
2037 pVM->pgm.s.pfnR0GstGetPDE = pModeData->pfnR0GstGetPDE;
2038 pVM->pgm.s.pfnR0GstMonitorCR3 = pModeData->pfnR0GstMonitorCR3;
2039 pVM->pgm.s.pfnR0GstUnmonitorCR3 = pModeData->pfnR0GstUnmonitorCR3;
2040 pVM->pgm.s.pfnR0GstMapCR3 = pModeData->pfnR0GstMapCR3;
2041 pVM->pgm.s.pfnR0GstUnmapCR3 = pModeData->pfnR0GstUnmapCR3;
2042 pVM->pgm.s.pfnR0GstWriteHandlerCR3 = pModeData->pfnR0GstWriteHandlerCR3;
2043
2044
2045 /* both */
2046 pVM->pgm.s.pfnR3BthRelocate = pModeData->pfnR3BthRelocate;
2047 pVM->pgm.s.pfnR3BthTrap0eHandler = pModeData->pfnR3BthTrap0eHandler;
2048 pVM->pgm.s.pfnR3BthInvalidatePage = pModeData->pfnR3BthInvalidatePage;
2049 pVM->pgm.s.pfnR3BthSyncCR3 = pModeData->pfnR3BthSyncCR3;
2050 Assert(pVM->pgm.s.pfnR3BthSyncCR3);
2051 pVM->pgm.s.pfnR3BthSyncPage = pModeData->pfnR3BthSyncPage;
2052 pVM->pgm.s.pfnR3BthPrefetchPage = pModeData->pfnR3BthPrefetchPage;
2053 pVM->pgm.s.pfnR3BthVerifyAccessSyncPage = pModeData->pfnR3BthVerifyAccessSyncPage;
2054#ifdef VBOX_STRICT
2055 pVM->pgm.s.pfnR3BthAssertCR3 = pModeData->pfnR3BthAssertCR3;
2056#endif
2057
2058 pVM->pgm.s.pfnGCBthTrap0eHandler = pModeData->pfnGCBthTrap0eHandler;
2059 pVM->pgm.s.pfnGCBthInvalidatePage = pModeData->pfnGCBthInvalidatePage;
2060 pVM->pgm.s.pfnGCBthSyncCR3 = pModeData->pfnGCBthSyncCR3;
2061 pVM->pgm.s.pfnGCBthSyncPage = pModeData->pfnGCBthSyncPage;
2062 pVM->pgm.s.pfnGCBthPrefetchPage = pModeData->pfnGCBthPrefetchPage;
2063 pVM->pgm.s.pfnGCBthVerifyAccessSyncPage = pModeData->pfnGCBthVerifyAccessSyncPage;
2064#ifdef VBOX_STRICT
2065 pVM->pgm.s.pfnGCBthAssertCR3 = pModeData->pfnGCBthAssertCR3;
2066#endif
2067
2068 pVM->pgm.s.pfnR0BthTrap0eHandler = pModeData->pfnR0BthTrap0eHandler;
2069 pVM->pgm.s.pfnR0BthInvalidatePage = pModeData->pfnR0BthInvalidatePage;
2070 pVM->pgm.s.pfnR0BthSyncCR3 = pModeData->pfnR0BthSyncCR3;
2071 pVM->pgm.s.pfnR0BthSyncPage = pModeData->pfnR0BthSyncPage;
2072 pVM->pgm.s.pfnR0BthPrefetchPage = pModeData->pfnR0BthPrefetchPage;
2073 pVM->pgm.s.pfnR0BthVerifyAccessSyncPage = pModeData->pfnR0BthVerifyAccessSyncPage;
2074#ifdef VBOX_STRICT
2075 pVM->pgm.s.pfnR0BthAssertCR3 = pModeData->pfnR0BthAssertCR3;
2076#endif
2077}
2078
2079
2080#ifdef DEBUG_bird
2081#include <stdlib.h> /* getenv() remove me! */
2082#endif
2083
2084/**
2085 * Calculates the shadow paging mode.
2086 *
2087 * @returns The shadow paging mode.
2088 * @param enmGuestMode The guest mode.
2089 * @param enmHostMode The host mode.
2090 * @param enmShadowMode The current shadow mode.
2091 * @param penmSwitcher Where to store the switcher to use.
2092 * VMMSWITCHER_INVALID means no change.
2093 */
static PGMMODE pgmR3CalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
{
    /* Default: keep the current switcher; a case below overrides this when a change is needed. */
    VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
    switch (enmGuestMode)
    {
        /*
         * When switching to real or protected mode we don't change
         * anything since it's likely that we'll switch back pretty soon.
         *
         * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
         * and is supposed to determine which shadow paging and switcher to
         * use during init.
         */
        case PGMMODE_REAL:
        case PGMMODE_PROTECTED:
            if (enmShadowMode != PGMMODE_INVALID)
                break; /* (no change) */
            /* First-time selection: derive shadow mode purely from the host paging mode. */
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_32_BIT;
                    enmSwitcher = VMMSWITCHER_32_TO_32;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
#ifdef DEBUG_bird
/* Developer-only override: force 32-bit shadow paging on a PAE host via env var. */
if (getenv("VBOX_32BIT"))
{
    enmShadowMode = PGMMODE_32_BIT;
    enmSwitcher = VMMSWITCHER_PAE_TO_32;
}
#endif
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    /* AMD64 host still shadows with PAE for real/protected guests. */
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;

        case PGMMODE_32_BIT:
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_32_BIT;
                    enmSwitcher = VMMSWITCHER_32_TO_32;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
#ifdef DEBUG_bird
/* Developer-only override: force 32-bit shadow paging on a PAE host via env var. */
if (getenv("VBOX_32BIT"))
{
    enmShadowMode = PGMMODE_32_BIT;
    enmSwitcher = VMMSWITCHER_PAE_TO_32;
}
#endif
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
            /* A PAE guest always gets a PAE shadow; only the switcher depends on the host. */
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_32_TO_PAE;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            /* Long mode guest: shadow mode choice is PAE here; the switcher still tracks the host.
               NOTE(review): 32-bit/PAE host cases also set PGMMODE_PAE — presumably the AMD64
               switchers establish the long-mode context; confirm against the switcher code. */
            switch (enmHostMode)
            {
                case SUPPAGINGMODE_32_BIT:
                case SUPPAGINGMODE_32_BIT_GLOBAL:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_32_TO_AMD64;
                    break;

                case SUPPAGINGMODE_PAE:
                case SUPPAGINGMODE_PAE_NX:
                case SUPPAGINGMODE_PAE_GLOBAL:
                case SUPPAGINGMODE_PAE_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_PAE_TO_AMD64;
                    break;

                case SUPPAGINGMODE_AMD64:
                case SUPPAGINGMODE_AMD64_GLOBAL:
                case SUPPAGINGMODE_AMD64_NX:
                case SUPPAGINGMODE_AMD64_GLOBAL_NX:
                    enmShadowMode = PGMMODE_PAE;
                    enmSwitcher = VMMSWITCHER_AMD64_TO_AMD64;
                    break;

                default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
            }
            break;


        default:
            /* Unknown guest mode: caller gets PGMMODE_INVALID and *penmSwitcher is left untouched. */
            AssertReleaseMsgFailed(("enmGuestMode=%d\n", enmGuestMode));
            return PGMMODE_INVALID;
    }

    *penmSwitcher = enmSwitcher;
    return enmShadowMode;
}
2251
2252
2253/**
2254 * Performs the actual mode change.
2255 * This is called by PGMChangeMode and pgmR3InitPaging().
2256 *
2257 * @returns VBox status code.
2258 * @param pVM VM handle.
2259 * @param enmGuestMode The new guest mode. This is assumed to be different from
2260 * the current mode.
2261 */
int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode)
{
    LogFlow(("pgmR3ChangeMode: Guest mode: %d -> %d\n", pVM->pgm.s.enmGuestMode, enmGuestMode));
    STAM_REL_COUNTER_INC(&pVM->pgm.s.cGuestModeChanges);

    /*
     * Calc the shadow mode and switcher.
     */
    VMMSWITCHER enmSwitcher;
    PGMMODE enmShadowMode = pgmR3CalcShadowMode(enmGuestMode, pVM->pgm.s.enmHostMode, pVM->pgm.s.enmShadowMode, &enmSwitcher);
    if (enmSwitcher != VMMSWITCHER_INVALID)
    {
        /*
         * Select new switcher.
         */
        int rc = VMMR3SelectSwitcher(pVM, enmSwitcher);
        if (VBOX_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("VMMR3SelectSwitcher(%d) -> %Vrc\n", enmSwitcher, rc));
            return rc;
        }
    }

    /*
     * Exit old mode(s).  Order matters: shadow first, then guest, before the
     * new mode data is loaded below.
     */
    /* shadow */
    if (enmShadowMode != pVM->pgm.s.enmShadowMode)
    {
        LogFlow(("pgmR3ChangeMode: Shadow mode: %d -> %d\n", pVM->pgm.s.enmShadowMode, enmShadowMode));
        if (PGM_SHW_PFN(Exit, pVM))
        {
            int rc = PGM_SHW_PFN(Exit, pVM)(pVM);
            if (VBOX_FAILURE(rc))
            {
                AssertMsgFailed(("Exit failed for shadow mode %d: %Vrc\n", pVM->pgm.s.enmShadowMode, rc));
                return rc;
            }
        }

    }

    /* guest */
    if (PGM_GST_PFN(Exit, pVM))
    {
        int rc = PGM_GST_PFN(Exit, pVM)(pVM);
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("Exit failed for guest mode %d: %Vrc\n", pVM->pgm.s.enmGuestMode, rc));
            return rc;
        }
    }

    /*
     * Load new paging mode data.
     */
    pgmR3ModeDataSwitch(pVM, enmShadowMode, enmGuestMode);

    /*
     * Enter new shadow mode (if changed).
     */
    if (enmShadowMode != pVM->pgm.s.enmShadowMode)
    {
        int rc;
        pVM->pgm.s.enmShadowMode = enmShadowMode;
        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
                rc = PGM_SHW_NAME_32BIT(Enter)(pVM);
                break;
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
                rc = PGM_SHW_NAME_PAE(Enter)(pVM);
                break;
            case PGMMODE_AMD64:
            case PGMMODE_AMD64_NX:
                rc = PGM_SHW_NAME_AMD64(Enter)(pVM);
                break;
            case PGMMODE_REAL:
            case PGMMODE_PROTECTED:
            default:
                /* Real/protected are never valid *shadow* modes. */
                AssertReleaseMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
                return VERR_INTERNAL_ERROR;
        }
        if (VBOX_FAILURE(rc))
        {
            AssertReleaseMsgFailed(("Entering enmShadowMode=%d failed: %Vrc\n", enmShadowMode, rc));
            /* Mark the shadow mode invalid so a later attempt re-enters it. */
            pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
            return rc;
        }
    }

    /*
     * Enter the new guest and shadow+guest modes.
     * rc tracks the guest Enter, rc2 the combined shadow+guest (Bth) Enter.
     */
    int rc = -1;
    int rc2 = -1;
    RTGCPHYS GCPhysCR3 = NIL_RTGCPHYS;
    pVM->pgm.s.enmGuestMode = enmGuestMode;
    switch (enmGuestMode)
    {
        case PGMMODE_REAL:
            /* No guest CR3 in real mode. */
            rc = PGM_GST_NAME_REAL(Enter)(pVM, NIL_RTGCPHYS);
            switch (pVM->pgm.s.enmShadowMode)
            {
                case PGMMODE_32_BIT:
                    rc2 = PGM_BTH_NAME_32BIT_REAL(Enter)(pVM, NIL_RTGCPHYS);
                    break;
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVM, NIL_RTGCPHYS);
                    break;
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    rc2 = PGM_BTH_NAME_AMD64_REAL(Enter)(pVM, NIL_RTGCPHYS);
                    break;
                default: AssertFailed(); break;
            }
            break;

        case PGMMODE_PROTECTED:
            /* Protected mode without paging: no guest CR3 either. */
            rc = PGM_GST_NAME_PROT(Enter)(pVM, NIL_RTGCPHYS);
            switch (pVM->pgm.s.enmShadowMode)
            {
                case PGMMODE_32_BIT:
                    rc2 = PGM_BTH_NAME_32BIT_PROT(Enter)(pVM, NIL_RTGCPHYS);
                    break;
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVM, NIL_RTGCPHYS);
                    break;
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    rc2 = PGM_BTH_NAME_AMD64_PROT(Enter)(pVM, NIL_RTGCPHYS);
                    break;
                default: AssertFailed(); break;
            }
            break;

        case PGMMODE_32_BIT:
            /* Legacy paging: CR3 is page aligned with the 32-bit mask. */
            GCPhysCR3 = CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK;
            rc = PGM_GST_NAME_32BIT(Enter)(pVM, GCPhysCR3);
            switch (pVM->pgm.s.enmShadowMode)
            {
                case PGMMODE_32_BIT:
                    rc2 = PGM_BTH_NAME_32BIT_32BIT(Enter)(pVM, GCPhysCR3);
                    break;
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_32BIT(Enter)(pVM, GCPhysCR3);
                    break;
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    AssertMsgFailed(("Should use PAE shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;

        //case PGMMODE_PAE_NX:
        case PGMMODE_PAE:
            /* PAE: CR3 points at the PDPT, different alignment mask. */
            GCPhysCR3 = CPUMGetGuestCR3(pVM) & X86_CR3_PAE_PAGE_MASK;
            rc = PGM_GST_NAME_PAE(Enter)(pVM, GCPhysCR3);
            switch (pVM->pgm.s.enmShadowMode)
            {
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    rc2 = PGM_BTH_NAME_PAE_PAE(Enter)(pVM, GCPhysCR3);
                    break;
                case PGMMODE_32_BIT:
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    AssertMsgFailed(("Should use PAE shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;

        //case PGMMODE_AMD64_NX:
        case PGMMODE_AMD64:
            GCPhysCR3 = CPUMGetGuestCR3(pVM) & 0xfffffffffffff000ULL; /** @todo define this mask and make CR3 64-bit in this case! */
            rc = PGM_GST_NAME_AMD64(Enter)(pVM, GCPhysCR3);
            switch (pVM->pgm.s.enmShadowMode)
            {
                case PGMMODE_AMD64:
                case PGMMODE_AMD64_NX:
                    rc2 = PGM_BTH_NAME_AMD64_AMD64(Enter)(pVM, GCPhysCR3);
                    break;
                case PGMMODE_32_BIT:
                case PGMMODE_PAE:
                case PGMMODE_PAE_NX:
                    AssertMsgFailed(("Should use AMD64 shadow mode!\n"));
                default: AssertFailed(); break;
            }
            break;

        default:
            AssertReleaseMsgFailed(("enmGuestMode=%d\n", enmGuestMode));
            rc = VERR_NOT_IMPLEMENTED;
            break;
    }

    /* status codes. Fold rc2 into rc; success collapses to VINF_SUCCESS
       (no informational status codes are propagated). */
    AssertRC(rc);
    AssertRC(rc2);
    if (VBOX_SUCCESS(rc))
    {
        rc = rc2;
        if (VBOX_SUCCESS(rc)) /* no informational status codes. */
            rc = VINF_SUCCESS;
    }

    /*
     * Notify SELM so it can update the TSSes with correct CR3s.
     */
    SELMR3PagingModeChanged(pVM);

    /* Notify HWACCM as well. */
    HWACCMR3PagingModeChanged(pVM, pVM->pgm.s.enmShadowMode);
    return rc;
}
2481
2482
2483/**
2484 * Dumps a PAE shadow page table.
2485 *
2486 * @returns VBox status code (VINF_SUCCESS).
2487 * @param pVM The VM handle.
2488 * @param pPT Pointer to the page table.
2489 * @param u64Address The virtual address of the page table starts.
2490 * @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
 * @param cMaxDepth The maximum depth.
2492 * @param pHlp Pointer to the output functions.
2493 */
2494static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
2495{
2496 for (unsigned i = 0; i < ELEMENTS(pPT->a); i++)
2497 {
2498 X86PTEPAE Pte = pPT->a[i];
2499 if (Pte.n.u1Present)
2500 {
2501 pHlp->pfnPrintf(pHlp,
2502 fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
2503 ? "%016llx 3 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx\n"
2504 : "%08llx 2 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx\n",
2505 u64Address + ((uint64_t)i << X86_PT_PAE_SHIFT),
2506 Pte.n.u1Write ? 'W' : 'R',
2507 Pte.n.u1User ? 'U' : 'S',
2508 Pte.n.u1Accessed ? 'A' : '-',
2509 Pte.n.u1Dirty ? 'D' : '-',
2510 Pte.n.u1Global ? 'G' : '-',
2511 Pte.n.u1WriteThru ? "WT" : "--",
2512 Pte.n.u1CacheDisable? "CD" : "--",
2513 Pte.n.u1PAT ? "AT" : "--",
2514 Pte.n.u1NoExecute ? "NX" : "--",
2515 Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
2516 Pte.u & BIT(10) ? '1' : '0',
2517 Pte.u & PGM_PTFLAGS_CSAM_VALIDATED? 'v' : '-',
2518 Pte.u & X86_PTE_PAE_PG_MASK);
2519 }
2520 }
2521 return VINF_SUCCESS;
2522}
2523
2524
2525/**
2526 * Dumps a PAE shadow page directory table.
2527 *
2528 * @returns VBox status code (VINF_SUCCESS).
2529 * @param pVM The VM handle.
2530 * @param HCPhys The physical address of the page directory table.
2531 * @param u64Address The virtual address of the page table starts.
2532 * @param cr4 The CR4, PSE is currently used.
2533 * @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
 * @param cMaxDepth The maximum depth.
2535 * @param pHlp Pointer to the output functions.
2536 */
static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /* Resolve the PD's host-physical address to a host mapping. */
    PX86PDPAE pPD = (PX86PDPAE)MMPagePhys2Page(pVM, HCPhys);
    if (!pPD)
    {
        pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%#VHp was not found in the page pool!\n",
                        fLongMode ? 16 : 8, u64Address, HCPhys);
        return VERR_INVALID_PARAMETER;
    }
    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < ELEMENTS(pPD->a); i++)
    {
        X86PDEPAE Pde = pPD->a[i];
        if (Pde.n.u1Present)
        {
            /* Large (2/4M) pages are leaf entries: print and stop here. */
            if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
                pHlp->pfnPrintf(pHlp,
                                fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
                                ? "%016llx 2 | P %c %c %c %c %c %s %s %s %s 4M %c%c%c %016llx\n"
                                : "%08llx 1 | P %c %c %c %c %c %s %s %s %s 4M %c%c%c %016llx\n",
                                u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT),
                                Pde.b.u1Write ? 'W' : 'R',
                                Pde.b.u1User ? 'U' : 'S',
                                Pde.b.u1Accessed ? 'A' : '-',
                                Pde.b.u1Dirty ? 'D' : '-',
                                Pde.b.u1Global ? 'G' : '-',
                                Pde.b.u1WriteThru ? "WT" : "--",
                                Pde.b.u1CacheDisable? "CD" : "--",
                                Pde.b.u1PAT ? "AT" : "--",
                                Pde.b.u1NoExecute ? "NX" : "--",
                                Pde.u & BIT64(9) ? '1' : '0',
                                Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
                                Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                Pde.u & X86_PDE_PAE_PG_MASK);
            else
            {
                /* 4K PDE: print it, then (depth permitting) descend into the page table. */
                pHlp->pfnPrintf(pHlp,
                                fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
                                ? "%016llx 2 | P %c %c %c %c %c %s %s .. %s 4K %c%c%c %016llx\n"
                                : "%08llx 1 | P %c %c %c %c %c %s %s .. %s 4K %c%c%c %016llx\n",
                                u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT),
                                Pde.n.u1Write ? 'W' : 'R',
                                Pde.n.u1User ? 'U' : 'S',
                                Pde.n.u1Accessed ? 'A' : '-',
                                Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
                                Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
                                Pde.n.u1WriteThru ? "WT" : "--",
                                Pde.n.u1CacheDisable? "CD" : "--",
                                Pde.n.u1NoExecute ? "NX" : "--",
                                Pde.u & BIT64(9) ? '1' : '0',
                                Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
                                Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                Pde.u & X86_PDE_PAE_PG_MASK);
                if (cMaxDepth >= 1)
                {
                    /** @todo what about using the page pool for mapping PTs? */
                    uint64_t u64AddressPT = u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT);
                    RTHCPHYS HCPhysPT = Pde.u & X86_PDE_PAE_PG_MASK;
                    PX86PTPAE pPT = NULL;
                    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
                        pPT = (PX86PTPAE)MMPagePhys2Page(pVM, HCPhysPT);
                    else
                    {
                        /* The PDE belongs to a hypervisor mapping; locate the mapping that
                           covers this address and use its pre-allocated PAE page tables. */
                        for (PPGMMAPPING pMap = pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
                        {
                            uint64_t off = u64AddressPT - pMap->GCPtr;
                            if (off < pMap->cb)
                            {
                                /* Each legacy PDE slot covers two PAE page tables; iSub selects
                                   the first or second half. */
                                const int iPDE = (uint32_t)(off >> X86_PD_SHIFT);
                                const int iSub = (int)((off >> X86_PD_PAE_SHIFT) & 1); /* MSC is a pain sometimes */
                                if ((iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0) != HCPhysPT)
                                    pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%VHp not %VHp is in the PD.\n",
                                                    fLongMode ? 16 : 8, u64AddressPT, iPDE,
                                                    iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0, HCPhysPT);
                                pPT = &pMap->aPTs[iPDE].paPaePTsR3[iSub];
                            }
                        }
                    }
                    int rc2 = VERR_INVALID_PARAMETER;
                    if (pPT)
                        rc2 = pgmR3DumpHierarchyHCPaePT(pVM, pPT, u64AddressPT, fLongMode, cMaxDepth - 1, pHlp);
                    else
                        pHlp->pfnPrintf(pHlp, "%0*llx error! Page table at HCPhys=%#VHp was not found in the page pool!\n",
                                        fLongMode ? 16 : 8, u64AddressPT, HCPhysPT);
                    /* Keep the first (most negative) failure status. */
                    if (rc2 < rc && VBOX_SUCCESS(rc))
                        rc = rc2;
                }
            }
        }
    }
    return rc;
}
2629
2630
2631/**
2632 * Dumps a PAE shadow page directory pointer table.
2633 *
2634 * @returns VBox status code (VINF_SUCCESS).
2635 * @param pVM The VM handle.
2636 * @param HCPhys The physical address of the page directory pointer table.
2637 * @param u64Address The virtual address of the page table starts.
2638 * @param cr4 The CR4, PSE is currently used.
2639 * @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
 * @param cMaxDepth The maximum depth.
2641 * @param pHlp Pointer to the output functions.
2642 */
static int pgmR3DumpHierarchyHCPaePDPTR(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /* Resolve the PDPT's host-physical address to a host mapping. */
    PX86PDPTR pPDPTR = (PX86PDPTR)MMPagePhys2Page(pVM, HCPhys);
    if (!pPDPTR)
    {
        pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%#VHp was not found in the page pool!\n",
                        fLongMode ? 16 : 8, u64Address, HCPhys);
        return VERR_INVALID_PARAMETER;
    }

    int rc = VINF_SUCCESS;
    /* A legacy PAE PDPT has only 4 entries; the long mode one uses all 512. */
    const unsigned c = fLongMode ? ELEMENTS(pPDPTR->a) : 4;
    for (unsigned i = 0; i < c; i++)
    {
        X86PDPE Pdpe = pPDPTR->a[i];
        if (Pdpe.n.u1Present)
        {
            if (fLongMode)
                pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
                                "%016llx 1 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
                                u64Address + ((uint64_t)i << X86_PDPTR_SHIFT),
                                Pdpe.n.u1Write ? 'W' : 'R',
                                Pdpe.n.u1User ? 'U' : 'S',
                                Pdpe.n.u1Accessed ? 'A' : '-',
                                Pdpe.n.u3Reserved & 1? '?' : '.', /* ignored */
                                Pdpe.n.u3Reserved & 4? '!' : '.', /* mbz */
                                Pdpe.n.u1WriteThru ? "WT" : "--",
                                Pdpe.n.u1CacheDisable? "CD" : "--",
                                Pdpe.n.u3Reserved & 2? "!" : "..",/* mbz */
                                Pdpe.n.u1NoExecute ? "NX" : "--",
                                Pdpe.u & BIT(9) ? '1' : '0',
                                Pdpe.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
                                Pdpe.u & BIT(11) ? '1' : '0',
                                Pdpe.u & X86_PDPE_PG_MASK);
            else
                /* Legacy PAE PDPTEs reserve most control bits (flagged '!' = must-be-zero). */
                pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
                                "%08x 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
                                i << X86_PDPTR_SHIFT,
                                Pdpe.n.u1Write ? '!' : '.', /* mbz */
                                Pdpe.n.u1User ? '!' : '.', /* mbz */
                                Pdpe.n.u1Accessed ? '!' : '.', /* mbz */
                                Pdpe.n.u3Reserved & 1? '!' : '.', /* mbz */
                                Pdpe.n.u3Reserved & 4? '!' : '.', /* mbz */
                                Pdpe.n.u1WriteThru ? "WT" : "--",
                                Pdpe.n.u1CacheDisable? "CD" : "--",
                                Pdpe.n.u3Reserved & 2? "!" : "..",/* mbz */
                                Pdpe.n.u1NoExecute ? "NX" : "--",
                                Pdpe.u & BIT(9) ? '1' : '0',
                                Pdpe.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
                                Pdpe.u & BIT(11) ? '1' : '0',
                                Pdpe.u & X86_PDPE_PG_MASK);
            /* Recurse into the page directory this entry points at. */
            if (cMaxDepth >= 1)
            {
                int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPTR_SHIFT),
                                                    cr4, fLongMode, cMaxDepth - 1, pHlp);
                /* Keep the first (most negative) failure status. */
                if (rc2 < rc && VBOX_SUCCESS(rc))
                    rc = rc2;
            }
        }
    }
    return rc;
}
2705
2706
/**
 * Dumps a long mode (PAE) shadow page map level 4 table (PML4).
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param pVM The VM handle.
 * @param HCPhys The physical address of the PML4 table.
 * @param cr4 The CR4, PSE is currently used.
 * @param cMaxDepth The maximum depth.
 * @param pHlp Pointer to the output functions.
 */
static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /* Resolve the PML4's host-physical address to a host mapping. */
    PX86PML4 pPML4 = (PX86PML4)MMPagePhys2Page(pVM, HCPhys);
    if (!pPML4)
    {
        pHlp->pfnPrintf(pHlp, "Page map level 4 at HCPhys=%#VHp was not found in the page pool!\n", HCPhys);
        return VERR_INVALID_PARAMETER;
    }

    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < ELEMENTS(pPML4->a); i++)
    {
        X86PML4E Pml4e = pPML4->a[i];
        if (Pml4e.n.u1Present)
        {
            /* Compute the canonical virtual address: entries in the upper half of
               the PML4 (i >= 256) get the high bits sign-extended with 0xffff...
               so the printed address is a valid canonical long-mode address. */
            uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPTR_SHIFT - 1)) * 0xffff000000000000ULL);
            pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
                            "%016llx 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
                            u64Address,
                            Pml4e.n.u1Write ? 'W' : 'R',
                            Pml4e.n.u1User ? 'U' : 'S',
                            Pml4e.n.u1Accessed ? 'A' : '-',
                            Pml4e.n.u3Reserved & 1? '?' : '.', /* ignored */
                            Pml4e.n.u3Reserved & 4? '!' : '.', /* mbz */
                            Pml4e.n.u1WriteThru ? "WT" : "--",
                            Pml4e.n.u1CacheDisable? "CD" : "--",
                            Pml4e.n.u3Reserved & 2? "!" : "..",/* mbz */
                            Pml4e.n.u1NoExecute ? "NX" : "--",
                            Pml4e.u & BIT(9) ? '1' : '0',
                            Pml4e.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
                            Pml4e.u & BIT(11) ? '1' : '0',
                            Pml4e.u & X86_PML4E_PG_MASK);

            /* Recurse into the PDPT (always long mode from here down). */
            if (cMaxDepth >= 1)
            {
                int rc2 = pgmR3DumpHierarchyHCPaePDPTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
                /* Keep the first (most negative) failure status. */
                if (rc2 < rc && VBOX_SUCCESS(rc))
                    rc = rc2;
            }
        }
    }
    return rc;
}
2760
2761
2762/**
2763 * Dumps a 32-bit shadow page table.
2764 *
2765 * @returns VBox status code (VINF_SUCCESS).
2766 * @param pVM The VM handle.
2767 * @param pPT Pointer to the page table.
2768 * @param u32Address The virtual address this table starts at.
2769 * @param pHlp Pointer to the output functions.
2770 */
2771int pgmR3DumpHierarchyHC32BitPT(PVM pVM, PX86PT pPT, uint32_t u32Address, PCDBGFINFOHLP pHlp)
2772{
2773 for (unsigned i = 0; i < ELEMENTS(pPT->a); i++)
2774 {
2775 X86PTE Pte = pPT->a[i];
2776 if (Pte.n.u1Present)
2777 {
2778 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a m d */
2779 "%08x 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x\n",
2780 u32Address + (i << X86_PT_SHIFT),
2781 Pte.n.u1Write ? 'W' : 'R',
2782 Pte.n.u1User ? 'U' : 'S',
2783 Pte.n.u1Accessed ? 'A' : '-',
2784 Pte.n.u1Dirty ? 'D' : '-',
2785 Pte.n.u1Global ? 'G' : '-',
2786 Pte.n.u1WriteThru ? "WT" : "--",
2787 Pte.n.u1CacheDisable? "CD" : "--",
2788 Pte.n.u1PAT ? "AT" : "--",
2789 Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
2790 Pte.u & BIT(10) ? '1' : '0',
2791 Pte.u & PGM_PTFLAGS_CSAM_VALIDATED ? 'v' : '-',
2792 Pte.u & X86_PDE_PG_MASK);
2793 }
2794 }
2795 return VINF_SUCCESS;
2796}
2797
2798
2799/**
2800 * Dumps a 32-bit shadow page directory and page tables.
2801 *
2802 * @returns VBox status code (VINF_SUCCESS).
2803 * @param pVM The VM handle.
2804 * @param cr3 The root of the hierarchy.
2805 * @param cr4 The CR4, PSE is currently used.
2806 * @param cMaxDepth How deep into the hierarchy the dumper should go.
2807 * @param pHlp Pointer to the output functions.
2808 */
int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
{
    /* Resolve the CR3-addressed page directory to a host mapping. */
    PX86PD pPD = (PX86PD)MMPagePhys2Page(pVM, cr3 & X86_CR3_PAGE_MASK);
    if (!pPD)
    {
        pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
        return VERR_INVALID_PARAMETER;
    }

    int rc = VINF_SUCCESS;
    for (unsigned i = 0; i < ELEMENTS(pPD->a); i++)
    {
        X86PDE Pde = pPD->a[i];
        if (Pde.n.u1Present)
        {
            const uint32_t u32Address = i << X86_PD_SHIFT;
            /* 4M pages are leaf entries: print and stop here. */
            if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
                pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a m d */
                                "%08x 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08x\n",
                                u32Address,
                                Pde.b.u1Write ? 'W' : 'R',
                                Pde.b.u1User ? 'U' : 'S',
                                Pde.b.u1Accessed ? 'A' : '-',
                                Pde.b.u1Dirty ? 'D' : '-',
                                Pde.b.u1Global ? 'G' : '-',
                                Pde.b.u1WriteThru ? "WT" : "--",
                                Pde.b.u1CacheDisable? "CD" : "--",
                                Pde.b.u1PAT ? "AT" : "--",
                                Pde.u & BIT64(9) ? '1' : '0',
                                Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
                                Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                Pde.u & X86_PDE4M_PG_MASK);
            else
            {
                /* 4K PDE: print it, then (depth permitting) descend into the page table. */
                pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a m d */
                                "%08x 0 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x\n",
                                u32Address,
                                Pde.n.u1Write ? 'W' : 'R',
                                Pde.n.u1User ? 'U' : 'S',
                                Pde.n.u1Accessed ? 'A' : '-',
                                Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
                                Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
                                Pde.n.u1WriteThru ? "WT" : "--",
                                Pde.n.u1CacheDisable? "CD" : "--",
                                Pde.u & BIT64(9) ? '1' : '0',
                                Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
                                Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
                                Pde.u & X86_PDE_PG_MASK);
                if (cMaxDepth >= 1)
                {
                    /** @todo what about using the page pool for mapping PTs? */
                    RTHCPHYS HCPhys = Pde.u & X86_PDE_PG_MASK;
                    PX86PT pPT = NULL;
                    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
                        pPT = (PX86PT)MMPagePhys2Page(pVM, HCPhys);
                    else
                    {
                        /* The PDE belongs to a hypervisor mapping; find the mapping covering
                           this address and use its pre-allocated page table, sanity-checking
                           that the PDE actually points at it. */
                        for (PPGMMAPPING pMap = pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
                            if (u32Address - pMap->GCPtr < pMap->cb)
                            {
                                int iPDE = (u32Address - pMap->GCPtr) >> X86_PD_SHIFT;
                                if (pMap->aPTs[iPDE].HCPhysPT != HCPhys)
                                    pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%VHp not %VHp is in the PD.\n",
                                                    u32Address, iPDE, pMap->aPTs[iPDE].HCPhysPT, HCPhys);
                                pPT = pMap->aPTs[iPDE].pPTR3;
                            }
                    }
                    int rc2 = VERR_INVALID_PARAMETER;
                    if (pPT)
                        rc2 = pgmR3DumpHierarchyHC32BitPT(pVM, pPT, u32Address, pHlp);
                    else
                        pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
                    /* Keep the first (most negative) failure status. */
                    if (rc2 < rc && VBOX_SUCCESS(rc))
                        rc = rc2;
                }
            }
        }
    }

    return rc;
}
2890
2891
/**
 * Dumps a 32-bit guest page table, searching for a given physical address.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param pVM The VM handle.
 * @param pPT Pointer to the page table.
 * @param u32Address The virtual address this table starts at.
 * @param PhysSearch Physical address to search for; a match also logs the shadow page flags.
 */
2901int pgmR3DumpHierarchyGC32BitPT(PVM pVM, PX86PT pPT, uint32_t u32Address, RTGCPHYS PhysSearch)
2902{
2903 for (unsigned i = 0; i < ELEMENTS(pPT->a); i++)
2904 {
2905 X86PTE Pte = pPT->a[i];
2906 if (Pte.n.u1Present)
2907 {
2908 Log(( /*P R S A D G WT CD AT NX 4M a m d */
2909 "%08x 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x\n",
2910 u32Address + (i << X86_PT_SHIFT),
2911 Pte.n.u1Write ? 'W' : 'R',
2912 Pte.n.u1User ? 'U' : 'S',
2913 Pte.n.u1Accessed ? 'A' : '-',
2914 Pte.n.u1Dirty ? 'D' : '-',
2915 Pte.n.u1Global ? 'G' : '-',
2916 Pte.n.u1WriteThru ? "WT" : "--",
2917 Pte.n.u1CacheDisable? "CD" : "--",
2918 Pte.n.u1PAT ? "AT" : "--",
2919 Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
2920 Pte.u & BIT(10) ? '1' : '0',
2921 Pte.u & PGM_PTFLAGS_CSAM_VALIDATED ? 'v' : '-',
2922 Pte.u & X86_PDE_PG_MASK));
2923
2924 if ((Pte.u & X86_PDE_PG_MASK) == PhysSearch)
2925 {
2926 uint64_t fPageShw = 0;
2927 RTHCPHYS pPhysHC = 0;
2928
2929 PGMShwGetPage(pVM, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), &fPageShw, &pPhysHC);
2930 Log(("Found %VGp at %VGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
2931 }
2932 }
2933 }
2934 return VINF_SUCCESS;
2935}
2936
2937
2938/**
2939 * Dumps a 32-bit guest page directory and page tables.
2940 *
2941 * @returns VBox status code (VINF_SUCCESS).
2942 * @param pVM The VM handle.
2943 * @param cr3 The root of the hierarchy.
2944 * @param cr4 The CR4, PSE is currently used.
2945 * @param PhysSearch Address to search for.
2946 */
PGMR3DECL(int) PGMR3DumpHierarchyGC(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCPHYS PhysSearch)
{
    /* This dumper only handles legacy 32-bit guest paging; fLongMode is kept
       constant so the legend formatting below matches the other dumpers. */
    bool fLongMode = false;
    const unsigned cch = fLongMode ? 16 : 8; NOREF(cch);
    PX86PD pPD = 0;

    /* Map the guest page directory addressed by CR3. */
    int rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
    if (VBOX_FAILURE(rc) || !pPD)
    {
        Log(("Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK));
        return VERR_INVALID_PARAMETER;
    }

    /* Print the column legend once before walking the hierarchy. */
    Log(("cr3=%08x cr4=%08x%s\n"
         "%-*s P - Present\n"
         "%-*s | R/W - Read (0) / Write (1)\n"
         "%-*s | | U/S - User (1) / Supervisor (0)\n"
         "%-*s | | | A - Accessed\n"
         "%-*s | | | | D - Dirty\n"
         "%-*s | | | | | G - Global\n"
         "%-*s | | | | | | WT - Write thru\n"
         "%-*s | | | | | | | CD - Cache disable\n"
         "%-*s | | | | | | | | AT - Attribute table (PAT)\n"
         "%-*s | | | | | | | | | NX - No execute (K8)\n"
         "%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
         "%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
         "%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
         "%-*s Level | | | | | | | | | | | | Page\n"
         /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
                        - W U - - - -- -- -- -- -- 010 */
         , cr3, cr4, fLongMode ? " Long Mode" : "",
         cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
         cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address"));

    for (unsigned i = 0; i < ELEMENTS(pPD->a); i++)
    {
        X86PDE Pde = pPD->a[i];
        if (Pde.n.u1Present)
        {
            const uint32_t u32Address = i << X86_PD_SHIFT;

            /* 4M pages are leaf entries: log and stop here. */
            if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
                Log(( /*P R S A D G WT CD AT NX 4M a m d */
                     "%08x 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08x\n",
                     u32Address,
                     Pde.b.u1Write ? 'W' : 'R',
                     Pde.b.u1User ? 'U' : 'S',
                     Pde.b.u1Accessed ? 'A' : '-',
                     Pde.b.u1Dirty ? 'D' : '-',
                     Pde.b.u1Global ? 'G' : '-',
                     Pde.b.u1WriteThru ? "WT" : "--",
                     Pde.b.u1CacheDisable? "CD" : "--",
                     Pde.b.u1PAT ? "AT" : "--",
                     Pde.u & BIT(9) ? '1' : '0',
                     Pde.u & BIT(10) ? '1' : '0',
                     Pde.u & BIT(11) ? '1' : '0',
                     Pde.u & X86_PDE4M_PG_MASK));
            /** @todo PhysSearch */
            else
            {
                /* 4K PDE: log it, then descend into the guest page table. */
                Log(( /*P R S A D G WT CD AT NX 4M a m d */
                     "%08x 0 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x\n",
                     u32Address,
                     Pde.n.u1Write ? 'W' : 'R',
                     Pde.n.u1User ? 'U' : 'S',
                     Pde.n.u1Accessed ? 'A' : '-',
                     Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
                     Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
                     Pde.n.u1WriteThru ? "WT" : "--",
                     Pde.n.u1CacheDisable? "CD" : "--",
                     Pde.u & BIT(9) ? '1' : '0',
                     Pde.u & BIT(10) ? '1' : '0',
                     Pde.u & BIT(11) ? '1' : '0',
                     Pde.u & X86_PDE_PG_MASK));
                ////if (cMaxDepth >= 1)
                {
                    /** @todo what about using the page pool for mapping PTs? */
                    RTGCPHYS GCPhys = Pde.u & X86_PDE_PG_MASK;
                    PX86PT pPT = NULL;

                    rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pPT);

                    int rc2 = VERR_INVALID_PARAMETER;
                    if (pPT)
                        rc2 = pgmR3DumpHierarchyGC32BitPT(pVM, pPT, u32Address, PhysSearch);
                    else
                        Log(("%08x error! Page table at %#x was not found in the page pool!\n", u32Address, GCPhys));
                    /* Keep the first (most negative) failure status. */
                    if (rc2 < rc && VBOX_SUCCESS(rc))
                        rc = rc2;
                }
            }
        }
    }

    return rc;
}
3043
3044
3045/**
3046 * Dumps a page table hierarchy use only physical addresses and cr4/lm flags.
3047 *
3048 * @returns VBox status code (VINF_SUCCESS).
3049 * @param pVM The VM handle.
3050 * @param cr3 The root of the hierarchy.
3051 * @param cr4 The cr4, only PAE and PSE is currently used.
3052 * @param fLongMode Set if long mode, false if not long mode.
3053 * @param cMaxDepth Number of levels to dump.
3054 * @param pHlp Pointer to the output functions.
3055 */
3056PGMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
3057{
3058 if (!pHlp)
3059 pHlp = DBGFR3InfoLogHlp();
3060 if (!cMaxDepth)
3061 return VINF_SUCCESS;
3062 const unsigned cch = fLongMode ? 16 : 8;
3063 pHlp->pfnPrintf(pHlp,
3064 "cr3=%08x cr4=%08x%s\n"
3065 "%-*s P - Present\n"
3066 "%-*s | R/W - Read (0) / Write (1)\n"
3067 "%-*s | | U/S - User (1) / Supervisor (0)\n"
3068 "%-*s | | | A - Accessed\n"
3069 "%-*s | | | | D - Dirty\n"
3070 "%-*s | | | | | G - Global\n"
3071 "%-*s | | | | | | WT - Write thru\n"
3072 "%-*s | | | | | | | CD - Cache disable\n"
3073 "%-*s | | | | | | | | AT - Attribute table (PAT)\n"
3074 "%-*s | | | | | | | | | NX - No execute (K8)\n"
3075 "%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
3076 "%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
3077 "%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
3078 "%-*s Level | | | | | | | | | | | | Page\n"
3079 /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
3080 - W U - - - -- -- -- -- -- 010 */
3081 , cr3, cr4, fLongMode ? " Long Mode" : "",
3082 cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
3083 cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address");
3084 if (cr4 & X86_CR4_PAE)
3085 {
3086 if (fLongMode)
3087 return pgmR3DumpHierarchyHcPaePML4(pVM, cr3 & X86_CR3_PAGE_MASK, cr4, cMaxDepth, pHlp);
3088 return pgmR3DumpHierarchyHCPaePDPTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
3089 }
3090 return pgmR3DumpHierarchyHC32BitPD(pVM, cr3 & X86_CR3_PAGE_MASK, cr4, cMaxDepth, pHlp);
3091}
3092
3093
3094
3095#ifdef VBOX_WITH_DEBUGGER
3096/**
3097 * The '.pgmram' command.
3098 *
3099 * @returns VBox status.
3100 * @param pCmd Pointer to the command descriptor (as registered).
3101 * @param pCmdHlp Pointer to command helper functions.
3102 * @param pVM Pointer to the current VM (if any).
3103 * @param paArgs Pointer to (readonly) array of arguments.
3104 * @param cArgs Number of arguments in the array.
3105 */
3106static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3107{
3108 /*
3109 * Validate input.
3110 */
3111 if (!pVM)
3112 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3113 if (!pVM->pgm.s.pRamRangesGC)
3114 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no Ram is registered.\n");
3115
3116 /*
3117 * Dump the ranges.
3118 */
3119 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "From - To (incl) pvHC\n");
3120 PPGMRAMRANGE pRam;
3121 for (pRam = pVM->pgm.s.pRamRangesHC; pRam; pRam = pRam->pNextHC)
3122 {
3123 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
3124 "%VGp - %VGp %p\n",
3125 pRam->GCPhys, pRam->GCPhysLast, pRam->pvHC);
3126 if (VBOX_FAILURE(rc))
3127 return rc;
3128 }
3129
3130 return VINF_SUCCESS;
3131}
3132
3133
3134/**
3135 * The '.pgmmap' command.
3136 *
3137 * @returns VBox status.
3138 * @param pCmd Pointer to the command descriptor (as registered).
3139 * @param pCmdHlp Pointer to command helper functions.
3140 * @param pVM Pointer to the current VM (if any).
3141 * @param paArgs Pointer to (readonly) array of arguments.
3142 * @param cArgs Number of arguments in the array.
3143 */
3144static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3145{
3146 /*
3147 * Validate input.
3148 */
3149 if (!pVM)
3150 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3151 if (!pVM->pgm.s.pMappingsR3)
3152 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no mappings are registered.\n");
3153
3154 /*
3155 * Print message about the fixedness of the mappings.
3156 */
3157 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, pVM->pgm.s.fMappingsFixed ? "The mappings are FIXED.\n" : "The mappings are FLOATING.\n");
3158 if (VBOX_FAILURE(rc))
3159 return rc;
3160
3161 /*
3162 * Dump the ranges.
3163 */
3164 PPGMMAPPING pCur;
3165 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
3166 {
3167 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
3168 "%08x - %08x %s\n",
3169 pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
3170 if (VBOX_FAILURE(rc))
3171 return rc;
3172 }
3173
3174 return VINF_SUCCESS;
3175}
3176
3177
3178/**
3179 * The '.pgmsync' command.
3180 *
3181 * @returns VBox status.
3182 * @param pCmd Pointer to the command descriptor (as registered).
3183 * @param pCmdHlp Pointer to command helper functions.
3184 * @param pVM Pointer to the current VM (if any).
3185 * @param paArgs Pointer to (readonly) array of arguments.
3186 * @param cArgs Number of arguments in the array.
3187 */
3188static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3189{
3190 /*
3191 * Validate input.
3192 */
3193 if (!pVM)
3194 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3195
3196 /*
3197 * Force page directory sync.
3198 */
3199 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
3200
3201 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Forcing page directory sync.\n");
3202 if (VBOX_FAILURE(rc))
3203 return rc;
3204
3205 return VINF_SUCCESS;
3206}
3207
3208
3209/**
3210 * The '.pgmsyncalways' command.
3211 *
3212 * @returns VBox status.
3213 * @param pCmd Pointer to the command descriptor (as registered).
3214 * @param pCmdHlp Pointer to command helper functions.
3215 * @param pVM Pointer to the current VM (if any).
3216 * @param paArgs Pointer to (readonly) array of arguments.
3217 * @param cArgs Number of arguments in the array.
3218 */
3219static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3220{
3221 /*
3222 * Validate input.
3223 */
3224 if (!pVM)
3225 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3226
3227 /*
3228 * Force page directory sync.
3229 */
3230 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS)
3231 {
3232 ASMAtomicAndU32(&pVM->pgm.s.fSyncFlags, ~PGM_SYNC_ALWAYS);
3233 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Disabled permanent forced page directory syncing.\n");
3234 }
3235 else
3236 {
3237 ASMAtomicOrU32(&pVM->pgm.s.fSyncFlags, PGM_SYNC_ALWAYS);
3238 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
3239 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Enabled permanent forced page directory syncing.\n");
3240 }
3241}
3242
3243#endif
3244
3245/**
3246 * pvUser argument of the pgmR3CheckIntegrity*Node callbacks.
3247 */
3248typedef struct PGMCHECKINTARGS
3249{
3250 bool fLeftToRight; /**< true: left-to-right; false: right-to-left. */
3251 PPGMPHYSHANDLER pPrevPhys;
3252 PPGMVIRTHANDLER pPrevVirt;
3253 PPGMPHYS2VIRTHANDLER pPrevPhys2Virt;
3254 PVM pVM;
3255} PGMCHECKINTARGS, *PPGMCHECKINTARGS;
3256
3257/**
3258 * Validate a node in the physical handler tree.
3259 *
3260 * @returns 0 on if ok, other wise 1.
3261 * @param pNode The handler node.
3262 * @param pvUser pVM.
3263 */
3264static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
3265{
3266 PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
3267 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode;
3268 AssertReleaseReturn(!((uintptr_t)pCur & 7), 1);
3269 AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGp-%VGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3270 AssertReleaseMsg( !pArgs->pPrevPhys
3271 || (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
3272 ("pPrevPhys=%p %VGp-%VGp %s\n"
3273 " pCur=%p %VGp-%VGp %s\n",
3274 pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
3275 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3276 pArgs->pPrevPhys = pCur;
3277 return 0;
3278}
3279
3280
3281/**
3282 * Validate a node in the virtual handler tree.
3283 *
3284 * @returns 0 on if ok, other wise 1.
3285 * @param pNode The handler node.
3286 * @param pvUser pVM.
3287 */
3288static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
3289{
3290 PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
3291 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
3292 AssertReleaseReturn(!((uintptr_t)pCur & 7), 1);
3293 AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGv-%VGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3294 AssertReleaseMsg( !pArgs->pPrevVirt
3295 || (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
3296 ("pPrevVirt=%p %VGv-%VGv %s\n"
3297 " pCur=%p %VGv-%VGv %s\n",
3298 pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
3299 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3300 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
3301 {
3302 AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
3303 ("pCur=%p %VGv-%VGv %s\n"
3304 "iPage=%d offVirtHandle=%#x expected %#x\n",
3305 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc,
3306 iPage, pCur->aPhysToVirt[iPage].offVirtHandler, -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage])));
3307 }
3308 pArgs->pPrevVirt = pCur;
3309 return 0;
3310}
3311
3312
3313/**
3314 * Validate a node in the virtual handler tree.
3315 *
3316 * @returns 0 on if ok, other wise 1.
3317 * @param pNode The handler node.
3318 * @param pvUser pVM.
3319 */
3320static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
3321{
3322 PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
3323 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
3324 AssertReleaseMsgReturn(!((uintptr_t)pCur & 3), ("\n"), 1);
3325 AssertReleaseMsgReturn(!(pCur->offVirtHandler & 3), ("\n"), 1);
3326 AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGp-%VGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
3327 AssertReleaseMsg( !pArgs->pPrevPhys2Virt
3328 || (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
3329 ("pPrevPhys2Virt=%p %VGp-%VGp\n"
3330 " pCur=%p %VGp-%VGp\n",
3331 pArgs->pPrevPhys2Virt, pArgs->pPrevPhys2Virt->Core.Key, pArgs->pPrevPhys2Virt->Core.KeyLast,
3332 pCur, pCur->Core.Key, pCur->Core.KeyLast));
3333 AssertReleaseMsg( !pArgs->pPrevPhys2Virt
3334 || (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
3335 ("pPrevPhys2Virt=%p %VGp-%VGp\n"
3336 " pCur=%p %VGp-%VGp\n",
3337 pArgs->pPrevPhys2Virt, pArgs->pPrevPhys2Virt->Core.Key, pArgs->pPrevPhys2Virt->Core.KeyLast,
3338 pCur, pCur->Core.Key, pCur->Core.KeyLast));
3339 AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
3340 ("pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3341 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias));
3342 if (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
3343 {
3344 PPGMPHYS2VIRTHANDLER pCur2 = pCur;
3345 for (;;)
3346 {
3347 pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
3348 AssertReleaseMsg(pCur2 != pCur,
3349 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3350 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias));
3351 AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
3352 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3353 "pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3354 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
3355 pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
3356 AssertReleaseMsg((pCur2->Core.Key ^ pCur->Core.Key) < PAGE_SIZE,
3357 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3358 "pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3359 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
3360 pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
3361 AssertReleaseMsg((pCur2->Core.KeyLast ^ pCur->Core.KeyLast) < PAGE_SIZE,
3362 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3363 "pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3364 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
3365 pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
3366 if (!(pCur2->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
3367 break;
3368 }
3369 }
3370
3371 pArgs->pPrevPhys2Virt = pCur;
3372 return 0;
3373}
3374
3375
3376/**
3377 * Perform an integrity check on the PGM component.
3378 *
3379 * @returns VINF_SUCCESS if everything is fine.
3380 * @returns VBox error status after asserting on integrity breach.
3381 * @param pVM The VM handle.
3382 */
3383PDMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
3384{
3385 AssertReleaseReturn(pVM->pgm.s.offVM, VERR_INTERNAL_ERROR);
3386
3387 /*
3388 * Check the trees.
3389 */
3390 int cErrors = 0;
3391 PGMCHECKINTARGS Args = { true, NULL, NULL, NULL, pVM };
3392 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
3393 Args.fLeftToRight = false;
3394 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
3395 Args.fLeftToRight = true;
3396 cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
3397 Args.fLeftToRight = false;
3398 cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesHC->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
3399 Args.fLeftToRight = true;
3400 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
3401 Args.fLeftToRight = false;
3402 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
3403
3404 return !cErrors ? VINF_SUCCESS : VERR_INTERNAL_ERROR;
3405}
3406
3407
3408/**
3409 * Inform PGM if we want all mappings to be put into the shadow page table. (necessary for e.g. VMX)
3410 *
3411 * @returns VBox status code.
3412 * @param pVM VM handle.
3413 * @param fEnable Enable or disable shadow mappings
3414 */
3415PGMR3DECL(int) PGMR3ChangeShwPDMappings(PVM pVM, bool fEnable)
3416{
3417 pVM->pgm.s.fDisableMappings = !fEnable;
3418
3419 size_t cb;
3420 int rc = PGMR3MappingsSize(pVM, &cb);
3421 AssertRCReturn(rc, rc);
3422
3423 /* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
3424 rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
3425 AssertRCReturn(rc, rc);
3426
3427 return VINF_SUCCESS;
3428}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette