VirtualBox

source: vbox/trunk/src/VBox/VMM/PGM.cpp@ 4050

Last change on this file since 4050 was 4013, checked in by vboxsync, 17 years ago

pdm.h = include pdm*.h; pdmapi.h = only the 'core' pdm APIs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 158.0 KB
Line 
1/* $Id: PGM.cpp 4013 2007-08-03 00:11:38Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor. (Mixing stuff here, not good?)
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/** @page pg_pgm PGM - The Page Manager and Monitor
24 *
25 *
26 *
27 * @section sec_pg_modes Paging Modes
28 *
29 * There are three memory contexts: Host Context (HC), Guest Context (GC)
 * and intermediate context. When talking about paging HC can also be referred to
 * as "host paging", and GC referred to as "shadow paging".
32 *
33 * We define three basic paging modes: 32-bit, PAE and AMD64. The host paging mode
34 * is defined by the host operating system. The mode used in the shadow paging mode
35 * depends on the host paging mode and what the mode the guest is currently in. The
36 * following relation between the two is defined:
37 *
38 * @verbatim
39 Host > 32-bit | PAE | AMD64 |
40 Guest | | | |
41 ==v================================
42 32-bit 32-bit PAE PAE
43 -------|--------|--------|--------|
44 PAE PAE PAE PAE
45 -------|--------|--------|--------|
46 AMD64 AMD64 AMD64 AMD64
47 -------|--------|--------|--------| @endverbatim
48 *
49 * All configuration except those in the diagonal (upper left) are expected to
50 * require special effort from the switcher (i.e. a bit slower).
51 *
52 *
53 *
54 *
55 * @section sec_pg_shw The Shadow Memory Context
56 *
57 *
58 * [..]
59 *
 * Because guest context mappings require PDPTR and PML4 entries to allow
61 * writing on AMD64, the two upper levels will have fixed flags whatever the
62 * guest is thinking of using there. So, when shadowing the PD level we will
63 * calculate the effective flags of PD and all the higher levels. In legacy
64 * PAE mode this only applies to the PWT and PCD bits (the rest are
65 * ignored/reserved/MBZ). We will ignore those bits for the present.
66 *
67 *
68 *
69 * @section sec_pg_int The Intermediate Memory Context
70 *
 * The world switch goes through an intermediate memory context whose purpose
 * is to provide different mappings of the switcher code. All guest mappings are also
73 * present in this context.
74 *
75 * The switcher code is mapped at the same location as on the host, at an
76 * identity mapped location (physical equals virtual address), and at the
77 * hypervisor location.
78 *
79 * PGM maintain page tables for 32-bit, PAE and AMD64 paging modes. This
80 * simplifies switching guest CPU mode and consistency at the cost of more
81 * code to do the work. All memory use for those page tables is located below
82 * 4GB (this includes page tables for guest context mappings).
83 *
84 *
85 * @subsection subsec_pg_int_gc Guest Context Mappings
86 *
87 * During assignment and relocation of a guest context mapping the intermediate
88 * memory context is used to verify the new location.
89 *
90 * Guest context mappings are currently restricted to below 4GB, for reasons
91 * of simplicity. This may change when we implement AMD64 support.
92 *
93 *
94 *
95 *
96 * @section sec_pg_misc Misc
97 *
98 * @subsection subsec_pg_misc_diff Differences Between Legacy PAE and Long Mode PAE
99 *
100 * The differences between legacy PAE and long mode PAE are:
 * -# PDPE bits 1, 2, 5 and 6 are defined differently. In legacy mode they are
102 * all marked down as must-be-zero, while in long mode 1, 2 and 5 have the
103 * usual meanings while 6 is ignored (AMD). This means that upon switching to
104 * legacy PAE mode we'll have to clear these bits and when going to long mode
105 * they must be set. This applies to both intermediate and shadow contexts,
106 * however we don't need to do it for the intermediate one since we're
107 * executing with CR0.WP at that time.
108 * -# CR3 allows a 32-byte aligned address in legacy mode, while in long mode
109 * a page aligned one is required.
110 */
111
112
113
114/** Saved state data unit version. */
115#define PGM_SAVED_STATE_VERSION 5
116
117/*******************************************************************************
118* Header Files *
119*******************************************************************************/
120#define LOG_GROUP LOG_GROUP_PGM
121#include <VBox/dbgf.h>
122#include <VBox/pgm.h>
123#include <VBox/cpum.h>
124#include <VBox/iom.h>
125#include <VBox/sup.h>
126#include <VBox/mm.h>
127#include <VBox/em.h>
128#include <VBox/stam.h>
129#include <VBox/rem.h>
130#include <VBox/dbgf.h>
131#include <VBox/rem.h>
132#include <VBox/selm.h>
133#include <VBox/ssm.h>
134#include "PGMInternal.h"
135#include <VBox/vm.h>
136#include <VBox/dbg.h>
137#include <VBox/hwaccm.h>
138
139#include <iprt/assert.h>
140#include <iprt/alloc.h>
141#include <iprt/asm.h>
142#include <iprt/thread.h>
143#include <iprt/string.h>
144#include <VBox/param.h>
145#include <VBox/err.h>
146
147
148
149/*******************************************************************************
150* Internal Functions *
151*******************************************************************************/
152static int pgmR3InitPaging(PVM pVM);
153static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
154static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
155static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
156static DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser);
157static DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser);
158#ifdef VBOX_STRICT
159static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser);
160#endif
161static DECLCALLBACK(int) pgmR3Save(PVM pVM, PSSMHANDLE pSSM);
162static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
163static int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0);
164static void pgmR3ModeDataSwitch(PVM pVM, PGMMODE enmShw, PGMMODE enmGst);
165static PGMMODE pgmR3CalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher);
166
167#ifdef VBOX_WITH_STATISTICS
168static void pgmR3InitStats(PVM pVM);
169#endif
170
171#ifdef VBOX_WITH_DEBUGGER
172/** @todo all but the two last commands must be converted to 'info'. */
173static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
174static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
175static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
176static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
177#endif
178
179
180/*******************************************************************************
181* Global Variables *
182*******************************************************************************/
183#ifdef VBOX_WITH_DEBUGGER
/** Command descriptors.
 * Registered once (first VM) with the debugger console from PGMR3Init;
 * none of the commands take arguments or produce a typed result. */
static const DBGCCMD g_aCmds[] =
{
    /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler pszSyntax, ....pszDescription */
    { "pgmram", 0, 0, NULL, 0, NULL, 0, pgmR3CmdRam, "", "Display the ram ranges." },
    { "pgmmap", 0, 0, NULL, 0, NULL, 0, pgmR3CmdMap, "", "Display the mapping ranges." },
    { "pgmsync", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSync, "", "Sync the CR3 page." },
    { "pgmsyncalways", 0, 0, NULL, 0, NULL, 0, pgmR3CmdSyncAlways, "", "Toggle permanent CR3 syncing." },
};
193#endif
194
195
196
197
#if 1/// @todo ndef RT_ARCH_AMD64
/*
 * Shadow - 32-bit mode
 *
 * The blocks below instantiate the mode-specific paging worker templates:
 * PGMShw.h (shadow context), PGMGst.h (guest context) and PGMBth.h (the
 * combined shadow+guest workers). For each shadow/guest combination the
 * type and name-mangling macros are defined, the template headers are
 * included, and the macros are undefined again before the next combination.
 * Note that PGMGst.h is included only once per guest mode (here, under the
 * 32-bit shadow section) since the guest workers don't depend on the
 * shadow mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#define PGM_SHW_NAME_GC_STR(name) PGM_SHW_NAME_GC_32BIT_STR(name)
#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_32BIT_STR(name)
#include "PGMShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_32BIT_REAL_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_REAL_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMGst.h"
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_32BIT_PROT_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_PROT_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMGst.h"
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_32BIT_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_32BIT_32BIT_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#include "PGMGst.h"
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#undef PGM_SHW_NAME_GC_STR
#undef PGM_SHW_NAME_R0_STR
#endif /* !RT_ARCH_AMD64 */
275
276
/*
 * Shadow - PAE mode
 *
 * Same template instantiation pattern as the 32-bit shadow section above.
 * Real and protected guest modes only pull in PGMBth.h here, because the
 * guest workers (PGMGst.h) were already instantiated for those modes above.
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_SHW_NAME_GC_STR(name) PGM_SHW_NAME_GC_PAE_STR(name)
#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_PAE_STR(name)
/* NOTE(review): PGM_BTH_NAME is defined here before PGMShw.h and then defined
   again (identically) for the real-mode guest below without an intervening
   #undef. The identical redefinition is legal, but this early define looks
   redundant -- verify PGMShw.h does not rely on it before removing. */
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_REAL_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_REAL_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_PROT_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_PROT_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_32BIT_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_32BIT_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_32BIT_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_32BIT_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PAE_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PAE_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_PAE_PAE_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_PAE_PAE_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMGst.h"
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#undef PGM_SHW_NAME_GC_STR
#undef PGM_SHW_NAME_R0_STR
372
373
/*
 * Shadow - AMD64 mode
 *
 * Same template instantiation pattern as the preceding shadow sections.
 * Real and protected guest modes only pull in PGMBth.h (guest workers
 * already instantiated); the AMD64 guest also instantiates PGMGst.h.
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#define PGM_SHW_NAME_GC_STR(name) PGM_SHW_NAME_GC_AMD64_STR(name)
#define PGM_SHW_NAME_R0_STR(name) PGM_SHW_NAME_R0_AMD64_STR(name)
#include "PGMShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_REAL_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_REAL_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_REAL(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_REAL_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_REAL_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_PROT_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_PROT_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_PROT_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_PROT_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

/* Guest - AMD64 mode */
#define PGM_GST_TYPE PGM_TYPE_AMD64
#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
#define PGM_GST_NAME_GC_STR(name) PGM_GST_NAME_GC_AMD64_STR(name)
#define PGM_GST_NAME_R0_STR(name) PGM_GST_NAME_R0_AMD64_STR(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
#define PGM_BTH_NAME_GC_STR(name) PGM_BTH_NAME_GC_AMD64_AMD64_STR(name)
#define PGM_BTH_NAME_R0_STR(name) PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMGst.h"
#include "PGMBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_BTH_NAME_GC_STR
#undef PGM_BTH_NAME_R0_STR
#undef PGM_GST_TYPE
#undef PGM_GST_NAME
#undef PGM_GST_NAME_GC_STR
#undef PGM_GST_NAME_R0_STR

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#undef PGM_SHW_NAME_GC_STR
#undef PGM_SHW_NAME_R0_STR
447
448
/**
 * Initiates the paging of VM.
 *
 * Initializes the PGM state structure, registers the saved state unit,
 * creates the PGM critical section, allocates the handler trees, runs
 * pgmR3InitPaging and pgmR3PoolInit, and finally registers debug info
 * handlers, statistics and (optionally) the debugger commands.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to VM structure.
 */
PGMR3DECL(int) PGMR3Init(PVM pVM)
{
    LogFlow(("PGMR3Init:\n"));

    /*
     * Assert alignment and sizes.
     */
    AssertRelease(sizeof(pVM->pgm.s) <= sizeof(pVM->pgm.padding));

    /*
     * Init the structure.
     */
    pVM->pgm.s.offVM = RT_OFFSETOF(VM, pgm.s);
    pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
    pVM->pgm.s.enmGuestMode = PGMMODE_INVALID;
    pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
    pVM->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
    pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
    pVM->pgm.s.fA20Enabled = true;      /* The A20 gate starts out enabled. */
    pVM->pgm.s.pGstPaePDPTRHC = NULL;
    pVM->pgm.s.pGstPaePDPTRGC = 0;
    for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apGstPaePDsHC); i++)
    {
        pVM->pgm.s.apGstPaePDsHC[i] = NULL;
        pVM->pgm.s.apGstPaePDsGC[i] = 0;
        pVM->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
    }

#ifdef VBOX_STRICT
    VMR3AtStateRegister(pVM, pgmR3ResetNoMorePhysWritesFlag, NULL);
#endif

    /*
     * Get the configured RAM size - to estimate saved state size.
     */
    uint64_t cbRam;
    int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = pVM->pgm.s.cbRamSize = 0;   /* no RamSize key: treat as zero RAM */
    else if (VBOX_SUCCESS(rc))
    {
        /* Round up to a whole number of pages; sizes below one page count as none. */
        if (cbRam < PAGE_SIZE)
            cbRam = 0;
        cbRam = RT_ALIGN_64(cbRam, PAGE_SIZE);
        pVM->pgm.s.cbRamSize = (RTUINT)cbRam;
    }
    else
    {
        AssertMsgFailed(("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc));
        return rc;
    }

    /*
     * Register saved state data unit.
     * The unit size estimate covers the whole RAM plus the PGM state itself.
     */
    rc = SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
                               NULL, pgmR3Save, NULL,
                               NULL, pgmR3Load, NULL);
    if (VBOX_FAILURE(rc))
        return rc;

    /* Initialise PGM critical section. */
    rc = PDMR3CritSectInit(pVM, &pVM->pgm.s.CritSect, "PGM");
    AssertRCReturn(rc, rc);

    /*
     * Trees (physical/virtual handler lookup trees in hypervisor memory).
     */
    rc = MMHyperAlloc(pVM, sizeof(PGMTREES), 0, MM_TAG_PGM, (void **)&pVM->pgm.s.pTreesHC);
    if (VBOX_SUCCESS(rc))
    {
        pVM->pgm.s.pTreesGC = MMHyperHC2GC(pVM, pVM->pgm.s.pTreesHC);

        /*
         * Init the paging.
         */
        rc = pgmR3InitPaging(pVM);
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Init the page pool.
         */
        rc = pgmR3PoolInit(pVM);
    }
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Info & statistics
         */
        DBGFR3InfoRegisterInternal(pVM, "mode",
                                   "Shows the current paging mode. "
                                   "Recognizes 'all', 'guest', 'shadow' and 'host' as arguments, defaulting to 'all' if nothing's given.",
                                   pgmR3InfoMode);
        DBGFR3InfoRegisterInternal(pVM, "pgmcr3",
                                   "Dumps all the entries in the top level paging table. No arguments.",
                                   pgmR3InfoCr3);
        DBGFR3InfoRegisterInternal(pVM, "phys",
                                   "Dumps all the physical address ranges. No arguments.",
                                   pgmR3PhysInfo);
        DBGFR3InfoRegisterInternal(pVM, "handlers",
                                   "Dumps physical and virtual handlers. "
                                   "Pass 'phys' or 'virt' as argument if only one kind is wanted.",
                                   pgmR3InfoHandlers);

        STAM_REL_REG(pVM, &pVM->pgm.s.cGuestModeChanges, STAMTYPE_COUNTER, "/PGM/cGuestModeChanges", STAMUNIT_OCCURENCES, "Number of guest mode changes.");
#ifdef VBOX_WITH_STATISTICS
        pgmR3InitStats(pVM);
#endif
#ifdef VBOX_WITH_DEBUGGER
        /*
         * Debugger commands. Registered once for the process, not per VM.
         */
        static bool fRegisteredCmds = false;
        if (!fRegisteredCmds)
        {
            /* Deliberately shadows the outer 'rc': a command registration
               failure is best-effort and must not fail VM init. */
            int rc = DBGCRegisterCommands(&g_aCmds[0], ELEMENTS(g_aCmds));
            if (VBOX_SUCCESS(rc))
                fRegisteredCmds = true;
        }
#endif
        return VINF_SUCCESS;
    }
    /* No cleanup necessary, MM frees all memory. */

    return rc;
}
582
583
584/**
585 * Init paging.
586 *
587 * Since we need to check what mode the host is operating in before we can choose
588 * the right paging functions for the host we have to delay this until R0 has
589 * been initialized.
590 *
591 * @returns VBox status code.
592 * @param pVM VM handle.
593 */
594static int pgmR3InitPaging(PVM pVM)
595{
596 /*
597 * Force a recalculation of modes and switcher so everyone gets notified.
598 */
599 pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
600 pVM->pgm.s.enmGuestMode = PGMMODE_INVALID;
601 pVM->pgm.s.enmHostMode = SUPPAGINGMODE_INVALID;
602
603 /*
604 * Allocate static mapping space for whatever the cr3 register
605 * points to and in the case of PAE mode to the 4 PDs.
606 */
607 int rc = MMR3HyperReserve(pVM, PAGE_SIZE * 5, "CR3 mapping", &pVM->pgm.s.GCPtrCR3Mapping);
608 if (VBOX_FAILURE(rc))
609 {
610 AssertMsgFailed(("Failed to reserve two pages for cr mapping in HMA, rc=%Vrc\n", rc));
611 return rc;
612 }
613 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
614
615 /*
616 * Allocate pages for the three possible intermediate contexts
617 * (AMD64, PAE and plain 32-Bit). We maintain all three contexts
618 * for the sake of simplicity. The AMD64 uses the PAE for the
619 * lower levels, making the total number of pages 11 (3 + 7 + 1).
620 *
621 * We assume that two page tables will be enought for the core code
622 * mappings (HC virtual and identity).
623 */
624 pVM->pgm.s.pInterPD = (PX86PD)MMR3PageAllocLow(pVM);
625 pVM->pgm.s.apInterPTs[0] = (PX86PT)MMR3PageAllocLow(pVM);
626 pVM->pgm.s.apInterPTs[1] = (PX86PT)MMR3PageAllocLow(pVM);
627 pVM->pgm.s.apInterPaePTs[0] = (PX86PTPAE)MMR3PageAlloc(pVM);
628 pVM->pgm.s.apInterPaePTs[1] = (PX86PTPAE)MMR3PageAlloc(pVM);
629 pVM->pgm.s.apInterPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM);
630 pVM->pgm.s.apInterPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM);
631 pVM->pgm.s.apInterPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM);
632 pVM->pgm.s.apInterPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
633 pVM->pgm.s.pInterPaePDPTR = (PX86PDPTR)MMR3PageAllocLow(pVM);
634 pVM->pgm.s.pInterPaePDPTR64 = (PX86PDPTR)MMR3PageAllocLow(pVM);
635 pVM->pgm.s.pInterPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM);
636 if ( !pVM->pgm.s.pInterPD
637 || !pVM->pgm.s.apInterPTs[0]
638 || !pVM->pgm.s.apInterPTs[1]
639 || !pVM->pgm.s.apInterPaePTs[0]
640 || !pVM->pgm.s.apInterPaePTs[1]
641 || !pVM->pgm.s.apInterPaePDs[0]
642 || !pVM->pgm.s.apInterPaePDs[1]
643 || !pVM->pgm.s.apInterPaePDs[2]
644 || !pVM->pgm.s.apInterPaePDs[3]
645 || !pVM->pgm.s.pInterPaePDPTR
646 || !pVM->pgm.s.pInterPaePDPTR64
647 || !pVM->pgm.s.pInterPaePML4)
648 {
649 AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
650 return VERR_NO_PAGE_MEMORY;
651 }
652
653 pVM->pgm.s.HCPhysInterPD = MMPage2Phys(pVM, pVM->pgm.s.pInterPD);
654 AssertRelease(pVM->pgm.s.HCPhysInterPD != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPD & PAGE_OFFSET_MASK));
655 pVM->pgm.s.HCPhysInterPaePDPTR = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPTR);
656 AssertRelease(pVM->pgm.s.HCPhysInterPaePDPTR != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePDPTR & PAGE_OFFSET_MASK));
657 pVM->pgm.s.HCPhysInterPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePML4);
658 AssertRelease(pVM->pgm.s.HCPhysInterPaePML4 != NIL_RTHCPHYS && !(pVM->pgm.s.HCPhysInterPaePML4 & PAGE_OFFSET_MASK));
659
660 /*
661 * Initialize the pages, setting up the PML4 and PDPTR for repetitive 4GB action.
662 */
663 ASMMemZeroPage(pVM->pgm.s.pInterPD);
664 ASMMemZeroPage(pVM->pgm.s.apInterPTs[0]);
665 ASMMemZeroPage(pVM->pgm.s.apInterPTs[1]);
666
667 ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[0]);
668 ASMMemZeroPage(pVM->pgm.s.apInterPaePTs[1]);
669
670 ASMMemZeroPage(pVM->pgm.s.pInterPaePDPTR);
671 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apInterPaePDs); i++)
672 {
673 ASMMemZeroPage(pVM->pgm.s.apInterPaePDs[i]);
674 pVM->pgm.s.pInterPaePDPTR->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT
675 | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[i]);
676 }
677
678 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.pInterPaePDPTR64->a); i++)
679 {
680 const unsigned iPD = i % ELEMENTS(pVM->pgm.s.apInterPaePDs);
681 pVM->pgm.s.pInterPaePDPTR64->a[i].u = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A | PGM_PLXFLAGS_PERMANENT
682 | MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[iPD]);
683 }
684
685 RTHCPHYS HCPhysInterPaePDPTR64 = MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPTR64);
686 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.pInterPaePML4->a); i++)
687 pVM->pgm.s.pInterPaePML4->a[i].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A | PGM_PLXFLAGS_PERMANENT
688 | HCPhysInterPaePDPTR64;
689
690 /*
691 * Allocate pages for the three possible guest contexts (AMD64, PAE and plain 32-Bit).
692 * We allocate pages for all three posibilities to in order to simplify mappings and
693 * avoid resource failure during mode switches. So, we need to cover all levels of the
694 * of the first 4GB down to PD level.
695 * As with the intermediate context, AMD64 uses the PAE PDPTR and PDs.
696 */
697 pVM->pgm.s.pHC32BitPD = (PX86PD)MMR3PageAllocLow(pVM);
698 pVM->pgm.s.apHCPaePDs[0] = (PX86PDPAE)MMR3PageAlloc(pVM);
699 pVM->pgm.s.apHCPaePDs[1] = (PX86PDPAE)MMR3PageAlloc(pVM);
700 AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[0] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[1]);
701 pVM->pgm.s.apHCPaePDs[2] = (PX86PDPAE)MMR3PageAlloc(pVM);
702 AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[1] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[2]);
703 pVM->pgm.s.apHCPaePDs[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
704 AssertRelease((uintptr_t)pVM->pgm.s.apHCPaePDs[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apHCPaePDs[3]);
705 pVM->pgm.s.pHCPaePDPTR = (PX86PDPTR)MMR3PageAllocLow(pVM);
706 pVM->pgm.s.pHCPaePML4 = (PX86PML4)MMR3PageAllocLow(pVM);
707 if ( !pVM->pgm.s.pHC32BitPD
708 || !pVM->pgm.s.apHCPaePDs[0]
709 || !pVM->pgm.s.apHCPaePDs[1]
710 || !pVM->pgm.s.apHCPaePDs[2]
711 || !pVM->pgm.s.apHCPaePDs[3]
712 || !pVM->pgm.s.pHCPaePDPTR
713 || !pVM->pgm.s.pHCPaePML4)
714 {
715 AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
716 return VERR_NO_PAGE_MEMORY;
717 }
718
719 /* get physical addresses. */
720 pVM->pgm.s.HCPhys32BitPD = MMPage2Phys(pVM, pVM->pgm.s.pHC32BitPD);
721 Assert(MMPagePhys2Page(pVM, pVM->pgm.s.HCPhys32BitPD) == pVM->pgm.s.pHC32BitPD);
722 pVM->pgm.s.aHCPhysPaePDs[0] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[0]);
723 pVM->pgm.s.aHCPhysPaePDs[1] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[1]);
724 pVM->pgm.s.aHCPhysPaePDs[2] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[2]);
725 pVM->pgm.s.aHCPhysPaePDs[3] = MMPage2Phys(pVM, pVM->pgm.s.apHCPaePDs[3]);
726 pVM->pgm.s.HCPhysPaePDPTR = MMPage2Phys(pVM, pVM->pgm.s.pHCPaePDPTR);
727 pVM->pgm.s.HCPhysPaePML4 = MMPage2Phys(pVM, pVM->pgm.s.pHCPaePML4);
728
729 /*
730 * Initialize the pages, setting up the PML4 and PDPTR for action below 4GB.
731 */
732 ASMMemZero32(pVM->pgm.s.pHC32BitPD, PAGE_SIZE);
733
734 ASMMemZero32(pVM->pgm.s.pHCPaePDPTR, PAGE_SIZE);
735 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apHCPaePDs); i++)
736 {
737 ASMMemZero32(pVM->pgm.s.apHCPaePDs[i], PAGE_SIZE);
738 pVM->pgm.s.pHCPaePDPTR->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT | pVM->pgm.s.aHCPhysPaePDs[i];
739 /* The flags will be corrected when entering and leaving long mode. */
740 }
741
742 ASMMemZero32(pVM->pgm.s.pHCPaePML4, PAGE_SIZE);
743 pVM->pgm.s.pHCPaePML4->a[0].u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_A
744 | PGM_PLXFLAGS_PERMANENT | pVM->pgm.s.HCPhysPaePDPTR;
745
746 CPUMSetHyperCR3(pVM, (uint32_t)pVM->pgm.s.HCPhys32BitPD);
747
748 /*
749 * Initialize paging workers and mode from current host mode
750 * and the guest running in real mode.
751 */
752 pVM->pgm.s.enmHostMode = SUPGetPagingMode();
753 switch (pVM->pgm.s.enmHostMode)
754 {
755 case SUPPAGINGMODE_32_BIT:
756 case SUPPAGINGMODE_32_BIT_GLOBAL:
757 case SUPPAGINGMODE_PAE:
758 case SUPPAGINGMODE_PAE_GLOBAL:
759 case SUPPAGINGMODE_PAE_NX:
760 case SUPPAGINGMODE_PAE_GLOBAL_NX:
761 break;
762
763 case SUPPAGINGMODE_AMD64:
764 case SUPPAGINGMODE_AMD64_GLOBAL:
765 case SUPPAGINGMODE_AMD64_NX:
766 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
767#ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL
768 if (ARCH_BITS != 64)
769 {
770 AssertMsgFailed(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
771 LogRel(("Host mode %d (64-bit) is not supported by non-64bit builds\n", pVM->pgm.s.enmHostMode));
772 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
773 }
774#endif
775 break;
776 default:
777 AssertMsgFailed(("Host mode %d is not supported\n", pVM->pgm.s.enmHostMode));
778 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
779 }
780 rc = pgmR3ModeDataInit(pVM, false /* don't resolve GC and R0 syms yet */);
781 if (VBOX_SUCCESS(rc))
782 rc = pgmR3ChangeMode(pVM, PGMMODE_REAL);
783 if (VBOX_SUCCESS(rc))
784 {
785 LogFlow(("pgmR3InitPaging: returns successfully\n"));
786#if HC_ARCH_BITS == 64
787LogRel(("Debug: HCPhys32BitPD=%VHp aHCPhysPaePDs={%VHp,%VHp,%VHp,%VHp} HCPhysPaePDPTR=%VHp HCPhysPaePML4=%VHp\n",
788 pVM->pgm.s.HCPhys32BitPD, pVM->pgm.s.aHCPhysPaePDs[0], pVM->pgm.s.aHCPhysPaePDs[1], pVM->pgm.s.aHCPhysPaePDs[2], pVM->pgm.s.aHCPhysPaePDs[3],
789 pVM->pgm.s.HCPhysPaePDPTR, pVM->pgm.s.HCPhysPaePML4));
790LogRel(("Debug: HCPhysInterPD=%VHp HCPhysInterPaePDPTR=%VHp HCPhysInterPaePML4=%VHp\n",
791 pVM->pgm.s.HCPhysInterPD, pVM->pgm.s.HCPhysInterPaePDPTR, pVM->pgm.s.HCPhysInterPaePML4));
792LogRel(("Debug: apInterPTs={%VHp,%VHp} apInterPaePTs={%VHp,%VHp} apInterPaePDs={%VHp,%VHp,%VHp,%VHp} pInterPaePDPTR64=%VHp\n",
793 MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]),
794 MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]),
795 MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[0]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[1]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[2]), MMPage2Phys(pVM, pVM->pgm.s.apInterPaePDs[3]),
796 MMPage2Phys(pVM, pVM->pgm.s.pInterPaePDPTR64)));
797#endif
798
799 return VINF_SUCCESS;
800 }
801
802 LogFlow(("pgmR3InitPaging: returns %Vrc\n", rc));
803 return rc;
804}
805
806
#ifdef VBOX_WITH_STATISTICS
/**
 * Init statistics
 *
 * Registers every PGM statistics sample with STAM. Pure registration — no
 * PGM state other than the STAM registry is touched. Only compiled in when
 * VBOX_WITH_STATISTICS is defined.
 *
 * NOTE(review): several registered names/descriptions below contain typos
 * ("DstChacheHit", "pending pending SyncCR3", "grows to long"). The name
 * strings are part of the externally visible statistics paths, so they are
 * deliberately left untouched here — renaming would break consumers of the
 * /PGM/... paths.
 */
static void pgmR3InitStats(PVM pVM)
{
    PPGM pPGM = &pVM->pgm.s;

    /* GC (guest context) page invalidation and page table syncing. */
    STAM_REG(pVM, &pPGM->StatGCInvalidatePage, STAMTYPE_PROFILE, "/PGM/GC/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMGCInvalidatePage() profiling.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a 4KB page.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a 4MB page.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() skipped a 4MB page.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict).");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a not accessed page directory.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for a not present page directory.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for an out of sync page directory.");
    STAM_REG(pVM, &pPGM->StatGCInvalidatePageSkipped, STAMTYPE_COUNTER, "/PGM/GC/InvalidatePage/Skipped", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
    /* GC dirty/accessed bit emulation and PGMGCSyncPT(). */
    STAM_REG(pVM, &pPGM->StatGCSyncPT, STAMTYPE_PROFILE, "/PGM/GC/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGCSyncPT() body.");
    STAM_REG(pVM, &pPGM->StatGCAccessedPage, STAMTYPE_COUNTER, "/PGM/GC/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
    STAM_REG(pVM, &pPGM->StatGCDirtyPage, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatGCDirtyPageBig, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatGCDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatGCDirtyPageSkipped, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/Skipped", STAMUNIT_OCCURENCES, "The number of pages already dirty or readonly.");
    STAM_REG(pVM, &pPGM->StatGCDirtiedPage, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/SetDirty", STAMUNIT_OCCURENCES, "The number of pages marked dirty because of write accesses.");
    STAM_REG(pVM, &pPGM->StatGCDirtyTrackRealPF, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/RealPF", STAMUNIT_OCCURENCES, "The number of real pages faults during dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatGCPageAlreadyDirty, STAMTYPE_COUNTER, "/PGM/GC/DirtyPage/AlreadySet", STAMUNIT_OCCURENCES, "The number of pages already marked dirty because of write accesses.");
    STAM_REG(pVM, &pPGM->StatGCDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/GC/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrackDirtyBit() body.");
    STAM_REG(pVM, &pPGM->StatGCSyncPTAlloc, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Alloc", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() needed to allocate page tables.");
    STAM_REG(pVM, &pPGM->StatGCSyncPTConflict, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Conflicts", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() detected conflicts.");
    STAM_REG(pVM, &pPGM->StatGCSyncPTFailed, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/Failed", STAMUNIT_OCCURENCES, "The number of times PGMGCSyncPT() failed.");

    /* #PF (Trap0e) handler profiling, broken down by cause. */
    STAM_REG(pVM, &pPGM->StatGCTrap0e, STAMTYPE_PROFILE, "/PGM/GC/Trap0e", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGCTrap0eHandler() body.");
    STAM_REG(pVM, &pPGM->StatCheckPageFault, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/CheckPageFault", STAMUNIT_TICKS_PER_CALL, "Profiling of checking for dirty/access emulation faults.");
    STAM_REG(pVM, &pPGM->StatLazySyncPT, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of lazy page table syncing.");
    STAM_REG(pVM, &pPGM->StatMapping, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/Mapping", STAMUNIT_TICKS_PER_CALL, "Profiling of checking virtual mappings.");
    STAM_REG(pVM, &pPGM->StatOutOfSync, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of out of sync page handling.");
    STAM_REG(pVM, &pPGM->StatHandlers, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking handlers.");
    STAM_REG(pVM, &pPGM->StatEIPHandlers, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time/EIPHandlers", STAMUNIT_TICKS_PER_CALL, "Profiling of checking eip handlers.");
    STAM_REG(pVM, &pPGM->StatTrap0eCSAM, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/CSAM", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is CSAM.");
    STAM_REG(pVM, &pPGM->StatTrap0eDirtyAndAccessedBits, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/DirtyAndAccessedBits", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation.");
    STAM_REG(pVM, &pPGM->StatTrap0eGuestTrap, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/GuestTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a guest trap.");
    STAM_REG(pVM, &pPGM->StatTrap0eHndPhys, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerPhysical", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a physical handler.");
    STAM_REG(pVM, &pPGM->StatTrap0eHndVirt, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerVirtual",STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is a virtual handler.");
    STAM_REG(pVM, &pPGM->StatTrap0eHndUnhandled, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/HandlerUnhandled", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page.");
    STAM_REG(pVM, &pPGM->StatTrap0eMisc, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is not known.");
    STAM_REG(pVM, &pPGM->StatTrap0eOutOfSync, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync page.");
    STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncHndPhys, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncHndPhys", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page.");
    STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncHndVirt, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncHndVirt", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page.");
    STAM_REG(pVM, &pPGM->StatTrap0eOutOfSyncObsHnd, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/OutOfSyncObsHnd", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is an obsolete handler page.");
    STAM_REG(pVM, &pPGM->StatTrap0eSyncPT, STAMTYPE_PROFILE, "/PGM/GC/Trap0e/Time2/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT.");

    /* #PF counters broken down by handler type. */
    STAM_REG(pVM, &pPGM->StatTrap0eMapHandler, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Mapping", STAMUNIT_OCCURENCES, "Number of traps due to access handlers in mappings.");
    STAM_REG(pVM, &pPGM->StatHandlersOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/OutOfSync", STAMUNIT_OCCURENCES, "Number of traps due to out-of-sync handled pages.");
    STAM_REG(pVM, &pPGM->StatHandlersPhysical, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Physical", STAMUNIT_OCCURENCES, "Number of traps due to physical access handlers.");
    STAM_REG(pVM, &pPGM->StatHandlersVirtual, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Virtual", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers.");
    STAM_REG(pVM, &pPGM->StatHandlersVirtualByPhys, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/VirtualByPhys", STAMUNIT_OCCURENCES, "Number of traps due to virtual access handlers by physical address.");
    STAM_REG(pVM, &pPGM->StatHandlersVirtualUnmarked, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/VirtualUnmarked", STAMUNIT_OCCURENCES,"Number of traps due to virtual access handlers by virtual address (without proper physical flags).");
    STAM_REG(pVM, &pPGM->StatHandlersUnhandled, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Handlers/Unhandled", STAMUNIT_OCCURENCES, "Number of traps due to access outside range of monitored page(s).");

    /* #PF counters broken down by user-mode fault kind. */
    STAM_REG(pVM, &pPGM->StatGCTrap0eConflicts, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Conflicts", STAMUNIT_OCCURENCES, "The number of times #PF was caused by an undetected conflict.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eUSNotPresentRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/NPRead", STAMUNIT_OCCURENCES, "Number of user mode not present read page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eUSNotPresentWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/NPWrite", STAMUNIT_OCCURENCES, "Number of user mode not present write page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eUSWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Write", STAMUNIT_OCCURENCES, "Number of user mode write page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eUSReserved, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Reserved", STAMUNIT_OCCURENCES, "Number of user mode reserved bit page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eUSRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/User/Read", STAMUNIT_OCCURENCES, "Number of user mode read page faults.");

    /* #PF counters broken down by supervisor-mode fault kind. */
    STAM_REG(pVM, &pPGM->StatGCTrap0eSVNotPresentRead, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/NPRead", STAMUNIT_OCCURENCES, "Number of supervisor mode not present read page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eSVNotPresentWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/NPWrite", STAMUNIT_OCCURENCES, "Number of supervisor mode not present write page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eSVWrite, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/Write", STAMUNIT_OCCURENCES, "Number of supervisor mode write page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eSVReserved, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/Supervisor/Reserved", STAMUNIT_OCCURENCES, "Number of supervisor mode reserved bit page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eUnhandled, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/GuestPF/Unhandled", STAMUNIT_OCCURENCES, "Number of guest real page faults.");
    STAM_REG(pVM, &pPGM->StatGCTrap0eMap, STAMTYPE_COUNTER, "/PGM/GC/Trap0e/GuestPF/Map", STAMUNIT_OCCURENCES, "Number of guest page faults due to map accesses.");


    /* Guest CR3 write monitoring. */
    STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteHandled, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteInt", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was successfully handled.");
    STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteUnhandled, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteEmu", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 change was passed back to the recompiler.");
    STAM_REG(pVM, &pPGM->StatGCGuestCR3WriteConflict, STAMTYPE_COUNTER, "/PGM/GC/CR3WriteConflict", STAMUNIT_OCCURENCES, "The number of times the Guest CR3 monitoring detected a conflict.");

    STAM_REG(pVM, &pPGM->StatGCPageOutOfSyncSupervisor, STAMTYPE_COUNTER, "/PGM/GC/OutOfSync/SuperVisor", STAMUNIT_OCCURENCES, "Number of traps due to pages out of sync.");
    STAM_REG(pVM, &pPGM->StatGCPageOutOfSyncUser, STAMTYPE_COUNTER, "/PGM/GC/OutOfSync/User", STAMUNIT_OCCURENCES, "Number of traps due to pages out of sync.");

    /* Guest ROM write monitoring. */
    STAM_REG(pVM, &pPGM->StatGCGuestROMWriteHandled, STAMTYPE_COUNTER, "/PGM/GC/ROMWriteInt", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was successfully handled.");
    STAM_REG(pVM, &pPGM->StatGCGuestROMWriteUnhandled, STAMTYPE_COUNTER, "/PGM/GC/ROMWriteEmu", STAMUNIT_OCCURENCES, "The number of times the Guest ROM change was passed back to the recompiler.");

    /* Dynamic page mapping cache. */
    STAM_REG(pVM, &pPGM->StatDynMapCacheHits, STAMTYPE_COUNTER, "/PGM/GC/DynMapCache/Hits" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache hits.");
    STAM_REG(pVM, &pPGM->StatDynMapCacheMisses, STAMTYPE_COUNTER, "/PGM/GC/DynMapCache/Misses" , STAMUNIT_OCCURENCES, "Number of dynamic page mapping cache misses.");

    /* HC (host context) counterparts of the GC stats above. */
    STAM_REG(pVM, &pPGM->StatHCDetectedConflicts, STAMTYPE_COUNTER, "/PGM/HC/DetectedConflicts", STAMUNIT_OCCURENCES, "The number of times PGMR3CheckMappingConflicts() detected a conflict.");
    STAM_REG(pVM, &pPGM->StatHCGuestPDWrite, STAMTYPE_COUNTER, "/PGM/HC/PDWrite", STAMUNIT_OCCURENCES, "The total number of times pgmHCGuestPDWriteHandler() was called.");
    STAM_REG(pVM, &pPGM->StatHCGuestPDWriteConflict, STAMTYPE_COUNTER, "/PGM/HC/PDWriteConflict", STAMUNIT_OCCURENCES, "The number of times pgmHCGuestPDWriteHandler() detected a conflict.");

    STAM_REG(pVM, &pPGM->StatHCInvalidatePage, STAMTYPE_PROFILE, "/PGM/HC/InvalidatePage", STAMUNIT_TICKS_PER_CALL, "PGMHCInvalidatePage() profiling.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePage4KBPages, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4KBPages", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a 4KB page.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePage4MBPages, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4MBPages", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a 4MB page.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePage4MBPagesSkip, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/4MBPagesSkip",STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() skipped a 4MB page.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDMappings, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDMappings", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a page directory containing mappings (no conflict).");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDNAs, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDNAs", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a not accessed page directory.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDNPs, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDNPs", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was called for a not present page directory.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/PDOutOfSync", STAMUNIT_OCCURENCES, "The number of times PGMGCInvalidatePage() was called for an out of sync page directory.");
    STAM_REG(pVM, &pPGM->StatHCInvalidatePageSkipped, STAMTYPE_COUNTER, "/PGM/HC/InvalidatePage/Skipped", STAMUNIT_OCCURENCES, "The number of times PGMHCInvalidatePage() was skipped due to not present shw or pending pending SyncCR3.");
    STAM_REG(pVM, &pPGM->StatHCResolveConflict, STAMTYPE_PROFILE, "/PGM/HC/ResolveConflict", STAMUNIT_TICKS_PER_CALL, "pgmR3SyncPTResolveConflict() profiling (includes the entire relocation).");
    STAM_REG(pVM, &pPGM->StatHCPrefetch, STAMTYPE_PROFILE, "/PGM/HC/Prefetch", STAMUNIT_TICKS_PER_CALL, "PGMR3PrefetchPage profiling.");

    /* HC dirty/accessed bit emulation. */
    STAM_REG(pVM, &pPGM->StatHCSyncPT, STAMTYPE_PROFILE, "/PGM/HC/SyncPT", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMR3SyncPT() body.");
    STAM_REG(pVM, &pPGM->StatHCAccessedPage, STAMTYPE_COUNTER, "/PGM/HC/AccessedPage", STAMUNIT_OCCURENCES, "The number of pages marked not present for accessed bit emulation.");
    STAM_REG(pVM, &pPGM->StatHCDirtyPage, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Mark", STAMUNIT_OCCURENCES, "The number of pages marked read-only for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatHCDirtyPageBig, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/MarkBig", STAMUNIT_OCCURENCES, "The number of 4MB pages marked read-only for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatHCDirtyPageTrap, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Trap", STAMUNIT_OCCURENCES, "The number of traps generated for dirty bit tracking.");
    STAM_REG(pVM, &pPGM->StatHCDirtyPageSkipped, STAMTYPE_COUNTER, "/PGM/HC/DirtyPage/Skipped", STAMUNIT_OCCURENCES, "The number of pages already dirty or readonly.");
    STAM_REG(pVM, &pPGM->StatHCDirtyBitTracking, STAMTYPE_PROFILE, "/PGM/HC/DirtyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMTrackDirtyBit() body.");

    /* SyncPage statistics, both contexts. */
    STAM_REG(pVM, &pPGM->StatGCSyncPagePDNAs, STAMTYPE_COUNTER, "/PGM/GC/SyncPagePDNAs", STAMUNIT_OCCURENCES, "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
    STAM_REG(pVM, &pPGM->StatGCSyncPagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/GC/SyncPagePDOutOfSync", STAMUNIT_OCCURENCES, "The number of time we've encountered an out-of-sync PD in SyncPage.");
    STAM_REG(pVM, &pPGM->StatHCSyncPagePDNAs, STAMTYPE_COUNTER, "/PGM/HC/SyncPagePDNAs", STAMUNIT_OCCURENCES, "The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit.");
    STAM_REG(pVM, &pPGM->StatHCSyncPagePDOutOfSync, STAMTYPE_COUNTER, "/PGM/HC/SyncPagePDOutOfSync", STAMUNIT_OCCURENCES, "The number of time we've encountered an out-of-sync PD in SyncPage.");

    /* TLB flushing. */
    STAM_REG(pVM, &pPGM->StatFlushTLB, STAMTYPE_PROFILE, "/PGM/FlushTLB", STAMUNIT_OCCURENCES, "Profiling of the PGMFlushTLB() body.");
    STAM_REG(pVM, &pPGM->StatFlushTLBNewCR3, STAMTYPE_COUNTER, "/PGM/FlushTLB/NewCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, non-global. (switch)");
    STAM_REG(pVM, &pPGM->StatFlushTLBNewCR3Global, STAMTYPE_COUNTER, "/PGM/FlushTLB/NewCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with a new CR3, global. (switch)");
    STAM_REG(pVM, &pPGM->StatFlushTLBSameCR3, STAMTYPE_COUNTER, "/PGM/FlushTLB/SameCR3", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, non-global. (flush)");
    STAM_REG(pVM, &pPGM->StatFlushTLBSameCR3Global, STAMTYPE_COUNTER, "/PGM/FlushTLB/SameCR3Global", STAMUNIT_OCCURENCES, "The number of times PGMFlushTLB was called with the same CR3, global. (flush)");

    /* SyncCR3 statistics, GC. */
    STAM_REG(pVM, &pPGM->StatGCSyncCR3, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers/VirtualUpdate",STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/GC/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3Global, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3DstCacheHit, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstChacheHit", STAMUNIT_OCCURENCES, "The number of times we got some kind of a cache hit.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
    STAM_REG(pVM, &pPGM->StatGCSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/GC/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");

    /* SyncCR3 statistics, HC. */
    STAM_REG(pVM, &pPGM->StatHCSyncCR3, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() body.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3Handlers, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMSyncCR3() update handler section.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3HandlerVirtualUpdate, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers/VirtualUpdate",STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler updates.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3HandlerVirtualReset, STAMTYPE_PROFILE, "/PGM/HC/SyncCR3/Handlers/VirtualReset", STAMUNIT_TICKS_PER_CALL, "Profiling of the virtual handler resets.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3Global, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/Global", STAMUNIT_OCCURENCES, "The number of global CR3 syncs.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3NotGlobal, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/NotGlobal", STAMUNIT_OCCURENCES, "The number of non-global CR3 syncs.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3DstCacheHit, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstChacheHit", STAMUNIT_OCCURENCES, "The number of times we got some kind of a cache hit.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3DstFreed, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstFreed", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3DstFreedSrcNP, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstFreedSrcNP", STAMUNIT_OCCURENCES, "The number of times we've had to free a shadow entry for which the source entry was not present.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3DstNotPresent, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstNotPresent", STAMUNIT_OCCURENCES, "The number of times we've encountered a not present shadow entry for a present guest entry.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3DstSkippedGlobalPD, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstSkippedGlobalPD", STAMUNIT_OCCURENCES, "The number of times a global page directory wasn't flushed.");
    STAM_REG(pVM, &pPGM->StatHCSyncCR3DstSkippedGlobalPT, STAMTYPE_COUNTER, "/PGM/HC/SyncCR3/DstSkippedGlobalPT", STAMUNIT_OCCURENCES, "The number of times a page table with only global entries wasn't flushed.");

    /* Virtual handler lookups and physical handler resets. */
    STAM_REG(pVM, &pPGM->StatVirtHandleSearchByPhysGC, STAMTYPE_PROFILE, "/PGM/VirtHandler/SearchByPhys/GC", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr in GC.");
    STAM_REG(pVM, &pPGM->StatVirtHandleSearchByPhysHC, STAMTYPE_PROFILE, "/PGM/VirtHandler/SearchByPhys/HC", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmHandlerVirtualFindByPhysAddr in HC.");
    STAM_REG(pVM, &pPGM->StatHandlePhysicalReset, STAMTYPE_COUNTER, "/PGM/HC/HandlerPhysicalReset", STAMUNIT_OCCURENCES, "The number of times PGMR3HandlerPhysicalReset is called.");

    STAM_REG(pVM, &pPGM->StatHCGstModifyPage, STAMTYPE_PROFILE, "/PGM/HC/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");
    STAM_REG(pVM, &pPGM->StatGCGstModifyPage, STAMTYPE_PROFILE, "/PGM/GC/GstModifyPage", STAMUNIT_TICKS_PER_CALL, "Profiling of the PGMGstModifyPage() body.");

    /* SyncPT counts by page size. */
    STAM_REG(pVM, &pPGM->StatSynPT4kGC, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/4k", STAMUNIT_OCCURENCES, "Nr of 4k PT syncs");
    STAM_REG(pVM, &pPGM->StatSynPT4kHC, STAMTYPE_COUNTER, "/PGM/HC/SyncPT/4k", STAMUNIT_OCCURENCES, "Nr of 4k PT syncs");
    STAM_REG(pVM, &pPGM->StatSynPT4MGC, STAMTYPE_COUNTER, "/PGM/GC/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");
    STAM_REG(pVM, &pPGM->StatSynPT4MHC, STAMTYPE_COUNTER, "/PGM/HC/SyncPT/4M", STAMUNIT_OCCURENCES, "Nr of 4M PT syncs");

    /* Dynamic RAM allocation. */
    STAM_REG(pVM, &pPGM->StatDynRamTotal, STAMTYPE_COUNTER, "/PGM/RAM/TotalAlloc", STAMUNIT_MEGABYTES, "Allocated mbs of guest ram.");
    STAM_REG(pVM, &pPGM->StatDynRamGrow, STAMTYPE_COUNTER, "/PGM/RAM/Grow", STAMUNIT_OCCURENCES, "Nr of pgmr3PhysGrowRange calls.");

#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /* Guest physical page shadowing/reference tracking. */
    STAM_REG(pVM, &pPGM->StatTrackVirgin, STAMTYPE_COUNTER, "/PGM/Track/Virgin", STAMUNIT_OCCURENCES, "The number of first time shadowings");
    STAM_REG(pVM, &pPGM->StatTrackAliased, STAMTYPE_COUNTER, "/PGM/Track/Aliased", STAMUNIT_OCCURENCES, "The number of times switching to cRef2, i.e. the page is being shadowed by two PTs.");
    STAM_REG(pVM, &pPGM->StatTrackAliasedMany, STAMTYPE_COUNTER, "/PGM/Track/AliasedMany", STAMUNIT_OCCURENCES, "The number of times we're tracking using cRef2.");
    STAM_REG(pVM, &pPGM->StatTrackAliasedLots, STAMTYPE_COUNTER, "/PGM/Track/AliasedLots", STAMUNIT_OCCURENCES, "The number of times we're hitting pages which has overflowed cRef2");
    STAM_REG(pVM, &pPGM->StatTrackOverflows, STAMTYPE_COUNTER, "/PGM/Track/Overflows", STAMUNIT_OCCURENCES, "The number of times the extent list grows to long.");
    STAM_REG(pVM, &pPGM->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Track/Deref", STAMUNIT_OCCURENCES, "Profiling of SyncPageWorkerTrackDeref (expensive).");
#endif

    /* Per page directory entry counters, registered with formatted names. */
    for (unsigned i = 0; i < PAGE_ENTRIES; i++)
    {
        /** @todo r=bird: We need a STAMR3RegisterF()! */
        char szName[32];

        RTStrPrintf(szName, sizeof(szName), "/PGM/GC/PD/Trap0e/%04X", i);
        int rc = STAMR3Register(pVM, &pPGM->StatGCTrap0ePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of traps in page directory n.");
        AssertRC(rc);

        RTStrPrintf(szName, sizeof(szName), "/PGM/GC/PD/SyncPt/%04X", i);
        rc = STAMR3Register(pVM, &pPGM->StatGCSyncPtPD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of syncs per PD n.");
        AssertRC(rc);

        RTStrPrintf(szName, sizeof(szName), "/PGM/GC/PD/SyncPage/%04X", i);
        rc = STAMR3Register(pVM, &pPGM->StatGCSyncPagePD[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, szName, STAMUNIT_OCCURENCES, "The number of out of sync pages per page directory n.");
        AssertRC(rc);
    }
}
#endif /* VBOX_WITH_STATISTICS */
997
998/**
999 * Init the PGM bits that rely on VMMR0 and MM to be fully initialized.
1000 *
1001 * The dynamic mapping area will also be allocated and initialized at this
1002 * time. We could allocate it during PGMR3Init of course, but the mapping
1003 * wouldn't be allocated at that time preventing us from setting up the
1004 * page table entries with the dummy page.
1005 *
1006 * @returns VBox status code.
1007 * @param pVM VM handle.
1008 */
1009PGMR3DECL(int) PGMR3InitDynMap(PVM pVM)
1010{
1011 /*
1012 * Reserve space for mapping the paging pages into guest context.
1013 */
1014 int rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + ELEMENTS(pVM->pgm.s.apHCPaePDs) + 1 + 2 + 2), "Paging", &pVM->pgm.s.pGC32BitPD);
1015 AssertRCReturn(rc, rc);
1016 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1017
1018 /*
1019 * Reserve space for the dynamic mappings.
1020 */
1021 /** @todo r=bird: Need to verify that the checks for crossing PTs are correct here. They seems to be assuming 4MB PTs.. */
1022 rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping", &pVM->pgm.s.pbDynPageMapBaseGC);
1023 if ( VBOX_SUCCESS(rc)
1024 && (pVM->pgm.s.pbDynPageMapBaseGC >> PGDIR_SHIFT) != ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> PGDIR_SHIFT))
1025 rc = MMR3HyperReserve(pVM, MM_HYPER_DYNAMIC_SIZE, "Dynamic mapping not crossing", &pVM->pgm.s.pbDynPageMapBaseGC);
1026 if (VBOX_SUCCESS(rc))
1027 {
1028 AssertRelease((pVM->pgm.s.pbDynPageMapBaseGC >> PGDIR_SHIFT) == ((pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE - 1) >> PGDIR_SHIFT));
1029 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
1030 }
1031 return rc;
1032}
1033
1034
1035/**
1036 * Ring-3 init finalizing.
1037 *
1038 * @returns VBox status code.
1039 * @param pVM The VM handle.
1040 */
1041PGMR3DECL(int) PGMR3InitFinalize(PVM pVM)
1042{
1043 /*
1044 * Map the paging pages into the guest context.
1045 */
1046 RTGCPTR GCPtr = pVM->pgm.s.pGC32BitPD;
1047 AssertReleaseReturn(GCPtr, VERR_INTERNAL_ERROR);
1048
1049 int rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhys32BitPD, PAGE_SIZE, 0);
1050 AssertRCReturn(rc, rc);
1051 pVM->pgm.s.pGC32BitPD = GCPtr;
1052 GCPtr += PAGE_SIZE;
1053 GCPtr += PAGE_SIZE; /* reserved page */
1054
1055 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apHCPaePDs); i++)
1056 {
1057 rc = PGMMap(pVM, GCPtr, pVM->pgm.s.aHCPhysPaePDs[i], PAGE_SIZE, 0);
1058 AssertRCReturn(rc, rc);
1059 pVM->pgm.s.apGCPaePDs[i] = GCPtr;
1060 GCPtr += PAGE_SIZE;
1061 }
1062 /* A bit of paranoia is justified. */
1063 AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[0] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[1]);
1064 AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[1] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[2]);
1065 AssertRelease((RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[2] + PAGE_SIZE == (RTGCUINTPTR)pVM->pgm.s.apGCPaePDs[3]);
1066 GCPtr += PAGE_SIZE; /* reserved page */
1067
1068 rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysPaePDPTR, PAGE_SIZE, 0);
1069 AssertRCReturn(rc, rc);
1070 pVM->pgm.s.pGCPaePDPTR = GCPtr;
1071 GCPtr += PAGE_SIZE;
1072 GCPtr += PAGE_SIZE; /* reserved page */
1073
1074 rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysPaePML4, PAGE_SIZE, 0);
1075 AssertRCReturn(rc, rc);
1076 pVM->pgm.s.pGCPaePML4 = GCPtr;
1077 GCPtr += PAGE_SIZE;
1078 GCPtr += PAGE_SIZE; /* reserved page */
1079
1080
1081 /*
1082 * Reserve space for the dynamic mappings.
1083 * Initialize the dynamic mapping pages with dummy pages to simply the cache.
1084 */
1085 /* get the pointer to the page table entries. */
1086 PPGMMAPPING pMapping = pgmGetMapping(pVM, pVM->pgm.s.pbDynPageMapBaseGC);
1087 AssertRelease(pMapping);
1088 const uintptr_t off = pVM->pgm.s.pbDynPageMapBaseGC - pMapping->GCPtr;
1089 const unsigned iPT = off >> X86_PD_SHIFT;
1090 const unsigned iPG = (off >> X86_PT_SHIFT) & X86_PT_MASK;
1091 pVM->pgm.s.paDynPageMap32BitPTEsGC = pMapping->aPTs[iPT].pPTGC + iPG * sizeof(pMapping->aPTs[0].pPTR3->a[0]);
1092 pVM->pgm.s.paDynPageMapPaePTEsGC = pMapping->aPTs[iPT].paPaePTsGC + iPG * sizeof(pMapping->aPTs[0].paPaePTsR3->a[0]);
1093
1094 /* init cache */
1095 RTHCPHYS HCPhysDummy = MMR3PageDummyHCPhys(pVM);
1096 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); i++)
1097 pVM->pgm.s.aHCPhysDynPageMapCache[i] = HCPhysDummy;
1098
1099 for (unsigned i = 0; i < MM_HYPER_DYNAMIC_SIZE; i += PAGE_SIZE)
1100 {
1101 rc = PGMMap(pVM, pVM->pgm.s.pbDynPageMapBaseGC + i, HCPhysDummy, PAGE_SIZE, 0);
1102 AssertRCReturn(rc, rc);
1103 }
1104
1105 return rc;
1106}
1107
1108
1109/**
1110 * Applies relocations to data and code managed by this
1111 * component. This function will be called at init and
1112 * whenever the VMM need to relocate it self inside the GC.
1113 *
1114 * @param pVM The VM.
1115 * @param offDelta Relocation delta relative to old location.
1116 */
1117PGMR3DECL(void) PGMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1118{
1119 LogFlow(("PGMR3Relocate\n"));
1120
1121 /*
1122 * Paging stuff.
1123 */
1124 pVM->pgm.s.GCPtrCR3Mapping += offDelta;
1125 /** @todo move this into shadow and guest specific relocation functions. */
1126 AssertMsg(pVM->pgm.s.pGC32BitPD, ("Init order, no relocation before paging is initialized!\n"));
1127 pVM->pgm.s.pGC32BitPD += offDelta;
1128 pVM->pgm.s.pGuestPDGC += offDelta;
1129 for (unsigned i = 0; i < ELEMENTS(pVM->pgm.s.apGCPaePDs); i++)
1130 pVM->pgm.s.apGCPaePDs[i] += offDelta;
1131 pVM->pgm.s.pGCPaePDPTR += offDelta;
1132 pVM->pgm.s.pGCPaePML4 += offDelta;
1133
1134 pgmR3ModeDataInit(pVM, true /* resolve GC/R0 symbols */);
1135 pgmR3ModeDataSwitch(pVM, pVM->pgm.s.enmShadowMode, pVM->pgm.s.enmGuestMode);
1136
1137 PGM_SHW_PFN(Relocate, pVM)(pVM, offDelta);
1138 PGM_GST_PFN(Relocate, pVM)(pVM, offDelta);
1139 PGM_BTH_PFN(Relocate, pVM)(pVM, offDelta);
1140
1141 /*
1142 * Trees.
1143 */
1144 pVM->pgm.s.pTreesGC = MMHyperHC2GC(pVM, pVM->pgm.s.pTreesHC);
1145
1146 /*
1147 * Ram ranges.
1148 */
1149 if (pVM->pgm.s.pRamRangesHC)
1150 {
1151 pVM->pgm.s.pRamRangesGC = MMHyperHC2GC(pVM, pVM->pgm.s.pRamRangesHC);
1152 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC; pCur->pNextHC; pCur = pCur->pNextHC)
1153 {
1154 pCur->pNextGC = MMHyperHC2GC(pVM, pCur->pNextHC);
1155 if (pCur->pavHCChunkGC)
1156 pCur->pavHCChunkGC = MMHyperHC2GC(pVM, pCur->pavHCChunkHC);
1157 }
1158 }
1159
1160 /*
1161 * Update the two page directories with all page table mappings.
1162 * (One or more of them have changed, that's why we're here.)
1163 */
1164 pVM->pgm.s.pMappingsGC = MMHyperHC2GC(pVM, pVM->pgm.s.pMappingsR3);
1165 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur->pNextR3; pCur = pCur->pNextR3)
1166 pCur->pNextGC = MMHyperHC2GC(pVM, pCur->pNextR3);
1167
1168 /* Relocate GC addresses of Page Tables. */
1169 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1170 {
1171 for (RTHCUINT i = 0; i < pCur->cPTs; i++)
1172 {
1173 pCur->aPTs[i].pPTGC = MMHyperR3ToGC(pVM, pCur->aPTs[i].pPTR3);
1174 pCur->aPTs[i].paPaePTsGC = MMHyperR3ToGC(pVM, pCur->aPTs[i].paPaePTsR3);
1175 }
1176 }
1177
1178 /*
1179 * Dynamic page mapping area.
1180 */
1181 pVM->pgm.s.paDynPageMap32BitPTEsGC += offDelta;
1182 pVM->pgm.s.paDynPageMapPaePTEsGC += offDelta;
1183 pVM->pgm.s.pbDynPageMapBaseGC += offDelta;
1184
1185 /*
1186 * Physical and virtual handlers.
1187 */
1188 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3RelocatePhysHandler, &offDelta);
1189 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmR3RelocateVirtHandler, &offDelta);
1190
1191 /*
1192 * The page pool.
1193 */
1194 pgmR3PoolRelocate(pVM);
1195}
1196
1197
1198/**
1199 * Callback function for relocating a physical access handler.
1200 *
1201 * @returns 0 (continue enum)
1202 * @param pNode Pointer to a PGMPHYSHANDLER node.
1203 * @param pvUser Pointer to the offDelta. This is a pointer to the delta since we're
1204 * not certain the delta will fit in a void pointer for all possible configs.
1205 */
1206static DECLCALLBACK(int) pgmR3RelocatePhysHandler(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1207{
1208 PPGMPHYSHANDLER pHandler = (PPGMPHYSHANDLER)pNode;
1209 RTGCINTPTR offDelta = *(PRTGCINTPTR)pvUser;
1210 if (pHandler->pfnHandlerGC)
1211 pHandler->pfnHandlerGC += offDelta;
1212 if ((RTGCUINTPTR)pHandler->pvUserGC >= 0x10000)
1213 pHandler->pvUserGC += offDelta;
1214 return 0;
1215}
1216
1217
1218/**
1219 * Callback function for relocating a virtual access handler.
1220 *
1221 * @returns 0 (continue enum)
1222 * @param pNode Pointer to a PGMVIRTHANDLER node.
1223 * @param pvUser Pointer to the offDelta. This is a pointer to the delta since we're
1224 * not certain the delta will fit in a void pointer for all possible configs.
1225 */
1226static DECLCALLBACK(int) pgmR3RelocateVirtHandler(PAVLROGCPTRNODECORE pNode, void *pvUser)
1227{
1228 PPGMVIRTHANDLER pHandler = (PPGMVIRTHANDLER)pNode;
1229 RTGCINTPTR offDelta = *(PRTGCINTPTR)pvUser;
1230 Assert(pHandler->pfnHandlerGC);
1231 pHandler->pfnHandlerGC += offDelta;
1232 return 0;
1233}
1234
1235
/**
 * The VM is being reset.
 *
 * For the PGM component this means that any PD write monitors
 * needs to be removed, guest RAM is zeroed, and paging is switched
 * back to real mode.
 *
 * @param pVM VM handle.
 */
PGMR3DECL(void) PGMR3Reset(PVM pVM)
{
    LogFlow(("PGMR3Reset:\n"));
    VM_ASSERT_EMT(pVM);

    /*
     * Unfix any fixed mappings and disable CR3 monitoring.
     */
    pVM->pgm.s.fMappingsFixed = false;
    pVM->pgm.s.GCPtrMappingFixed = 0;
    pVM->pgm.s.cbMappingFixed = 0;

    /* Stop monitoring the guest CR3 page (mode-specific implementation). */
    int rc = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
    AssertRC(rc);
#ifdef DEBUG
    PGMR3DumpMappings(pVM);
#endif

    /*
     * Reset the shadow page pool.
     */
    pgmR3PoolReset(pVM);

    /*
     * Re-init other members.
     */
    pVM->pgm.s.fA20Enabled = true;

    /*
     * Clear the FFs PGM owns.
     */
    VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
    VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);

    /*
     * Zero memory.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesHC; pRam; pRam = pRam->pNextHC)
    {
        unsigned iPage = pRam->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
        {
            /* Skip pages we must not touch: reserved, ROM and MMIO/MMIO2. */
            if (pRam->aHCPhys[iPage] & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2))
            {
                Log4(("PGMR3Reset: not clearing phys page %RGp due to flags %RHp\n", pRam->GCPhys + (iPage << PAGE_SHIFT), pRam->aHCPhys[iPage] & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO)));
                continue;
            }
            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
            {
                /* Dynamically allocated range: only zero chunks that were
                   actually committed. */
                unsigned iChunk = iPage >> (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
                if (pRam->pavHCChunkHC[iChunk])
                    ASMMemZero32((char *)pRam->pavHCChunkHC[iChunk] + ((iPage << PAGE_SHIFT) & PGM_DYNAMIC_CHUNK_OFFSET_MASK), PAGE_SIZE);
            }
            else
                ASMMemZero32((char *)pRam->pvHC + (iPage << PAGE_SHIFT), PAGE_SIZE);
        }
    }

    /*
     * Switch mode back to real mode.
     */
    rc = pgmR3ChangeMode(pVM, PGMMODE_REAL);
    AssertReleaseRC(rc);
    STAM_REL_COUNTER_RESET(&pVM->pgm.s.cGuestModeChanges);
}
1309
1310
1311/**
1312 * Terminates the PGM.
1313 *
1314 * @returns VBox status code.
1315 * @param pVM Pointer to VM structure.
1316 */
1317PGMR3DECL(int) PGMR3Term(PVM pVM)
1318{
1319 return PDMR3CritSectDelete(&pVM->pgm.s.CritSect);
1320}
1321
1322
#ifdef VBOX_STRICT
/**
 * VM state change callback for clearing fNoMorePhysWrites after
 * a snapshot has been created.
 */
static DECLCALLBACK(void) pgmR3ResetNoMorePhysWritesFlag(PVM pVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
{
    /* Only interested in transitions back to the running state. */
    if (enmState != VMSTATE_RUNNING)
        return;
    pVM->pgm.s.fNoMorePhysWrites = false;
}
#endif
1334
1335
/**
 * Execute state save operation.
 *
 * Writes the PGM state: basic mode/mapping configuration, the guest
 * mappings (identified by their description strings), and for each RAM
 * range the per-page flags followed by the memory contents.
 *
 * Note: intermediate SSMR3Put* status codes are deliberately not checked;
 * SSM latches the first error and the final SSMR3PutU32 returns it.
 *
 * @returns VBox status code.
 * @param pVM VM Handle.
 * @param pSSM SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PPGM pPGM = &pVM->pgm.s;

    /* No more writes to physical memory after this point! */
    pVM->pgm.s.fNoMorePhysWrites = true;

    /*
     * Save basic data (required / unaffected by relocation).
     */
#if 1
    SSMR3PutBool(pSSM, pPGM->fMappingsFixed);
#else
    SSMR3PutUInt(pSSM, pPGM->fMappingsFixed);
#endif
    SSMR3PutGCPtr(pSSM, pPGM->GCPtrMappingFixed);
    SSMR3PutU32(pSSM, pPGM->cbMappingFixed);
    SSMR3PutUInt(pSSM, pPGM->cbRamSize);
    SSMR3PutGCPhys(pSSM, pPGM->GCPhysA20Mask);
    SSMR3PutUInt(pSSM, pPGM->fA20Enabled);
    SSMR3PutUInt(pSSM, pPGM->fSyncFlags);
    SSMR3PutUInt(pSSM, pPGM->enmGuestMode);
    SSMR3PutU32(pSSM, ~0); /* Separator. */

    /*
     * The guest mappings.
     */
    uint32_t i = 0;
    for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
    {
        SSMR3PutU32(pSSM, i);
        SSMR3PutStrZ(pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
        SSMR3PutGCPtr(pSSM, pMapping->GCPtr);
        SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
        /* flags are done by the mapping owners! */
    }
    SSMR3PutU32(pSSM, ~0); /* terminator. */

    /*
     * Ram range flags and bits.
     */
    i = 0;
    for (PPGMRAMRANGE pRam = pPGM->pRamRangesHC; pRam; pRam = pRam->pNextHC, i++)
    {
        /** @todo MMIO ranges may move (PCI reconfig), we currently assume they don't. */

        SSMR3PutU32(pSSM, i);
        SSMR3PutGCPhys(pSSM, pRam->GCPhys);
        SSMR3PutGCPhys(pSSM, pRam->GCPhysLast);
        SSMR3PutGCPhys(pSSM, pRam->cb);
        SSMR3PutU8(pSSM, !!pRam->pvHC); /* boolean indicating memory or not. */

        /* Flags. (Only the low, non-address bits of each aHCPhys entry.) */
        const unsigned cPages = pRam->cb >> PAGE_SHIFT;
        for (unsigned iPage = 0; iPage < cPages; iPage++)
            SSMR3PutU16(pSSM, (uint16_t)(pRam->aHCPhys[iPage] & ~X86_PTE_PAE_PG_MASK));

        /* any memory associated with the range. */
        if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
        {
            /* Dynamically allocated range: save a present marker per chunk,
               followed by the chunk data when committed. */
            for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
            {
                if (pRam->pavHCChunkHC[iChunk])
                {
                    SSMR3PutU8(pSSM, 1); /* chunk present */
                    SSMR3PutMem(pSSM, pRam->pavHCChunkHC[iChunk], PGM_DYNAMIC_CHUNK_SIZE);
                }
                else
                    SSMR3PutU8(pSSM, 0); /* no chunk present */
            }
        }
        else if (pRam->pvHC)
        {
            int rc = SSMR3PutMem(pSSM, pRam->pvHC, pRam->cb);
            if (VBOX_FAILURE(rc))
            {
                Log(("pgmR3Save: SSMR3PutMem(, %p, %#x) -> %Vrc\n", pRam->pvHC, pRam->cb, rc));
                return rc;
            }
        }
    }
    return SSMR3PutU32(pSSM, ~0); /* terminator. */
}
1426
1427
1428/**
1429 * Execute state load operation.
1430 *
1431 * @returns VBox status code.
1432 * @param pVM VM Handle.
1433 * @param pSSM SSM operation handle.
1434 * @param u32Version Data layout version.
1435 */
1436static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
1437{
1438 /*
1439 * Validate version.
1440 */
1441 if (u32Version != PGM_SAVED_STATE_VERSION)
1442 {
1443 Log(("pgmR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, PGM_SAVED_STATE_VERSION));
1444 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
1445 }
1446
1447 /*
1448 * Call the reset function to make sure all the memory is cleared.
1449 */
1450 PGMR3Reset(pVM);
1451
1452 /*
1453 * Load basic data (required / unaffected by relocation).
1454 */
1455 PPGM pPGM = &pVM->pgm.s;
1456#if 1
1457 SSMR3GetBool(pSSM, &pPGM->fMappingsFixed);
1458#else
1459 uint32_t u;
1460 SSMR3GetU32(pSSM, &u);
1461 pPGM->fMappingsFixed = u;
1462#endif
1463 SSMR3GetGCPtr(pSSM, &pPGM->GCPtrMappingFixed);
1464 SSMR3GetU32(pSSM, &pPGM->cbMappingFixed);
1465
1466 RTUINT cbRamSize;
1467 int rc = SSMR3GetU32(pSSM, &cbRamSize);
1468 if (VBOX_FAILURE(rc))
1469 return rc;
1470 if (cbRamSize != pPGM->cbRamSize)
1471 return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
1472 SSMR3GetGCPhys(pSSM, &pPGM->GCPhysA20Mask);
1473 SSMR3GetUInt(pSSM, &pPGM->fA20Enabled);
1474 SSMR3GetUInt(pSSM, &pPGM->fSyncFlags);
1475 RTUINT uGuestMode;
1476 SSMR3GetUInt(pSSM, &uGuestMode);
1477 pPGM->enmGuestMode = (PGMMODE)uGuestMode;
1478
1479 /* check separator. */
1480 uint32_t u32Sep;
1481 SSMR3GetU32(pSSM, &u32Sep);
1482 if (VBOX_FAILURE(rc))
1483 return rc;
1484 if (u32Sep != (uint32_t)~0)
1485 {
1486 AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
1487 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1488 }
1489
1490 /*
1491 * The guest mappings.
1492 */
1493 uint32_t i = 0;
1494 for (;; i++)
1495 {
1496 /* Check the seqence number / separator. */
1497 rc = SSMR3GetU32(pSSM, &u32Sep);
1498 if (VBOX_FAILURE(rc))
1499 return rc;
1500 if (u32Sep == ~0U)
1501 break;
1502 if (u32Sep != i)
1503 {
1504 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1505 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1506 }
1507
1508 /* get the mapping details. */
1509 char szDesc[256];
1510 szDesc[0] = '\0';
1511 rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
1512 if (VBOX_FAILURE(rc))
1513 return rc;
1514 RTGCPTR GCPtr;
1515 SSMR3GetGCPtr(pSSM, &GCPtr);
1516 RTGCUINTPTR cPTs;
1517 rc = SSMR3GetU32(pSSM, &cPTs);
1518 if (VBOX_FAILURE(rc))
1519 return rc;
1520
1521 /* find matching range. */
1522 PPGMMAPPING pMapping;
1523 for (pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3)
1524 if ( pMapping->cPTs == cPTs
1525 && !strcmp(pMapping->pszDesc, szDesc))
1526 break;
1527 if (!pMapping)
1528 {
1529 LogRel(("Couldn't find mapping: cPTs=%#x szDesc=%s (GCPtr=%VGv)\n",
1530 cPTs, szDesc, GCPtr));
1531 AssertFailed();
1532 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1533 }
1534
1535 /* relocate it. */
1536 if (pMapping->GCPtr != GCPtr)
1537 {
1538 AssertMsg((GCPtr >> PGDIR_SHIFT << PGDIR_SHIFT) == GCPtr, ("GCPtr=%VGv\n", GCPtr));
1539#if HC_ARCH_BITS == 64
1540LogRel(("Mapping: %VGv -> %VGv %s\n", pMapping->GCPtr, GCPtr, pMapping->pszDesc));
1541#endif
1542 pgmR3MapRelocate(pVM, pMapping, pMapping->GCPtr >> PGDIR_SHIFT, GCPtr >> PGDIR_SHIFT);
1543 }
1544 else
1545 Log(("pgmR3Load: '%s' needed no relocation (%VGv)\n", szDesc, GCPtr));
1546 }
1547
1548 /*
1549 * Ram range flags and bits.
1550 */
1551 i = 0;
1552 for (PPGMRAMRANGE pRam = pPGM->pRamRangesHC; pRam; pRam = pRam->pNextHC, i++)
1553 {
1554 /** @todo MMIO ranges may move (PCI reconfig), we currently assume they don't. */
1555 /* Check the seqence number / separator. */
1556 rc = SSMR3GetU32(pSSM, &u32Sep);
1557 if (VBOX_FAILURE(rc))
1558 return rc;
1559 if (u32Sep == ~0U)
1560 break;
1561 if (u32Sep != i)
1562 {
1563 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1564 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1565 }
1566
1567 /* Get the range details. */
1568 RTGCPHYS GCPhys;
1569 SSMR3GetGCPhys(pSSM, &GCPhys);
1570 RTGCPHYS GCPhysLast;
1571 SSMR3GetGCPhys(pSSM, &GCPhysLast);
1572 RTGCPHYS cb;
1573 SSMR3GetGCPhys(pSSM, &cb);
1574 uint8_t fHaveBits;
1575 rc = SSMR3GetU8(pSSM, &fHaveBits);
1576 if (VBOX_FAILURE(rc))
1577 return rc;
1578 if (fHaveBits & ~1)
1579 {
1580 AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
1581 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1582 }
1583
1584 /* Match it up with the current range. */
1585 if ( GCPhys != pRam->GCPhys
1586 || GCPhysLast != pRam->GCPhysLast
1587 || cb != pRam->cb
1588 || fHaveBits != !!pRam->pvHC)
1589 {
1590 LogRel(("Ram range: %VGp-%VGp %VGp bytes %s\n"
1591 "State : %VGp-%VGp %VGp bytes %s\n",
1592 pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvHC ? "bits" : "nobits",
1593 GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits"));
1594 AssertFailed();
1595 return VERR_SSM_LOAD_CONFIG_MISMATCH;
1596 }
1597
1598 /* Flags. */
1599 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1600 for (unsigned iPage = 0; iPage < cPages; iPage++)
1601 {
1602 uint16_t u16 = 0;
1603 SSMR3GetU16(pSSM, &u16);
1604 u16 &= PAGE_OFFSET_MASK & ~( MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL
1605 | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL
1606 | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF );
1607 pRam->aHCPhys[iPage] = (pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK) | (RTHCPHYS)u16;
1608 }
1609
1610 /* any memory associated with the range. */
1611 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1612 {
1613 for (unsigned iChunk = 0; iChunk < (pRam->cb >> PGM_DYNAMIC_CHUNK_SHIFT); iChunk++)
1614 {
1615 uint8_t fValidChunk;
1616
1617 rc = SSMR3GetU8(pSSM, &fValidChunk);
1618 if (VBOX_FAILURE(rc))
1619 return rc;
1620 if (fValidChunk > 1)
1621 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1622
1623 if (fValidChunk)
1624 {
1625 if (!pRam->pavHCChunkHC[iChunk])
1626 {
1627 rc = pgmr3PhysGrowRange(pVM, pRam->GCPhys + iChunk * PGM_DYNAMIC_CHUNK_SIZE);
1628 if (VBOX_FAILURE(rc))
1629 return rc;
1630 }
1631 Assert(pRam->pavHCChunkHC[iChunk]);
1632
1633 SSMR3GetMem(pSSM, pRam->pavHCChunkHC[iChunk], PGM_DYNAMIC_CHUNK_SIZE);
1634 }
1635 /* else nothing to do */
1636 }
1637 }
1638 else if (pRam->pvHC)
1639 {
1640 int rc = SSMR3GetMem(pSSM, pRam->pvHC, pRam->cb);
1641 if (VBOX_FAILURE(rc))
1642 {
1643 Log(("pgmR3Save: SSMR3GetMem(, %p, %#x) -> %Vrc\n", pRam->pvHC, pRam->cb, rc));
1644 return rc;
1645 }
1646 }
1647 }
1648
1649 /*
1650 * We require a full resync now.
1651 */
1652 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1653 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1654 pPGM->fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
1655 pPGM->fPhysCacheFlushPending = true;
1656 pgmR3HandlerPhysicalUpdateAll(pVM);
1657
1658 /*
1659 * Change the paging mode.
1660 */
1661 return pgmR3ChangeMode(pVM, pPGM->enmGuestMode);
1662}
1663
1664
1665/**
1666 * Show paging mode.
1667 *
1668 * @param pVM VM Handle.
1669 * @param pHlp The info helpers.
1670 * @param pszArgs "all" (default), "guest", "shadow" or "host".
1671 */
1672static DECLCALLBACK(void) pgmR3InfoMode(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1673{
1674 /* digest argument. */
1675 bool fGuest, fShadow, fHost;
1676 if (pszArgs)
1677 pszArgs = RTStrStripL(pszArgs);
1678 if (!pszArgs || !*pszArgs || strstr(pszArgs, "all"))
1679 fShadow = fHost = fGuest = true;
1680 else
1681 {
1682 fShadow = fHost = fGuest = false;
1683 if (strstr(pszArgs, "guest"))
1684 fGuest = true;
1685 if (strstr(pszArgs, "shadow"))
1686 fShadow = true;
1687 if (strstr(pszArgs, "host"))
1688 fHost = true;
1689 }
1690
1691 /* print info. */
1692 if (fGuest)
1693 pHlp->pfnPrintf(pHlp, "Guest paging mode: %s, changed %RU64 times, A20 %s\n",
1694 PGMGetModeName(pVM->pgm.s.enmGuestMode), pVM->pgm.s.cGuestModeChanges.c,
1695 pVM->pgm.s.fA20Enabled ? "enabled" : "disabled");
1696 if (fShadow)
1697 pHlp->pfnPrintf(pHlp, "Shadow paging mode: %s\n", PGMGetModeName(pVM->pgm.s.enmShadowMode));
1698 if (fHost)
1699 {
1700 const char *psz;
1701 switch (pVM->pgm.s.enmHostMode)
1702 {
1703 case SUPPAGINGMODE_INVALID: psz = "invalid"; break;
1704 case SUPPAGINGMODE_32_BIT: psz = "32-bit"; break;
1705 case SUPPAGINGMODE_32_BIT_GLOBAL: psz = "32-bit+G"; break;
1706 case SUPPAGINGMODE_PAE: psz = "PAE"; break;
1707 case SUPPAGINGMODE_PAE_GLOBAL: psz = "PAE+G"; break;
1708 case SUPPAGINGMODE_PAE_NX: psz = "PAE+NX"; break;
1709 case SUPPAGINGMODE_PAE_GLOBAL_NX: psz = "PAE+G+NX"; break;
1710 case SUPPAGINGMODE_AMD64: psz = "AMD64"; break;
1711 case SUPPAGINGMODE_AMD64_GLOBAL: psz = "AMD64+G"; break;
1712 case SUPPAGINGMODE_AMD64_NX: psz = "AMD64+NX"; break;
1713 case SUPPAGINGMODE_AMD64_GLOBAL_NX: psz = "AMD64+G+NX"; break;
1714 default: psz = "unknown"; break;
1715 }
1716 pHlp->pfnPrintf(pHlp, "Host paging mode: %s\n", psz);
1717 }
1718}
1719
1720
/**
 * Dump registered RAM ranges to the log.
 * (Note: the old comment said "MMIO ranges", but this walks and prints
 * the RAM range list.)
 *
 * @param pVM VM Handle.
 * @param pHlp The info helpers.
 * @param pszArgs Arguments, ignored.
 */
static DECLCALLBACK(void) pgmR3PhysInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    NOREF(pszArgs);
    /* Header line, column widths derived from the pointer/address sizes. */
    pHlp->pfnPrintf(pHlp,
                    "RAM ranges (pVM=%p)\n"
                    "%.*s %.*s\n",
                    pVM,
                    sizeof(RTGCPHYS) * 4 + 1, "GC Phys Range ",
                    sizeof(RTHCPTR) * 2, "pvHC ");

    for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC; pCur; pCur = pCur->pNextHC)
        pHlp->pfnPrintf(pHlp,
                        "%VGp-%VGp %VHv\n",
                        pCur->GCPhys,
                        pCur->GCPhysLast,
                        pCur->pvHC);
}
1745
1746/**
1747 * Dump the page directory to the log.
1748 *
1749 * @param pVM VM Handle.
1750 * @param pHlp The info helpers.
1751 * @param pszArgs Arguments, ignored.
1752 */
1753static DECLCALLBACK(void) pgmR3InfoCr3(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1754{
1755/** @todo fix this! Convert the PGMR3DumpHierarchyHC functions to do guest stuff. */
1756 /* Big pages supported? */
1757 const bool fPSE = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
1758 /* Global pages supported? */
1759 const bool fPGE = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PGE);
1760
1761 NOREF(pszArgs);
1762
1763 /*
1764 * Get page directory addresses.
1765 */
1766 PVBOXPD pPDSrc = pVM->pgm.s.pGuestPDHC;
1767 Assert(pPDSrc);
1768 Assert(MMPhysGCPhys2HCVirt(pVM, (RTGCPHYS)(CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
1769
1770 /*
1771 * Iterate the page directory.
1772 */
1773 for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
1774 {
1775 VBOXPDE PdeSrc = pPDSrc->a[iPD];
1776 if (PdeSrc.n.u1Present)
1777 {
1778 if (PdeSrc.b.u1Size && fPSE)
1779 {
1780 pHlp->pfnPrintf(pHlp,
1781 "%04X - %VGp P=%d U=%d RW=%d G=%d - BIG\n",
1782 iPD,
1783 PdeSrc.u & X86_PDE_PG_MASK,
1784 PdeSrc.b.u1Present, PdeSrc.b.u1User, PdeSrc.b.u1Write, PdeSrc.b.u1Global && fPGE);
1785 }
1786 else
1787 {
1788 pHlp->pfnPrintf(pHlp,
1789 "%04X - %VGp P=%d U=%d RW=%d [G=%d]\n",
1790 iPD,
1791 PdeSrc.u & X86_PDE4M_PG_MASK,
1792 PdeSrc.n.u1Present, PdeSrc.n.u1User, PdeSrc.n.u1Write, PdeSrc.b.u1Global && fPGE);
1793 }
1794 }
1795 }
1796}
1797
1798
1799/**
1800 * Serivce a VMMCALLHOST_PGM_LOCK call.
1801 *
1802 * @returns VBox status code.
1803 * @param pVM The VM handle.
1804 */
1805PDMR3DECL(int) PGMR3LockCall(PVM pVM)
1806{
1807 return pgmLock(pVM);
1808}
1809
1810
1811/**
1812 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
1813 *
1814 * @returns PGM_TYPE_*.
1815 * @param pgmMode The mode value to convert.
1816 */
1817DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
1818{
1819 switch (pgmMode)
1820 {
1821 case PGMMODE_REAL: return PGM_TYPE_REAL;
1822 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
1823 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
1824 case PGMMODE_PAE:
1825 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
1826 case PGMMODE_AMD64:
1827 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
1828 default:
1829 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
1830 }
1831}
1832
1833
1834/**
1835 * Gets the index into the paging mode data array of a SHW+GST mode.
1836 *
1837 * @returns PGM::paPagingData index.
1838 * @param uShwType The shadow paging mode type.
1839 * @param uGstType The guest paging mode type.
1840 */
1841DECLINLINE(unsigned) pgmModeDataIndex(unsigned uShwType, unsigned uGstType)
1842{
1843 Assert(uShwType >= PGM_TYPE_32BIT && uShwType <= PGM_TYPE_AMD64);
1844 Assert(uGstType >= PGM_TYPE_REAL && uGstType <= PGM_TYPE_AMD64);
1845 return (uShwType - PGM_TYPE_32BIT) * (PGM_TYPE_AMD64 - PGM_TYPE_32BIT + 1)
1846 + (uGstType - PGM_TYPE_REAL);
1847}
1848
1849
1850/**
1851 * Gets the index into the paging mode data array of a SHW+GST mode.
1852 *
1853 * @returns PGM::paPagingData index.
1854 * @param enmShw The shadow paging mode.
1855 * @param enmGst The guest paging mode.
1856 */
1857DECLINLINE(unsigned) pgmModeDataIndexByMode(PGMMODE enmShw, PGMMODE enmGst)
1858{
1859 Assert(enmShw >= PGMMODE_32_BIT && enmShw <= PGMMODE_MAX);
1860 Assert(enmGst > PGMMODE_INVALID && enmGst < PGMMODE_MAX);
1861 return pgmModeDataIndex(pgmModeToType(enmShw), pgmModeToType(enmGst));
1862}
1863
1864
1865/**
1866 * Calculates the max data index.
1867 * @returns The number of entries in the pagaing data array.
1868 */
1869DECLINLINE(unsigned) pgmModeDataMaxIndex(void)
1870{
1871 return pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64) + 1;
1872}
1873
1874
1875/**
1876 * Initializes the paging mode data kept in PGM::paModeData.
1877 *
1878 * @param pVM The VM handle.
1879 * @param fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
1880 * This is used early in the init process to avoid trouble with PDM
1881 * not being initialized yet.
1882 */
1883static int pgmR3ModeDataInit(PVM pVM, bool fResolveGCAndR0)
1884{
1885 PPGMMODEDATA pModeData;
1886 int rc;
1887
1888 /*
1889 * Allocate the array on the first call.
1890 */
1891 if (!pVM->pgm.s.paModeData)
1892 {
1893 pVM->pgm.s.paModeData = (PPGMMODEDATA)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMMODEDATA) * pgmModeDataMaxIndex());
1894 AssertReturn(pVM->pgm.s.paModeData, VERR_NO_MEMORY);
1895 }
1896
1897 /*
1898 * Initialize the array entries.
1899 */
1900 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGM_TYPE_REAL)];
1901 pModeData->uShwType = PGM_TYPE_32BIT;
1902 pModeData->uGstType = PGM_TYPE_REAL;
1903 rc = PGM_SHW_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1904 rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1905 rc = PGM_BTH_NAME_32BIT_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1906
1907 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGMMODE_PROTECTED)];
1908 pModeData->uShwType = PGM_TYPE_32BIT;
1909 pModeData->uGstType = PGM_TYPE_PROT;
1910 rc = PGM_SHW_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1911 rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1912 rc = PGM_BTH_NAME_32BIT_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1913
1914 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_32BIT, PGM_TYPE_32BIT)];
1915 pModeData->uShwType = PGM_TYPE_32BIT;
1916 pModeData->uGstType = PGM_TYPE_32BIT;
1917 rc = PGM_SHW_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1918 rc = PGM_GST_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1919 rc = PGM_BTH_NAME_32BIT_32BIT(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1920
1921 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_REAL)];
1922 pModeData->uShwType = PGM_TYPE_PAE;
1923 pModeData->uGstType = PGM_TYPE_REAL;
1924 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1925 rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1926 rc = PGM_BTH_NAME_PAE_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1927
1928 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_PROT)];
1929 pModeData->uShwType = PGM_TYPE_PAE;
1930 pModeData->uGstType = PGM_TYPE_PROT;
1931 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1932 rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1933 rc = PGM_BTH_NAME_PAE_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1934
1935 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_32BIT)];
1936 pModeData->uShwType = PGM_TYPE_PAE;
1937 pModeData->uGstType = PGM_TYPE_32BIT;
1938 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1939 rc = PGM_GST_NAME_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1940 rc = PGM_BTH_NAME_PAE_32BIT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1941
1942 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_PAE, PGM_TYPE_PAE)];
1943 pModeData->uShwType = PGM_TYPE_PAE;
1944 pModeData->uGstType = PGM_TYPE_PAE;
1945 rc = PGM_SHW_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1946 rc = PGM_GST_NAME_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1947 rc = PGM_BTH_NAME_PAE_PAE(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1948
1949 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_REAL)];
1950 pModeData->uShwType = PGM_TYPE_AMD64;
1951 pModeData->uGstType = PGM_TYPE_REAL;
1952 rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1953 rc = PGM_GST_NAME_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1954 rc = PGM_BTH_NAME_AMD64_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1955
1956 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_PROT)];
1957 pModeData->uShwType = PGM_TYPE_AMD64;
1958 pModeData->uGstType = PGM_TYPE_PROT;
1959 rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1960 rc = PGM_GST_NAME_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1961 rc = PGM_BTH_NAME_AMD64_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1962
1963 pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64)];
1964 pModeData->uShwType = PGM_TYPE_AMD64;
1965 pModeData->uGstType = PGM_TYPE_AMD64;
1966 rc = PGM_SHW_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1967 rc = PGM_GST_NAME_AMD64(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1968 rc = PGM_BTH_NAME_AMD64_AMD64(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
1969
1970 return VINF_SUCCESS;
1971}
1972
1973
/**
 * Switch to different (or relocated in the relocate case) mode data.
 *
 * Copies all the shadow, guest and "both" function pointers from the
 * selected PGMMODEDATA entry into the live PGM state so the PGM_*_PFN
 * dispatch macros pick up the right implementations.
 *
 * @param pVM The VM handle.
 * @param enmShw The shadow paging mode.
 * @param enmGst The guest paging mode.
 */
static void pgmR3ModeDataSwitch(PVM pVM, PGMMODE enmShw, PGMMODE enmGst)
{
    PPGMMODEDATA pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(enmShw, enmGst)];

    /* Sanity: the entry must have been initialized for this combination. */
    Assert(pModeData->uGstType == pgmModeToType(enmGst));
    Assert(pModeData->uShwType == pgmModeToType(enmShw));

    /* shadow */
    pVM->pgm.s.pfnR3ShwRelocate = pModeData->pfnR3ShwRelocate;
    pVM->pgm.s.pfnR3ShwExit = pModeData->pfnR3ShwExit;
    pVM->pgm.s.pfnR3ShwGetPage = pModeData->pfnR3ShwGetPage;
    Assert(pVM->pgm.s.pfnR3ShwGetPage);
    pVM->pgm.s.pfnR3ShwModifyPage = pModeData->pfnR3ShwModifyPage;
    pVM->pgm.s.pfnR3ShwGetPDEByIndex = pModeData->pfnR3ShwGetPDEByIndex;
    pVM->pgm.s.pfnR3ShwSetPDEByIndex = pModeData->pfnR3ShwSetPDEByIndex;
    pVM->pgm.s.pfnR3ShwModifyPDEByIndex = pModeData->pfnR3ShwModifyPDEByIndex;

    pVM->pgm.s.pfnGCShwGetPage = pModeData->pfnGCShwGetPage;
    pVM->pgm.s.pfnGCShwModifyPage = pModeData->pfnGCShwModifyPage;
    pVM->pgm.s.pfnGCShwGetPDEByIndex = pModeData->pfnGCShwGetPDEByIndex;
    pVM->pgm.s.pfnGCShwSetPDEByIndex = pModeData->pfnGCShwSetPDEByIndex;
    pVM->pgm.s.pfnGCShwModifyPDEByIndex = pModeData->pfnGCShwModifyPDEByIndex;

    pVM->pgm.s.pfnR0ShwGetPage = pModeData->pfnR0ShwGetPage;
    pVM->pgm.s.pfnR0ShwModifyPage = pModeData->pfnR0ShwModifyPage;
    pVM->pgm.s.pfnR0ShwGetPDEByIndex = pModeData->pfnR0ShwGetPDEByIndex;
    pVM->pgm.s.pfnR0ShwSetPDEByIndex = pModeData->pfnR0ShwSetPDEByIndex;
    pVM->pgm.s.pfnR0ShwModifyPDEByIndex = pModeData->pfnR0ShwModifyPDEByIndex;


    /* guest */
    pVM->pgm.s.pfnR3GstRelocate = pModeData->pfnR3GstRelocate;
    pVM->pgm.s.pfnR3GstExit = pModeData->pfnR3GstExit;
    pVM->pgm.s.pfnR3GstGetPage = pModeData->pfnR3GstGetPage;
    Assert(pVM->pgm.s.pfnR3GstGetPage);
    pVM->pgm.s.pfnR3GstModifyPage = pModeData->pfnR3GstModifyPage;
    pVM->pgm.s.pfnR3GstGetPDE = pModeData->pfnR3GstGetPDE;
    pVM->pgm.s.pfnR3GstMonitorCR3 = pModeData->pfnR3GstMonitorCR3;
    pVM->pgm.s.pfnR3GstUnmonitorCR3 = pModeData->pfnR3GstUnmonitorCR3;
    pVM->pgm.s.pfnR3GstMapCR3 = pModeData->pfnR3GstMapCR3;
    pVM->pgm.s.pfnR3GstUnmapCR3 = pModeData->pfnR3GstUnmapCR3;
    pVM->pgm.s.pfnHCGstWriteHandlerCR3 = pModeData->pfnHCGstWriteHandlerCR3;
    pVM->pgm.s.pszHCGstWriteHandlerCR3 = pModeData->pszHCGstWriteHandlerCR3;

    pVM->pgm.s.pfnGCGstGetPage = pModeData->pfnGCGstGetPage;
    pVM->pgm.s.pfnGCGstModifyPage = pModeData->pfnGCGstModifyPage;
    pVM->pgm.s.pfnGCGstGetPDE = pModeData->pfnGCGstGetPDE;
    pVM->pgm.s.pfnGCGstMonitorCR3 = pModeData->pfnGCGstMonitorCR3;
    pVM->pgm.s.pfnGCGstUnmonitorCR3 = pModeData->pfnGCGstUnmonitorCR3;
    pVM->pgm.s.pfnGCGstMapCR3 = pModeData->pfnGCGstMapCR3;
    pVM->pgm.s.pfnGCGstUnmapCR3 = pModeData->pfnGCGstUnmapCR3;
    pVM->pgm.s.pfnGCGstWriteHandlerCR3 = pModeData->pfnGCGstWriteHandlerCR3;

    pVM->pgm.s.pfnR0GstGetPage = pModeData->pfnR0GstGetPage;
    pVM->pgm.s.pfnR0GstModifyPage = pModeData->pfnR0GstModifyPage;
    pVM->pgm.s.pfnR0GstGetPDE = pModeData->pfnR0GstGetPDE;
    pVM->pgm.s.pfnR0GstMonitorCR3 = pModeData->pfnR0GstMonitorCR3;
    pVM->pgm.s.pfnR0GstUnmonitorCR3 = pModeData->pfnR0GstUnmonitorCR3;
    pVM->pgm.s.pfnR0GstMapCR3 = pModeData->pfnR0GstMapCR3;
    pVM->pgm.s.pfnR0GstUnmapCR3 = pModeData->pfnR0GstUnmapCR3;
    pVM->pgm.s.pfnR0GstWriteHandlerCR3 = pModeData->pfnR0GstWriteHandlerCR3;


    /* both */
    pVM->pgm.s.pfnR3BthRelocate = pModeData->pfnR3BthRelocate;
    pVM->pgm.s.pfnR3BthTrap0eHandler = pModeData->pfnR3BthTrap0eHandler;
    pVM->pgm.s.pfnR3BthInvalidatePage = pModeData->pfnR3BthInvalidatePage;
    pVM->pgm.s.pfnR3BthSyncCR3 = pModeData->pfnR3BthSyncCR3;
    Assert(pVM->pgm.s.pfnR3BthSyncCR3);
    pVM->pgm.s.pfnR3BthSyncPage = pModeData->pfnR3BthSyncPage;
    pVM->pgm.s.pfnR3BthPrefetchPage = pModeData->pfnR3BthPrefetchPage;
    pVM->pgm.s.pfnR3BthVerifyAccessSyncPage = pModeData->pfnR3BthVerifyAccessSyncPage;
#ifdef VBOX_STRICT
    pVM->pgm.s.pfnR3BthAssertCR3 = pModeData->pfnR3BthAssertCR3;
#endif

    pVM->pgm.s.pfnGCBthTrap0eHandler = pModeData->pfnGCBthTrap0eHandler;
    pVM->pgm.s.pfnGCBthInvalidatePage = pModeData->pfnGCBthInvalidatePage;
    pVM->pgm.s.pfnGCBthSyncCR3 = pModeData->pfnGCBthSyncCR3;
    pVM->pgm.s.pfnGCBthSyncPage = pModeData->pfnGCBthSyncPage;
    pVM->pgm.s.pfnGCBthPrefetchPage = pModeData->pfnGCBthPrefetchPage;
    pVM->pgm.s.pfnGCBthVerifyAccessSyncPage = pModeData->pfnGCBthVerifyAccessSyncPage;
#ifdef VBOX_STRICT
    pVM->pgm.s.pfnGCBthAssertCR3 = pModeData->pfnGCBthAssertCR3;
#endif

    pVM->pgm.s.pfnR0BthTrap0eHandler = pModeData->pfnR0BthTrap0eHandler;
    pVM->pgm.s.pfnR0BthInvalidatePage = pModeData->pfnR0BthInvalidatePage;
    pVM->pgm.s.pfnR0BthSyncCR3 = pModeData->pfnR0BthSyncCR3;
    pVM->pgm.s.pfnR0BthSyncPage = pModeData->pfnR0BthSyncPage;
    pVM->pgm.s.pfnR0BthPrefetchPage = pModeData->pfnR0BthPrefetchPage;
    pVM->pgm.s.pfnR0BthVerifyAccessSyncPage = pModeData->pfnR0BthVerifyAccessSyncPage;
#ifdef VBOX_STRICT
    pVM->pgm.s.pfnR0BthAssertCR3 = pModeData->pfnR0BthAssertCR3;
#endif
}
2077
2078
2079#ifdef DEBUG_bird
2080#include <stdlib.h> /* getenv() remove me! */
2081#endif
2082
2083/**
2084 * Calculates the shadow paging mode.
2085 *
2086 * @returns The shadow paging mode.
2087 * @param enmGuestMode The guest mode.
2088 * @param enmHostMode The host mode.
2089 * @param enmShadowMode The current shadow mode.
2090 * @param penmSwitcher Where to store the switcher to use.
2091 * VMMSWITCHER_INVALID means no change.
2092 */
2093static PGMMODE pgmR3CalcShadowMode(PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode, VMMSWITCHER *penmSwitcher)
2094{
2095 VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
2096 switch (enmGuestMode)
2097 {
2098 /*
2099 * When switching to real or protected mode we don't change
2100 * anything since it's likely that we'll switch back pretty soon.
2101 *
2102 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
2103 * and is supposed to determin which shadow paging and switcher to
2104 * use during init.
2105 */
2106 case PGMMODE_REAL:
2107 case PGMMODE_PROTECTED:
2108 if (enmShadowMode != PGMMODE_INVALID)
2109 break; /* (no change) */
2110 switch (enmHostMode)
2111 {
2112 case SUPPAGINGMODE_32_BIT:
2113 case SUPPAGINGMODE_32_BIT_GLOBAL:
2114 enmShadowMode = PGMMODE_32_BIT;
2115 enmSwitcher = VMMSWITCHER_32_TO_32;
2116 break;
2117
2118 case SUPPAGINGMODE_PAE:
2119 case SUPPAGINGMODE_PAE_NX:
2120 case SUPPAGINGMODE_PAE_GLOBAL:
2121 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2122 enmShadowMode = PGMMODE_PAE;
2123 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
2124#ifdef DEBUG_bird
2125if (getenv("VBOX_32BIT"))
2126{
2127 enmShadowMode = PGMMODE_32_BIT;
2128 enmSwitcher = VMMSWITCHER_PAE_TO_32;
2129}
2130#endif
2131 break;
2132
2133 case SUPPAGINGMODE_AMD64:
2134 case SUPPAGINGMODE_AMD64_GLOBAL:
2135 case SUPPAGINGMODE_AMD64_NX:
2136 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2137 enmShadowMode = PGMMODE_PAE;
2138 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
2139 break;
2140
2141 default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
2142 }
2143 break;
2144
2145 case PGMMODE_32_BIT:
2146 switch (enmHostMode)
2147 {
2148 case SUPPAGINGMODE_32_BIT:
2149 case SUPPAGINGMODE_32_BIT_GLOBAL:
2150 enmShadowMode = PGMMODE_32_BIT;
2151 enmSwitcher = VMMSWITCHER_32_TO_32;
2152 break;
2153
2154 case SUPPAGINGMODE_PAE:
2155 case SUPPAGINGMODE_PAE_NX:
2156 case SUPPAGINGMODE_PAE_GLOBAL:
2157 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2158 enmShadowMode = PGMMODE_PAE;
2159 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
2160#ifdef DEBUG_bird
2161if (getenv("VBOX_32BIT"))
2162{
2163 enmShadowMode = PGMMODE_32_BIT;
2164 enmSwitcher = VMMSWITCHER_PAE_TO_32;
2165}
2166#endif
2167 break;
2168
2169 case SUPPAGINGMODE_AMD64:
2170 case SUPPAGINGMODE_AMD64_GLOBAL:
2171 case SUPPAGINGMODE_AMD64_NX:
2172 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2173 enmShadowMode = PGMMODE_PAE;
2174 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
2175 break;
2176
2177 default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
2178 }
2179 break;
2180
2181 case PGMMODE_PAE:
2182 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
2183 switch (enmHostMode)
2184 {
2185 case SUPPAGINGMODE_32_BIT:
2186 case SUPPAGINGMODE_32_BIT_GLOBAL:
2187 enmShadowMode = PGMMODE_PAE;
2188 enmSwitcher = VMMSWITCHER_32_TO_PAE;
2189 break;
2190
2191 case SUPPAGINGMODE_PAE:
2192 case SUPPAGINGMODE_PAE_NX:
2193 case SUPPAGINGMODE_PAE_GLOBAL:
2194 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2195 enmShadowMode = PGMMODE_PAE;
2196 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
2197 break;
2198
2199 case SUPPAGINGMODE_AMD64:
2200 case SUPPAGINGMODE_AMD64_GLOBAL:
2201 case SUPPAGINGMODE_AMD64_NX:
2202 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2203 enmShadowMode = PGMMODE_PAE;
2204 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
2205 break;
2206
2207 default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
2208 }
2209 break;
2210
2211 case PGMMODE_AMD64:
2212 case PGMMODE_AMD64_NX:
2213 switch (enmHostMode)
2214 {
2215 case SUPPAGINGMODE_32_BIT:
2216 case SUPPAGINGMODE_32_BIT_GLOBAL:
2217 enmShadowMode = PGMMODE_PAE;
2218 enmSwitcher = VMMSWITCHER_32_TO_AMD64;
2219 break;
2220
2221 case SUPPAGINGMODE_PAE:
2222 case SUPPAGINGMODE_PAE_NX:
2223 case SUPPAGINGMODE_PAE_GLOBAL:
2224 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2225 enmShadowMode = PGMMODE_PAE;
2226 enmSwitcher = VMMSWITCHER_PAE_TO_AMD64;
2227 break;
2228
2229 case SUPPAGINGMODE_AMD64:
2230 case SUPPAGINGMODE_AMD64_GLOBAL:
2231 case SUPPAGINGMODE_AMD64_NX:
2232 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2233 enmShadowMode = PGMMODE_PAE;
2234 enmSwitcher = VMMSWITCHER_AMD64_TO_AMD64;
2235 break;
2236
2237 default: AssertMsgFailed(("enmHostMode=%d\n", enmHostMode)); break;
2238 }
2239 break;
2240
2241
2242 default:
2243 AssertReleaseMsgFailed(("enmGuestMode=%d\n", enmGuestMode));
2244 return PGMMODE_INVALID;
2245 }
2246
2247 *penmSwitcher = enmSwitcher;
2248 return enmShadowMode;
2249}
2250
2251
2252/**
2253 * Performs the actual mode change.
2254 * This is called by PGMChangeMode and pgmR3InitPaging().
2255 *
2256 * @returns VBox status code.
2257 * @param pVM VM handle.
2258 * @param enmGuestMode The new guest mode. This is assumed to be different from
2259 * the current mode.
2260 */
2261int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode)
2262{
2263 LogFlow(("pgmR3ChangeMode: Guest mode: %d -> %d\n", pVM->pgm.s.enmGuestMode, enmGuestMode));
2264 STAM_REL_COUNTER_INC(&pVM->pgm.s.cGuestModeChanges);
2265
2266 /*
2267 * Calc the shadow mode and switcher.
2268 */
2269 VMMSWITCHER enmSwitcher;
2270 PGMMODE enmShadowMode = pgmR3CalcShadowMode(enmGuestMode, pVM->pgm.s.enmHostMode, pVM->pgm.s.enmShadowMode, &enmSwitcher);
2271 if (enmSwitcher != VMMSWITCHER_INVALID)
2272 {
2273 /*
2274 * Select new switcher.
2275 */
2276 int rc = VMMR3SelectSwitcher(pVM, enmSwitcher);
2277 if (VBOX_FAILURE(rc))
2278 {
2279 AssertReleaseMsgFailed(("VMMR3SelectSwitcher(%d) -> %Vrc\n", enmSwitcher, rc));
2280 return rc;
2281 }
2282 }
2283
2284 /*
2285 * Exit old mode(s).
2286 */
2287 /* shadow */
2288 if (enmShadowMode != pVM->pgm.s.enmShadowMode)
2289 {
2290 LogFlow(("pgmR3ChangeMode: Shadow mode: %d -> %d\n", pVM->pgm.s.enmShadowMode, enmShadowMode));
2291 if (PGM_SHW_PFN(Exit, pVM))
2292 {
2293 int rc = PGM_SHW_PFN(Exit, pVM)(pVM);
2294 if (VBOX_FAILURE(rc))
2295 {
2296 AssertMsgFailed(("Exit failed for shadow mode %d: %Vrc\n", pVM->pgm.s.enmShadowMode, rc));
2297 return rc;
2298 }
2299 }
2300
2301 }
2302
2303 /* guest */
2304 if (PGM_GST_PFN(Exit, pVM))
2305 {
2306 int rc = PGM_GST_PFN(Exit, pVM)(pVM);
2307 if (VBOX_FAILURE(rc))
2308 {
2309 AssertMsgFailed(("Exit failed for guest mode %d: %Vrc\n", pVM->pgm.s.enmGuestMode, rc));
2310 return rc;
2311 }
2312 }
2313
2314 /*
2315 * Load new paging mode data.
2316 */
2317 pgmR3ModeDataSwitch(pVM, enmShadowMode, enmGuestMode);
2318
2319 /*
2320 * Enter new shadow mode (if changed).
2321 */
2322 if (enmShadowMode != pVM->pgm.s.enmShadowMode)
2323 {
2324 int rc;
2325 pVM->pgm.s.enmShadowMode = enmShadowMode;
2326 switch (enmShadowMode)
2327 {
2328 case PGMMODE_32_BIT:
2329 rc = PGM_SHW_NAME_32BIT(Enter)(pVM);
2330 break;
2331 case PGMMODE_PAE:
2332 case PGMMODE_PAE_NX:
2333 rc = PGM_SHW_NAME_PAE(Enter)(pVM);
2334 break;
2335 case PGMMODE_AMD64:
2336 case PGMMODE_AMD64_NX:
2337 rc = PGM_SHW_NAME_AMD64(Enter)(pVM);
2338 break;
2339 case PGMMODE_REAL:
2340 case PGMMODE_PROTECTED:
2341 default:
2342 AssertReleaseMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
2343 return VERR_INTERNAL_ERROR;
2344 }
2345 if (VBOX_FAILURE(rc))
2346 {
2347 AssertReleaseMsgFailed(("Entering enmShadowMode=%d failed: %Vrc\n", enmShadowMode, rc));
2348 pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
2349 return rc;
2350 }
2351 }
2352
2353 /*
2354 * Enter the new guest and shadow+guest modes.
2355 */
2356 int rc = -1;
2357 int rc2 = -1;
2358 RTGCPHYS GCPhysCR3 = NIL_RTGCPHYS;
2359 pVM->pgm.s.enmGuestMode = enmGuestMode;
2360 switch (enmGuestMode)
2361 {
2362 case PGMMODE_REAL:
2363 rc = PGM_GST_NAME_REAL(Enter)(pVM, NIL_RTGCPHYS);
2364 switch (pVM->pgm.s.enmShadowMode)
2365 {
2366 case PGMMODE_32_BIT:
2367 rc2 = PGM_BTH_NAME_32BIT_REAL(Enter)(pVM, NIL_RTGCPHYS);
2368 break;
2369 case PGMMODE_PAE:
2370 case PGMMODE_PAE_NX:
2371 rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVM, NIL_RTGCPHYS);
2372 break;
2373 case PGMMODE_AMD64:
2374 case PGMMODE_AMD64_NX:
2375 rc2 = PGM_BTH_NAME_AMD64_REAL(Enter)(pVM, NIL_RTGCPHYS);
2376 break;
2377 default: AssertFailed(); break;
2378 }
2379 break;
2380
2381 case PGMMODE_PROTECTED:
2382 rc = PGM_GST_NAME_PROT(Enter)(pVM, NIL_RTGCPHYS);
2383 switch (pVM->pgm.s.enmShadowMode)
2384 {
2385 case PGMMODE_32_BIT:
2386 rc2 = PGM_BTH_NAME_32BIT_PROT(Enter)(pVM, NIL_RTGCPHYS);
2387 break;
2388 case PGMMODE_PAE:
2389 case PGMMODE_PAE_NX:
2390 rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVM, NIL_RTGCPHYS);
2391 break;
2392 case PGMMODE_AMD64:
2393 case PGMMODE_AMD64_NX:
2394 rc2 = PGM_BTH_NAME_AMD64_PROT(Enter)(pVM, NIL_RTGCPHYS);
2395 break;
2396 default: AssertFailed(); break;
2397 }
2398 break;
2399
2400 case PGMMODE_32_BIT:
2401 GCPhysCR3 = CPUMGetGuestCR3(pVM) & X86_CR3_PAGE_MASK;
2402 rc = PGM_GST_NAME_32BIT(Enter)(pVM, GCPhysCR3);
2403 switch (pVM->pgm.s.enmShadowMode)
2404 {
2405 case PGMMODE_32_BIT:
2406 rc2 = PGM_BTH_NAME_32BIT_32BIT(Enter)(pVM, GCPhysCR3);
2407 break;
2408 case PGMMODE_PAE:
2409 case PGMMODE_PAE_NX:
2410 rc2 = PGM_BTH_NAME_PAE_32BIT(Enter)(pVM, GCPhysCR3);
2411 break;
2412 case PGMMODE_AMD64:
2413 case PGMMODE_AMD64_NX:
2414 AssertMsgFailed(("Should use PAE shadow mode!\n"));
2415 default: AssertFailed(); break;
2416 }
2417 break;
2418
2419 //case PGMMODE_PAE_NX:
2420 case PGMMODE_PAE:
2421 GCPhysCR3 = CPUMGetGuestCR3(pVM) & X86_CR3_PAE_PAGE_MASK;
2422 rc = PGM_GST_NAME_PAE(Enter)(pVM, GCPhysCR3);
2423 switch (pVM->pgm.s.enmShadowMode)
2424 {
2425 case PGMMODE_PAE:
2426 case PGMMODE_PAE_NX:
2427 rc2 = PGM_BTH_NAME_PAE_PAE(Enter)(pVM, GCPhysCR3);
2428 break;
2429 case PGMMODE_32_BIT:
2430 case PGMMODE_AMD64:
2431 case PGMMODE_AMD64_NX:
2432 AssertMsgFailed(("Should use PAE shadow mode!\n"));
2433 default: AssertFailed(); break;
2434 }
2435 break;
2436
2437 //case PGMMODE_AMD64_NX:
2438 case PGMMODE_AMD64:
2439 GCPhysCR3 = CPUMGetGuestCR3(pVM) & 0xfffffffffffff000ULL; /** @todo define this mask and make CR3 64-bit in this case! */
2440 rc = PGM_GST_NAME_AMD64(Enter)(pVM, GCPhysCR3);
2441 switch (pVM->pgm.s.enmShadowMode)
2442 {
2443 case PGMMODE_AMD64:
2444 case PGMMODE_AMD64_NX:
2445 rc2 = PGM_BTH_NAME_AMD64_AMD64(Enter)(pVM, GCPhysCR3);
2446 break;
2447 case PGMMODE_32_BIT:
2448 case PGMMODE_PAE:
2449 case PGMMODE_PAE_NX:
2450 AssertMsgFailed(("Should use AMD64 shadow mode!\n"));
2451 default: AssertFailed(); break;
2452 }
2453 break;
2454
2455 default:
2456 AssertReleaseMsgFailed(("enmGuestMode=%d\n", enmGuestMode));
2457 rc = VERR_NOT_IMPLEMENTED;
2458 break;
2459 }
2460
2461 /* status codes. */
2462 AssertRC(rc);
2463 AssertRC(rc2);
2464 if (VBOX_SUCCESS(rc))
2465 {
2466 rc = rc2;
2467 if (VBOX_SUCCESS(rc)) /* no informational status codes. */
2468 rc = VINF_SUCCESS;
2469 }
2470
2471 /*
2472 * Notify SELM so it can update the TSSes with correct CR3s.
2473 */
2474 SELMR3PagingModeChanged(pVM);
2475
2476 /* Notify HWACCM as well. */
2477 HWACCMR3PagingModeChanged(pVM, pVM->pgm.s.enmShadowMode);
2478 return rc;
2479}
2480
2481
2482/**
2483 * Dumps a PAE shadow page table.
2484 *
2485 * @returns VBox status code (VINF_SUCCESS).
2486 * @param pVM The VM handle.
2487 * @param pPT Pointer to the page table.
2488 * @param u64Address The virtual address of the page table starts.
2489 * @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
2490 * @param cMaxDepth The maxium depth.
2491 * @param pHlp Pointer to the output functions.
2492 */
2493static int pgmR3DumpHierarchyHCPaePT(PVM pVM, PX86PTPAE pPT, uint64_t u64Address, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
2494{
2495 for (unsigned i = 0; i < ELEMENTS(pPT->a); i++)
2496 {
2497 X86PTEPAE Pte = pPT->a[i];
2498 if (Pte.n.u1Present)
2499 {
2500 pHlp->pfnPrintf(pHlp,
2501 fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
2502 ? "%016llx 3 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx\n"
2503 : "%08llx 2 | P %c %c %c %c %c %s %s %s %s 4K %c%c%c %016llx\n",
2504 u64Address + ((uint64_t)i << X86_PT_PAE_SHIFT),
2505 Pte.n.u1Write ? 'W' : 'R',
2506 Pte.n.u1User ? 'U' : 'S',
2507 Pte.n.u1Accessed ? 'A' : '-',
2508 Pte.n.u1Dirty ? 'D' : '-',
2509 Pte.n.u1Global ? 'G' : '-',
2510 Pte.n.u1WriteThru ? "WT" : "--",
2511 Pte.n.u1CacheDisable? "CD" : "--",
2512 Pte.n.u1PAT ? "AT" : "--",
2513 Pte.n.u1NoExecute ? "NX" : "--",
2514 Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
2515 Pte.u & BIT(10) ? '1' : '0',
2516 Pte.u & PGM_PTFLAGS_CSAM_VALIDATED? 'v' : '-',
2517 Pte.u & X86_PTE_PAE_PG_MASK);
2518 }
2519 }
2520 return VINF_SUCCESS;
2521}
2522
2523
2524/**
2525 * Dumps a PAE shadow page directory table.
2526 *
2527 * @returns VBox status code (VINF_SUCCESS).
2528 * @param pVM The VM handle.
2529 * @param HCPhys The physical address of the page directory table.
2530 * @param u64Address The virtual address of the page table starts.
2531 * @param cr4 The CR4, PSE is currently used.
2532 * @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
2533 * @param cMaxDepth The maxium depth.
2534 * @param pHlp Pointer to the output functions.
2535 */
2536static int pgmR3DumpHierarchyHCPaePD(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
2537{
2538 PX86PDPAE pPD = (PX86PDPAE)MMPagePhys2Page(pVM, HCPhys);
2539 if (!pPD)
2540 {
2541 pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory at HCPhys=%#VHp was not found in the page pool!\n",
2542 fLongMode ? 16 : 8, u64Address, HCPhys);
2543 return VERR_INVALID_PARAMETER;
2544 }
2545 int rc = VINF_SUCCESS;
2546 for (unsigned i = 0; i < ELEMENTS(pPD->a); i++)
2547 {
2548 X86PDEPAE Pde = pPD->a[i];
2549 if (Pde.n.u1Present)
2550 {
2551 if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
2552 pHlp->pfnPrintf(pHlp,
2553 fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
2554 ? "%016llx 2 | P %c %c %c %c %c %s %s %s %s 4M %c%c%c %016llx\n"
2555 : "%08llx 1 | P %c %c %c %c %c %s %s %s %s 4M %c%c%c %016llx\n",
2556 u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT),
2557 Pde.b.u1Write ? 'W' : 'R',
2558 Pde.b.u1User ? 'U' : 'S',
2559 Pde.b.u1Accessed ? 'A' : '-',
2560 Pde.b.u1Dirty ? 'D' : '-',
2561 Pde.b.u1Global ? 'G' : '-',
2562 Pde.b.u1WriteThru ? "WT" : "--",
2563 Pde.b.u1CacheDisable? "CD" : "--",
2564 Pde.b.u1PAT ? "AT" : "--",
2565 Pde.b.u1NoExecute ? "NX" : "--",
2566 Pde.u & BIT64(9) ? '1' : '0',
2567 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
2568 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
2569 Pde.u & X86_PDE_PAE_PG_MASK);
2570 else
2571 {
2572 pHlp->pfnPrintf(pHlp,
2573 fLongMode /*P R S A D G WT CD AT NX 4M a p ? */
2574 ? "%016llx 2 | P %c %c %c %c %c %s %s .. %s 4K %c%c%c %016llx\n"
2575 : "%08llx 1 | P %c %c %c %c %c %s %s .. %s 4K %c%c%c %016llx\n",
2576 u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT),
2577 Pde.n.u1Write ? 'W' : 'R',
2578 Pde.n.u1User ? 'U' : 'S',
2579 Pde.n.u1Accessed ? 'A' : '-',
2580 Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
2581 Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
2582 Pde.n.u1WriteThru ? "WT" : "--",
2583 Pde.n.u1CacheDisable? "CD" : "--",
2584 Pde.n.u1NoExecute ? "NX" : "--",
2585 Pde.u & BIT64(9) ? '1' : '0',
2586 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
2587 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
2588 Pde.u & X86_PDE_PAE_PG_MASK);
2589 if (cMaxDepth >= 1)
2590 {
2591 /** @todo what about using the page pool for mapping PTs? */
2592 uint64_t u64AddressPT = u64Address + ((uint64_t)i << X86_PD_PAE_SHIFT);
2593 RTHCPHYS HCPhysPT = Pde.u & X86_PDE_PAE_PG_MASK;
2594 PX86PTPAE pPT = NULL;
2595 if (!(Pde.u & PGM_PDFLAGS_MAPPING))
2596 pPT = (PX86PTPAE)MMPagePhys2Page(pVM, HCPhysPT);
2597 else
2598 {
2599 for (PPGMMAPPING pMap = pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
2600 {
2601 uint64_t off = u64AddressPT - pMap->GCPtr;
2602 if (off < pMap->cb)
2603 {
2604 const int iPDE = (uint32_t)(off >> X86_PD_SHIFT);
2605 const int iSub = (int)((off >> X86_PD_PAE_SHIFT) & 1); /* MSC is a pain sometimes */
2606 if ((iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0) != HCPhysPT)
2607 pHlp->pfnPrintf(pHlp, "%0*llx error! Mapping error! PT %d has HCPhysPT=%VHp not %VHp is in the PD.\n",
2608 fLongMode ? 16 : 8, u64AddressPT, iPDE,
2609 iSub ? pMap->aPTs[iPDE].HCPhysPaePT1 : pMap->aPTs[iPDE].HCPhysPaePT0, HCPhysPT);
2610 pPT = &pMap->aPTs[iPDE].paPaePTsR3[iSub];
2611 }
2612 }
2613 }
2614 int rc2 = VERR_INVALID_PARAMETER;
2615 if (pPT)
2616 rc2 = pgmR3DumpHierarchyHCPaePT(pVM, pPT, u64AddressPT, fLongMode, cMaxDepth - 1, pHlp);
2617 else
2618 pHlp->pfnPrintf(pHlp, "%0*llx error! Page table at HCPhys=%#VHp was not found in the page pool!\n",
2619 fLongMode ? 16 : 8, u64AddressPT, HCPhysPT);
2620 if (rc2 < rc && VBOX_SUCCESS(rc))
2621 rc = rc2;
2622 }
2623 }
2624 }
2625 }
2626 return rc;
2627}
2628
2629
2630/**
2631 * Dumps a PAE shadow page directory pointer table.
2632 *
2633 * @returns VBox status code (VINF_SUCCESS).
2634 * @param pVM The VM handle.
2635 * @param HCPhys The physical address of the page directory pointer table.
2636 * @param u64Address The virtual address of the page table starts.
2637 * @param cr4 The CR4, PSE is currently used.
2638 * @param fLongMode Set if this a long mode table; clear if it's a legacy mode table.
2639 * @param cMaxDepth The maxium depth.
2640 * @param pHlp Pointer to the output functions.
2641 */
2642static int pgmR3DumpHierarchyHCPaePDPTR(PVM pVM, RTHCPHYS HCPhys, uint64_t u64Address, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
2643{
2644 PX86PDPTR pPDPTR = (PX86PDPTR)MMPagePhys2Page(pVM, HCPhys);
2645 if (!pPDPTR)
2646 {
2647 pHlp->pfnPrintf(pHlp, "%0*llx error! Page directory pointer table at HCPhys=%#VHp was not found in the page pool!\n",
2648 fLongMode ? 16 : 8, u64Address, HCPhys);
2649 return VERR_INVALID_PARAMETER;
2650 }
2651
2652 int rc = VINF_SUCCESS;
2653 const unsigned c = fLongMode ? ELEMENTS(pPDPTR->a) : 4;
2654 for (unsigned i = 0; i < c; i++)
2655 {
2656 X86PDPE Pdpe = pPDPTR->a[i];
2657 if (Pdpe.n.u1Present)
2658 {
2659 if (fLongMode)
2660 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
2661 "%016llx 1 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
2662 u64Address + ((uint64_t)i << X86_PDPTR_SHIFT),
2663 Pdpe.n.u1Write ? 'W' : 'R',
2664 Pdpe.n.u1User ? 'U' : 'S',
2665 Pdpe.n.u1Accessed ? 'A' : '-',
2666 Pdpe.n.u3Reserved & 1? '?' : '.', /* ignored */
2667 Pdpe.n.u3Reserved & 4? '!' : '.', /* mbz */
2668 Pdpe.n.u1WriteThru ? "WT" : "--",
2669 Pdpe.n.u1CacheDisable? "CD" : "--",
2670 Pdpe.n.u3Reserved & 2? "!" : "..",/* mbz */
2671 Pdpe.n.u1NoExecute ? "NX" : "--",
2672 Pdpe.u & BIT(9) ? '1' : '0',
2673 Pdpe.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
2674 Pdpe.u & BIT(11) ? '1' : '0',
2675 Pdpe.u & X86_PDPE_PG_MASK);
2676 else
2677 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
2678 "%08x 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
2679 i << X86_PDPTR_SHIFT,
2680 Pdpe.n.u1Write ? '!' : '.', /* mbz */
2681 Pdpe.n.u1User ? '!' : '.', /* mbz */
2682 Pdpe.n.u1Accessed ? '!' : '.', /* mbz */
2683 Pdpe.n.u3Reserved & 1? '!' : '.', /* mbz */
2684 Pdpe.n.u3Reserved & 4? '!' : '.', /* mbz */
2685 Pdpe.n.u1WriteThru ? "WT" : "--",
2686 Pdpe.n.u1CacheDisable? "CD" : "--",
2687 Pdpe.n.u3Reserved & 2? "!" : "..",/* mbz */
2688 Pdpe.n.u1NoExecute ? "NX" : "--",
2689 Pdpe.u & BIT(9) ? '1' : '0',
2690 Pdpe.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
2691 Pdpe.u & BIT(11) ? '1' : '0',
2692 Pdpe.u & X86_PDPE_PG_MASK);
2693 if (cMaxDepth >= 1)
2694 {
2695 int rc2 = pgmR3DumpHierarchyHCPaePD(pVM, Pdpe.u & X86_PDPE_PG_MASK, u64Address + ((uint64_t)i << X86_PDPTR_SHIFT),
2696 cr4, fLongMode, cMaxDepth - 1, pHlp);
2697 if (rc2 < rc && VBOX_SUCCESS(rc))
2698 rc = rc2;
2699 }
2700 }
2701 }
2702 return rc;
2703}
2704
2705
2706/**
2707 * Dumps a 32-bit shadow page table.
2708 *
2709 * @returns VBox status code (VINF_SUCCESS).
2710 * @param pVM The VM handle.
2711 * @param HCPhys The physical address of the table.
2712 * @param cr4 The CR4, PSE is currently used.
2713 * @param cMaxDepth The maxium depth.
2714 * @param pHlp Pointer to the output functions.
2715 */
2716static int pgmR3DumpHierarchyHcPaePML4(PVM pVM, RTHCPHYS HCPhys, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
2717{
2718 PX86PML4 pPML4 = (PX86PML4)MMPagePhys2Page(pVM, HCPhys);
2719 if (!pPML4)
2720 {
2721 pHlp->pfnPrintf(pHlp, "Page map level 4 at HCPhys=%#VHp was not found in the page pool!\n", HCPhys);
2722 return VERR_INVALID_PARAMETER;
2723 }
2724
2725 int rc = VINF_SUCCESS;
2726 for (unsigned i = 0; i < ELEMENTS(pPML4->a); i++)
2727 {
2728 X86PML4E Pml4e = pPML4->a[i];
2729 if (Pml4e.n.u1Present)
2730 {
2731 uint64_t u64Address = ((uint64_t)i << X86_PML4_SHIFT) | (((uint64_t)i >> (X86_PML4_SHIFT - X86_PDPTR_SHIFT - 1)) * 0xffff000000000000ULL);
2732 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a p ? */
2733 "%016llx 0 | P %c %c %c %c %c %s %s %s %s .. %c%c%c %016llx\n",
2734 u64Address,
2735 Pml4e.n.u1Write ? 'W' : 'R',
2736 Pml4e.n.u1User ? 'U' : 'S',
2737 Pml4e.n.u1Accessed ? 'A' : '-',
2738 Pml4e.n.u3Reserved & 1? '?' : '.', /* ignored */
2739 Pml4e.n.u3Reserved & 4? '!' : '.', /* mbz */
2740 Pml4e.n.u1WriteThru ? "WT" : "--",
2741 Pml4e.n.u1CacheDisable? "CD" : "--",
2742 Pml4e.n.u3Reserved & 2? "!" : "..",/* mbz */
2743 Pml4e.n.u1NoExecute ? "NX" : "--",
2744 Pml4e.u & BIT(9) ? '1' : '0',
2745 Pml4e.u & PGM_PLXFLAGS_PERMANENT ? 'p' : '-',
2746 Pml4e.u & BIT(11) ? '1' : '0',
2747 Pml4e.u & X86_PML4E_PG_MASK);
2748
2749 if (cMaxDepth >= 1)
2750 {
2751 int rc2 = pgmR3DumpHierarchyHCPaePDPTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, u64Address, cr4, true, cMaxDepth - 1, pHlp);
2752 if (rc2 < rc && VBOX_SUCCESS(rc))
2753 rc = rc2;
2754 }
2755 }
2756 }
2757 return rc;
2758}
2759
2760
2761/**
2762 * Dumps a 32-bit shadow page table.
2763 *
2764 * @returns VBox status code (VINF_SUCCESS).
2765 * @param pVM The VM handle.
2766 * @param pPT Pointer to the page table.
2767 * @param u32Address The virtual address this table starts at.
2768 * @param pHlp Pointer to the output functions.
2769 */
2770int pgmR3DumpHierarchyHC32BitPT(PVM pVM, PX86PT pPT, uint32_t u32Address, PCDBGFINFOHLP pHlp)
2771{
2772 for (unsigned i = 0; i < ELEMENTS(pPT->a); i++)
2773 {
2774 X86PTE Pte = pPT->a[i];
2775 if (Pte.n.u1Present)
2776 {
2777 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a m d */
2778 "%08x 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x\n",
2779 u32Address + (i << X86_PT_SHIFT),
2780 Pte.n.u1Write ? 'W' : 'R',
2781 Pte.n.u1User ? 'U' : 'S',
2782 Pte.n.u1Accessed ? 'A' : '-',
2783 Pte.n.u1Dirty ? 'D' : '-',
2784 Pte.n.u1Global ? 'G' : '-',
2785 Pte.n.u1WriteThru ? "WT" : "--",
2786 Pte.n.u1CacheDisable? "CD" : "--",
2787 Pte.n.u1PAT ? "AT" : "--",
2788 Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
2789 Pte.u & BIT(10) ? '1' : '0',
2790 Pte.u & PGM_PTFLAGS_CSAM_VALIDATED ? 'v' : '-',
2791 Pte.u & X86_PDE_PG_MASK);
2792 }
2793 }
2794 return VINF_SUCCESS;
2795}
2796
2797
2798/**
2799 * Dumps a 32-bit shadow page directory and page tables.
2800 *
2801 * @returns VBox status code (VINF_SUCCESS).
2802 * @param pVM The VM handle.
2803 * @param cr3 The root of the hierarchy.
2804 * @param cr4 The CR4, PSE is currently used.
2805 * @param cMaxDepth How deep into the hierarchy the dumper should go.
2806 * @param pHlp Pointer to the output functions.
2807 */
2808int pgmR3DumpHierarchyHC32BitPD(PVM pVM, uint32_t cr3, uint32_t cr4, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
2809{
2810 PX86PD pPD = (PX86PD)MMPagePhys2Page(pVM, cr3 & X86_CR3_PAGE_MASK);
2811 if (!pPD)
2812 {
2813 pHlp->pfnPrintf(pHlp, "Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK);
2814 return VERR_INVALID_PARAMETER;
2815 }
2816
2817 int rc = VINF_SUCCESS;
2818 for (unsigned i = 0; i < ELEMENTS(pPD->a); i++)
2819 {
2820 X86PDE Pde = pPD->a[i];
2821 if (Pde.n.u1Present)
2822 {
2823 const uint32_t u32Address = i << X86_PD_SHIFT;
2824 if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
2825 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a m d */
2826 "%08x 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08x\n",
2827 u32Address,
2828 Pde.b.u1Write ? 'W' : 'R',
2829 Pde.b.u1User ? 'U' : 'S',
2830 Pde.b.u1Accessed ? 'A' : '-',
2831 Pde.b.u1Dirty ? 'D' : '-',
2832 Pde.b.u1Global ? 'G' : '-',
2833 Pde.b.u1WriteThru ? "WT" : "--",
2834 Pde.b.u1CacheDisable? "CD" : "--",
2835 Pde.b.u1PAT ? "AT" : "--",
2836 Pde.u & BIT64(9) ? '1' : '0',
2837 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
2838 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
2839 Pde.u & X86_PDE4M_PG_MASK);
2840 else
2841 {
2842 pHlp->pfnPrintf(pHlp, /*P R S A D G WT CD AT NX 4M a m d */
2843 "%08x 0 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x\n",
2844 u32Address,
2845 Pde.n.u1Write ? 'W' : 'R',
2846 Pde.n.u1User ? 'U' : 'S',
2847 Pde.n.u1Accessed ? 'A' : '-',
2848 Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
2849 Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
2850 Pde.n.u1WriteThru ? "WT" : "--",
2851 Pde.n.u1CacheDisable? "CD" : "--",
2852 Pde.u & BIT64(9) ? '1' : '0',
2853 Pde.u & PGM_PDFLAGS_MAPPING ? 'm' : '-',
2854 Pde.u & PGM_PDFLAGS_TRACK_DIRTY ? 'd' : '-',
2855 Pde.u & X86_PDE_PG_MASK);
2856 if (cMaxDepth >= 1)
2857 {
2858 /** @todo what about using the page pool for mapping PTs? */
2859 RTHCPHYS HCPhys = Pde.u & X86_PDE_PG_MASK;
2860 PX86PT pPT = NULL;
2861 if (!(Pde.u & PGM_PDFLAGS_MAPPING))
2862 pPT = (PX86PT)MMPagePhys2Page(pVM, HCPhys);
2863 else
2864 {
2865 for (PPGMMAPPING pMap = pVM->pgm.s.pMappingsR3; pMap; pMap = pMap->pNextR3)
2866 if (u32Address - pMap->GCPtr < pMap->cb)
2867 {
2868 int iPDE = (u32Address - pMap->GCPtr) >> X86_PD_SHIFT;
2869 if (pMap->aPTs[iPDE].HCPhysPT != HCPhys)
2870 pHlp->pfnPrintf(pHlp, "%08x error! Mapping error! PT %d has HCPhysPT=%VHp not %VHp is in the PD.\n",
2871 u32Address, iPDE, pMap->aPTs[iPDE].HCPhysPT, HCPhys);
2872 pPT = pMap->aPTs[iPDE].pPTR3;
2873 }
2874 }
2875 int rc2 = VERR_INVALID_PARAMETER;
2876 if (pPT)
2877 rc2 = pgmR3DumpHierarchyHC32BitPT(pVM, pPT, u32Address, pHlp);
2878 else
2879 pHlp->pfnPrintf(pHlp, "%08x error! Page table at %#x was not found in the page pool!\n", u32Address, HCPhys);
2880 if (rc2 < rc && VBOX_SUCCESS(rc))
2881 rc = rc2;
2882 }
2883 }
2884 }
2885 }
2886
2887 return rc;
2888}
2889
2890
2891/**
2892 * Dumps a 32-bit shadow page table.
2893 *
2894 * @returns VBox status code (VINF_SUCCESS).
2895 * @param pVM The VM handle.
2896 * @param pPT Pointer to the page table.
2897 * @param u32Address The virtual address this table starts at.
2898 * @param PhysSearch Address to search for.
2899 */
2900int pgmR3DumpHierarchyGC32BitPT(PVM pVM, PX86PT pPT, uint32_t u32Address, RTGCPHYS PhysSearch)
2901{
2902 for (unsigned i = 0; i < ELEMENTS(pPT->a); i++)
2903 {
2904 X86PTE Pte = pPT->a[i];
2905 if (Pte.n.u1Present)
2906 {
2907 Log(( /*P R S A D G WT CD AT NX 4M a m d */
2908 "%08x 1 | P %c %c %c %c %c %s %s %s .. 4K %c%c%c %08x\n",
2909 u32Address + (i << X86_PT_SHIFT),
2910 Pte.n.u1Write ? 'W' : 'R',
2911 Pte.n.u1User ? 'U' : 'S',
2912 Pte.n.u1Accessed ? 'A' : '-',
2913 Pte.n.u1Dirty ? 'D' : '-',
2914 Pte.n.u1Global ? 'G' : '-',
2915 Pte.n.u1WriteThru ? "WT" : "--",
2916 Pte.n.u1CacheDisable? "CD" : "--",
2917 Pte.n.u1PAT ? "AT" : "--",
2918 Pte.u & PGM_PTFLAGS_TRACK_DIRTY ? 'd' : '-',
2919 Pte.u & BIT(10) ? '1' : '0',
2920 Pte.u & PGM_PTFLAGS_CSAM_VALIDATED ? 'v' : '-',
2921 Pte.u & X86_PDE_PG_MASK));
2922
2923 if ((Pte.u & X86_PDE_PG_MASK) == PhysSearch)
2924 {
2925 uint64_t fPageShw = 0;
2926 RTHCPHYS pPhysHC = 0;
2927
2928 PGMShwGetPage(pVM, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), &fPageShw, &pPhysHC);
2929 Log(("Found %VGp at %VGv -> flags=%llx\n", PhysSearch, (RTGCPTR)(u32Address + (i << X86_PT_SHIFT)), fPageShw));
2930 }
2931 }
2932 }
2933 return VINF_SUCCESS;
2934}
2935
2936
2937/**
2938 * Dumps a 32-bit guest page directory and page tables.
2939 *
2940 * @returns VBox status code (VINF_SUCCESS).
2941 * @param pVM The VM handle.
2942 * @param cr3 The root of the hierarchy.
2943 * @param cr4 The CR4, PSE is currently used.
2944 * @param PhysSearch Address to search for.
2945 */
2946PGMR3DECL(int) PGMR3DumpHierarchyGC(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCPHYS PhysSearch)
2947{
2948 bool fLongMode = false;
2949 const unsigned cch = fLongMode ? 16 : 8; NOREF(cch);
2950 PX86PD pPD = 0;
2951
2952 int rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
2953 if (VBOX_FAILURE(rc) || !pPD)
2954 {
2955 Log(("Page directory at %#x was not found in the page pool!\n", cr3 & X86_CR3_PAGE_MASK));
2956 return VERR_INVALID_PARAMETER;
2957 }
2958
2959 Log(("cr3=%08x cr4=%08x%s\n"
2960 "%-*s P - Present\n"
2961 "%-*s | R/W - Read (0) / Write (1)\n"
2962 "%-*s | | U/S - User (1) / Supervisor (0)\n"
2963 "%-*s | | | A - Accessed\n"
2964 "%-*s | | | | D - Dirty\n"
2965 "%-*s | | | | | G - Global\n"
2966 "%-*s | | | | | | WT - Write thru\n"
2967 "%-*s | | | | | | | CD - Cache disable\n"
2968 "%-*s | | | | | | | | AT - Attribute table (PAT)\n"
2969 "%-*s | | | | | | | | | NX - No execute (K8)\n"
2970 "%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
2971 "%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
2972 "%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
2973 "%-*s Level | | | | | | | | | | | | Page\n"
2974 /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
2975 - W U - - - -- -- -- -- -- 010 */
2976 , cr3, cr4, fLongMode ? " Long Mode" : "",
2977 cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
2978 cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address"));
2979
2980 for (unsigned i = 0; i < ELEMENTS(pPD->a); i++)
2981 {
2982 X86PDE Pde = pPD->a[i];
2983 if (Pde.n.u1Present)
2984 {
2985 const uint32_t u32Address = i << X86_PD_SHIFT;
2986
2987 if ((cr4 & X86_CR4_PSE) && Pde.b.u1Size)
2988 Log(( /*P R S A D G WT CD AT NX 4M a m d */
2989 "%08x 0 | P %c %c %c %c %c %s %s %s .. 4M %c%c%c %08x\n",
2990 u32Address,
2991 Pde.b.u1Write ? 'W' : 'R',
2992 Pde.b.u1User ? 'U' : 'S',
2993 Pde.b.u1Accessed ? 'A' : '-',
2994 Pde.b.u1Dirty ? 'D' : '-',
2995 Pde.b.u1Global ? 'G' : '-',
2996 Pde.b.u1WriteThru ? "WT" : "--",
2997 Pde.b.u1CacheDisable? "CD" : "--",
2998 Pde.b.u1PAT ? "AT" : "--",
2999 Pde.u & BIT(9) ? '1' : '0',
3000 Pde.u & BIT(10) ? '1' : '0',
3001 Pde.u & BIT(11) ? '1' : '0',
3002 Pde.u & X86_PDE4M_PG_MASK));
3003 /** @todo PhysSearch */
3004 else
3005 {
3006 Log(( /*P R S A D G WT CD AT NX 4M a m d */
3007 "%08x 0 | P %c %c %c %c %c %s %s .. .. 4K %c%c%c %08x\n",
3008 u32Address,
3009 Pde.n.u1Write ? 'W' : 'R',
3010 Pde.n.u1User ? 'U' : 'S',
3011 Pde.n.u1Accessed ? 'A' : '-',
3012 Pde.n.u1Reserved0 ? '?' : '.', /* ignored */
3013 Pde.n.u1Reserved1 ? '?' : '.', /* ignored */
3014 Pde.n.u1WriteThru ? "WT" : "--",
3015 Pde.n.u1CacheDisable? "CD" : "--",
3016 Pde.u & BIT(9) ? '1' : '0',
3017 Pde.u & BIT(10) ? '1' : '0',
3018 Pde.u & BIT(11) ? '1' : '0',
3019 Pde.u & X86_PDE_PG_MASK));
3020 ////if (cMaxDepth >= 1)
3021 {
3022 /** @todo what about using the page pool for mapping PTs? */
3023 RTGCPHYS GCPhys = Pde.u & X86_PDE_PG_MASK;
3024 PX86PT pPT = NULL;
3025
3026 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pPT);
3027
3028 int rc2 = VERR_INVALID_PARAMETER;
3029 if (pPT)
3030 rc2 = pgmR3DumpHierarchyGC32BitPT(pVM, pPT, u32Address, PhysSearch);
3031 else
3032 Log(("%08x error! Page table at %#x was not found in the page pool!\n", u32Address, GCPhys));
3033 if (rc2 < rc && VBOX_SUCCESS(rc))
3034 rc = rc2;
3035 }
3036 }
3037 }
3038 }
3039
3040 return rc;
3041}
3042
3043
3044/**
3045 * Dumps a page table hierarchy use only physical addresses and cr4/lm flags.
3046 *
3047 * @returns VBox status code (VINF_SUCCESS).
3048 * @param pVM The VM handle.
3049 * @param cr3 The root of the hierarchy.
3050 * @param cr4 The cr4, only PAE and PSE is currently used.
3051 * @param fLongMode Set if long mode, false if not long mode.
3052 * @param cMaxDepth Number of levels to dump.
3053 * @param pHlp Pointer to the output functions.
3054 */
3055PGMR3DECL(int) PGMR3DumpHierarchyHC(PVM pVM, uint32_t cr3, uint32_t cr4, bool fLongMode, unsigned cMaxDepth, PCDBGFINFOHLP pHlp)
3056{
3057 if (!pHlp)
3058 pHlp = DBGFR3InfoLogHlp();
3059 if (!cMaxDepth)
3060 return VINF_SUCCESS;
3061 const unsigned cch = fLongMode ? 16 : 8;
3062 pHlp->pfnPrintf(pHlp,
3063 "cr3=%08x cr4=%08x%s\n"
3064 "%-*s P - Present\n"
3065 "%-*s | R/W - Read (0) / Write (1)\n"
3066 "%-*s | | U/S - User (1) / Supervisor (0)\n"
3067 "%-*s | | | A - Accessed\n"
3068 "%-*s | | | | D - Dirty\n"
3069 "%-*s | | | | | G - Global\n"
3070 "%-*s | | | | | | WT - Write thru\n"
3071 "%-*s | | | | | | | CD - Cache disable\n"
3072 "%-*s | | | | | | | | AT - Attribute table (PAT)\n"
3073 "%-*s | | | | | | | | | NX - No execute (K8)\n"
3074 "%-*s | | | | | | | | | | 4K/4M/2M - Page size.\n"
3075 "%-*s | | | | | | | | | | | AVL - a=allocated; m=mapping; d=track dirty;\n"
3076 "%-*s | | | | | | | | | | | | p=permanent; v=validated;\n"
3077 "%-*s Level | | | | | | | | | | | | Page\n"
3078 /* xxxx n **** P R S A D G WT CD AT NX 4M AVL xxxxxxxxxxxxx
3079 - W U - - - -- -- -- -- -- 010 */
3080 , cr3, cr4, fLongMode ? " Long Mode" : "",
3081 cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "",
3082 cch, "", cch, "", cch, "", cch, "", cch, "", cch, "", cch, "Address");
3083 if (cr4 & X86_CR4_PAE)
3084 {
3085 if (fLongMode)
3086 return pgmR3DumpHierarchyHcPaePML4(pVM, cr3 & X86_CR3_PAGE_MASK, cr4, cMaxDepth, pHlp);
3087 return pgmR3DumpHierarchyHCPaePDPTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, 0, cr4, false, cMaxDepth, pHlp);
3088 }
3089 return pgmR3DumpHierarchyHC32BitPD(pVM, cr3 & X86_CR3_PAGE_MASK, cr4, cMaxDepth, pHlp);
3090}
3091
3092
3093
3094#ifdef VBOX_WITH_DEBUGGER
3095/**
3096 * The '.pgmram' command.
3097 *
3098 * @returns VBox status.
3099 * @param pCmd Pointer to the command descriptor (as registered).
3100 * @param pCmdHlp Pointer to command helper functions.
3101 * @param pVM Pointer to the current VM (if any).
3102 * @param paArgs Pointer to (readonly) array of arguments.
3103 * @param cArgs Number of arguments in the array.
3104 */
3105static DECLCALLBACK(int) pgmR3CmdRam(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3106{
3107 /*
3108 * Validate input.
3109 */
3110 if (!pVM)
3111 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3112 if (!pVM->pgm.s.pRamRangesGC)
3113 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no Ram is registered.\n");
3114
3115 /*
3116 * Dump the ranges.
3117 */
3118 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "From - To (incl) pvHC\n");
3119 PPGMRAMRANGE pRam;
3120 for (pRam = pVM->pgm.s.pRamRangesHC; pRam; pRam = pRam->pNextHC)
3121 {
3122 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
3123 "%VGp - %VGp %p\n",
3124 pRam->GCPhys, pRam->GCPhysLast, pRam->pvHC);
3125 if (VBOX_FAILURE(rc))
3126 return rc;
3127 }
3128
3129 return VINF_SUCCESS;
3130}
3131
3132
3133/**
3134 * The '.pgmmap' command.
3135 *
3136 * @returns VBox status.
3137 * @param pCmd Pointer to the command descriptor (as registered).
3138 * @param pCmdHlp Pointer to command helper functions.
3139 * @param pVM Pointer to the current VM (if any).
3140 * @param paArgs Pointer to (readonly) array of arguments.
3141 * @param cArgs Number of arguments in the array.
3142 */
3143static DECLCALLBACK(int) pgmR3CmdMap(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3144{
3145 /*
3146 * Validate input.
3147 */
3148 if (!pVM)
3149 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3150 if (!pVM->pgm.s.pMappingsR3)
3151 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no mappings are registered.\n");
3152
3153 /*
3154 * Print message about the fixedness of the mappings.
3155 */
3156 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, pVM->pgm.s.fMappingsFixed ? "The mappings are FIXED.\n" : "The mappings are FLOATING.\n");
3157 if (VBOX_FAILURE(rc))
3158 return rc;
3159
3160 /*
3161 * Dump the ranges.
3162 */
3163 PPGMMAPPING pCur;
3164 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
3165 {
3166 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL,
3167 "%08x - %08x %s\n",
3168 pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
3169 if (VBOX_FAILURE(rc))
3170 return rc;
3171 }
3172
3173 return VINF_SUCCESS;
3174}
3175
3176
3177/**
3178 * The '.pgmsync' command.
3179 *
3180 * @returns VBox status.
3181 * @param pCmd Pointer to the command descriptor (as registered).
3182 * @param pCmdHlp Pointer to command helper functions.
3183 * @param pVM Pointer to the current VM (if any).
3184 * @param paArgs Pointer to (readonly) array of arguments.
3185 * @param cArgs Number of arguments in the array.
3186 */
3187static DECLCALLBACK(int) pgmR3CmdSync(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3188{
3189 /*
3190 * Validate input.
3191 */
3192 if (!pVM)
3193 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3194
3195 /*
3196 * Force page directory sync.
3197 */
3198 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
3199
3200 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Forcing page directory sync.\n");
3201 if (VBOX_FAILURE(rc))
3202 return rc;
3203
3204 return VINF_SUCCESS;
3205}
3206
3207
3208/**
3209 * The '.pgmsyncalways' command.
3210 *
3211 * @returns VBox status.
3212 * @param pCmd Pointer to the command descriptor (as registered).
3213 * @param pCmdHlp Pointer to command helper functions.
3214 * @param pVM Pointer to the current VM (if any).
3215 * @param paArgs Pointer to (readonly) array of arguments.
3216 * @param cArgs Number of arguments in the array.
3217 */
3218static DECLCALLBACK(int) pgmR3CmdSyncAlways(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
3219{
3220 /*
3221 * Validate input.
3222 */
3223 if (!pVM)
3224 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
3225
3226 /*
3227 * Force page directory sync.
3228 */
3229 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS)
3230 {
3231 ASMAtomicAndU32(&pVM->pgm.s.fSyncFlags, ~PGM_SYNC_ALWAYS);
3232 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Disabled permanent forced page directory syncing.\n");
3233 }
3234 else
3235 {
3236 ASMAtomicOrU32(&pVM->pgm.s.fSyncFlags, PGM_SYNC_ALWAYS);
3237 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
3238 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Enabled permanent forced page directory syncing.\n");
3239 }
3240}
3241
3242#endif
3243
3244/**
3245 * pvUser argument of the pgmR3CheckIntegrity*Node callbacks.
3246 */
3247typedef struct PGMCHECKINTARGS
3248{
3249 bool fLeftToRight; /**< true: left-to-right; false: right-to-left. */
3250 PPGMPHYSHANDLER pPrevPhys;
3251 PPGMVIRTHANDLER pPrevVirt;
3252 PPGMPHYS2VIRTHANDLER pPrevPhys2Virt;
3253 PVM pVM;
3254} PGMCHECKINTARGS, *PPGMCHECKINTARGS;
3255
3256/**
3257 * Validate a node in the physical handler tree.
3258 *
3259 * @returns 0 on if ok, other wise 1.
3260 * @param pNode The handler node.
3261 * @param pvUser pVM.
3262 */
3263static DECLCALLBACK(int) pgmR3CheckIntegrityPhysHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
3264{
3265 PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
3266 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode;
3267 AssertReleaseReturn(!((uintptr_t)pCur & 7), 1);
3268 AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGp-%VGp %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3269 AssertReleaseMsg( !pArgs->pPrevPhys
3270 || (pArgs->fLeftToRight ? pArgs->pPrevPhys->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys->Core.KeyLast > pCur->Core.Key),
3271 ("pPrevPhys=%p %VGp-%VGp %s\n"
3272 " pCur=%p %VGp-%VGp %s\n",
3273 pArgs->pPrevPhys, pArgs->pPrevPhys->Core.Key, pArgs->pPrevPhys->Core.KeyLast, pArgs->pPrevPhys->pszDesc,
3274 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3275 pArgs->pPrevPhys = pCur;
3276 return 0;
3277}
3278
3279
3280/**
3281 * Validate a node in the virtual handler tree.
3282 *
3283 * @returns 0 on if ok, other wise 1.
3284 * @param pNode The handler node.
3285 * @param pvUser pVM.
3286 */
3287static DECLCALLBACK(int) pgmR3CheckIntegrityVirtHandlerNode(PAVLROGCPTRNODECORE pNode, void *pvUser)
3288{
3289 PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
3290 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
3291 AssertReleaseReturn(!((uintptr_t)pCur & 7), 1);
3292 AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGv-%VGv %s\n", pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3293 AssertReleaseMsg( !pArgs->pPrevVirt
3294 || (pArgs->fLeftToRight ? pArgs->pPrevVirt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevVirt->Core.KeyLast > pCur->Core.Key),
3295 ("pPrevVirt=%p %VGv-%VGv %s\n"
3296 " pCur=%p %VGv-%VGv %s\n",
3297 pArgs->pPrevVirt, pArgs->pPrevVirt->Core.Key, pArgs->pPrevVirt->Core.KeyLast, pArgs->pPrevVirt->pszDesc,
3298 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc));
3299 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
3300 {
3301 AssertReleaseMsg(pCur->aPhysToVirt[iPage].offVirtHandler == -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage]),
3302 ("pCur=%p %VGv-%VGv %s\n"
3303 "iPage=%d offVirtHandle=%#x expected %#x\n",
3304 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->pszDesc,
3305 iPage, pCur->aPhysToVirt[iPage].offVirtHandler, -RT_OFFSETOF(PGMVIRTHANDLER, aPhysToVirt[iPage])));
3306 }
3307 pArgs->pPrevVirt = pCur;
3308 return 0;
3309}
3310
3311
3312/**
3313 * Validate a node in the virtual handler tree.
3314 *
3315 * @returns 0 on if ok, other wise 1.
3316 * @param pNode The handler node.
3317 * @param pvUser pVM.
3318 */
3319static DECLCALLBACK(int) pgmR3CheckIntegrityPhysToVirtHandlerNode(PAVLROGCPHYSNODECORE pNode, void *pvUser)
3320{
3321 PPGMCHECKINTARGS pArgs = (PPGMCHECKINTARGS)pvUser;
3322 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
3323 AssertReleaseMsgReturn(!((uintptr_t)pCur & 3), ("\n"), 1);
3324 AssertReleaseMsgReturn(!(pCur->offVirtHandler & 3), ("\n"), 1);
3325 AssertReleaseMsg(pCur->Core.Key <= pCur->Core.KeyLast,("pCur=%p %VGp-%VGp\n", pCur, pCur->Core.Key, pCur->Core.KeyLast));
3326 AssertReleaseMsg( !pArgs->pPrevPhys2Virt
3327 || (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
3328 ("pPrevPhys2Virt=%p %VGp-%VGp\n"
3329 " pCur=%p %VGp-%VGp\n",
3330 pArgs->pPrevPhys2Virt, pArgs->pPrevPhys2Virt->Core.Key, pArgs->pPrevPhys2Virt->Core.KeyLast,
3331 pCur, pCur->Core.Key, pCur->Core.KeyLast));
3332 AssertReleaseMsg( !pArgs->pPrevPhys2Virt
3333 || (pArgs->fLeftToRight ? pArgs->pPrevPhys2Virt->Core.KeyLast < pCur->Core.Key : pArgs->pPrevPhys2Virt->Core.KeyLast > pCur->Core.Key),
3334 ("pPrevPhys2Virt=%p %VGp-%VGp\n"
3335 " pCur=%p %VGp-%VGp\n",
3336 pArgs->pPrevPhys2Virt, pArgs->pPrevPhys2Virt->Core.Key, pArgs->pPrevPhys2Virt->Core.KeyLast,
3337 pCur, pCur->Core.Key, pCur->Core.KeyLast));
3338 AssertReleaseMsg((pCur->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD),
3339 ("pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3340 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias));
3341 if (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
3342 {
3343 PPGMPHYS2VIRTHANDLER pCur2 = pCur;
3344 for (;;)
3345 {
3346 pCur2 = (PPGMPHYS2VIRTHANDLER)((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
3347 AssertReleaseMsg(pCur2 != pCur,
3348 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3349 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias));
3350 AssertReleaseMsg((pCur2->offNextAlias & (PGMPHYS2VIRTHANDLER_IN_TREE | PGMPHYS2VIRTHANDLER_IS_HEAD)) == PGMPHYS2VIRTHANDLER_IN_TREE,
3351 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3352 "pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3353 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
3354 pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
3355 AssertReleaseMsg((pCur2->Core.Key ^ pCur->Core.Key) < PAGE_SIZE,
3356 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3357 "pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3358 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
3359 pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
3360 AssertReleaseMsg((pCur2->Core.KeyLast ^ pCur->Core.KeyLast) < PAGE_SIZE,
3361 (" pCur=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3362 "pCur2=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3363 pCur, pCur->Core.Key, pCur->Core.KeyLast, pCur->offVirtHandler, pCur->offNextAlias,
3364 pCur2, pCur2->Core.Key, pCur2->Core.KeyLast, pCur2->offVirtHandler, pCur2->offNextAlias));
3365 if (!(pCur2->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
3366 break;
3367 }
3368 }
3369
3370 pArgs->pPrevPhys2Virt = pCur;
3371 return 0;
3372}
3373
3374
3375/**
3376 * Perform an integrity check on the PGM component.
3377 *
3378 * @returns VINF_SUCCESS if everything is fine.
3379 * @returns VBox error status after asserting on integrity breach.
3380 * @param pVM The VM handle.
3381 */
3382PDMR3DECL(int) PGMR3CheckIntegrity(PVM pVM)
3383{
3384 AssertReleaseReturn(pVM->pgm.s.offVM, VERR_INTERNAL_ERROR);
3385
3386 /*
3387 * Check the trees.
3388 */
3389 int cErrors = 0;
3390 PGMCHECKINTARGS Args = { true, NULL, NULL, NULL, pVM };
3391 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3CheckIntegrityPhysHandlerNode, &Args);
3392 Args.fLeftToRight = false;
3393 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, false, pgmR3CheckIntegrityPhysHandlerNode, &Args);
3394 Args.fLeftToRight = true;
3395 cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmR3CheckIntegrityVirtHandlerNode, &Args);
3396 Args.fLeftToRight = false;
3397 cErrors += RTAvlroGCPtrDoWithAll( &pVM->pgm.s.pTreesHC->VirtHandlers, false, pgmR3CheckIntegrityVirtHandlerNode, &Args);
3398 Args.fLeftToRight = true;
3399 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysToVirtHandlers, true, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
3400 Args.fLeftToRight = false;
3401 cErrors += RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysToVirtHandlers, false, pgmR3CheckIntegrityPhysToVirtHandlerNode, &Args);
3402
3403 return !cErrors ? VINF_SUCCESS : VERR_INTERNAL_ERROR;
3404}
3405
3406
3407/**
3408 * Inform PGM if we want all mappings to be put into the shadow page table. (necessary for e.g. VMX)
3409 *
3410 * @returns VBox status code.
3411 * @param pVM VM handle.
3412 * @param fEnable Enable or disable shadow mappings
3413 */
3414PGMR3DECL(int) PGMR3ChangeShwPDMappings(PVM pVM, bool fEnable)
3415{
3416 pVM->pgm.s.fDisableMappings = !fEnable;
3417
3418 size_t cb;
3419 int rc = PGMR3MappingsSize(pVM, &cb);
3420 AssertRCReturn(rc, rc);
3421
3422 /* Pretend the mappings are now fixed; to force a refresh of the reserved PDEs. */
3423 rc = PGMR3MappingsFix(pVM, MM_HYPER_AREA_ADDRESS, cb);
3424 AssertRCReturn(rc, rc);
3425
3426 return VINF_SUCCESS;
3427}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette