VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@93573

Last change on this file since 93573 was 93573, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 The guest SLAT mode should be EPT even when nested-guest paging isn't enabled (since we need to still perform SLAT for real-mode).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 141.8 KB
1/* $Id: PGMAll.cpp 93573 2022-02-03 11:22:36Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/sup.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/stam.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/hm_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*********************************************************************************************************************************
48* Internal Functions *
49*********************************************************************************************************************************/
50DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
51DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
52DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
53#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
54static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
55 PPGMPTWALKGST pGstWalk);
56static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
57static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
58#endif
59static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
60static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
61
62
63#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
64/* Guest - EPT SLAT is identical for all guest paging modes. */
65# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
66# define PGM_GST_TYPE PGM_TYPE_EPT
67# include "PGMGstDefs.h"
68# include "PGMAllGstSlatEpt.cpp.h"
69# undef PGM_GST_TYPE
70#endif
71
72
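/*
 * Note: the blocks below re-include PGMAllShw.h, PGMAllGst.h and PGMAllBth.h
 * once per shadow/guest paging mode combination.  Each pass redefines
 * PGM_SHW_TYPE, PGM_GST_TYPE and the PGM_*_NAME() macros so the template
 * headers expand into a separate set of mode-specific worker functions (the
 * concrete function names come from the PGM_*_NAME_* macros, presumably
 * defined in PGMInternal.h).  The instantiated workers are gathered into the
 * g_aPgmGuestModeData, g_aPgmShadowModeData and g_aPgmBothModeData tables
 * further down in this file.
 */
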
73/*
74 * Shadow - 32-bit mode
75 */
76#define PGM_SHW_TYPE PGM_TYPE_32BIT
77#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
78#include "PGMAllShw.h"
79
80/* Guest - real mode */
81#define PGM_GST_TYPE PGM_TYPE_REAL
82#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
83#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
84#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
85#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
86#include "PGMGstDefs.h"
87#include "PGMAllGst.h"
88#include "PGMAllBth.h"
89#undef BTH_PGMPOOLKIND_PT_FOR_PT
90#undef BTH_PGMPOOLKIND_ROOT
91#undef PGM_BTH_NAME
92#undef PGM_GST_TYPE
93#undef PGM_GST_NAME
94
95/* Guest - protected mode */
96#define PGM_GST_TYPE PGM_TYPE_PROT
97#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
98#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
99#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
100#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
101#include "PGMGstDefs.h"
102#include "PGMAllGst.h"
103#include "PGMAllBth.h"
104#undef BTH_PGMPOOLKIND_PT_FOR_PT
105#undef BTH_PGMPOOLKIND_ROOT
106#undef PGM_BTH_NAME
107#undef PGM_GST_TYPE
108#undef PGM_GST_NAME
109
110/* Guest - 32-bit mode */
111#define PGM_GST_TYPE PGM_TYPE_32BIT
112#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
113#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
114#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
115#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
116#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
117#include "PGMGstDefs.h"
118#include "PGMAllGst.h"
119#include "PGMAllBth.h"
120#undef BTH_PGMPOOLKIND_PT_FOR_BIG
121#undef BTH_PGMPOOLKIND_PT_FOR_PT
122#undef BTH_PGMPOOLKIND_ROOT
123#undef PGM_BTH_NAME
124#undef PGM_GST_TYPE
125#undef PGM_GST_NAME
126
127#undef PGM_SHW_TYPE
128#undef PGM_SHW_NAME
129
130
131/*
132 * Shadow - PAE mode
133 */
134#define PGM_SHW_TYPE PGM_TYPE_PAE
135#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
136#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
137#include "PGMAllShw.h"
138
139/* Guest - real mode */
140#define PGM_GST_TYPE PGM_TYPE_REAL
141#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
142#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
143#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
144#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
145#include "PGMGstDefs.h"
146#include "PGMAllBth.h"
147#undef BTH_PGMPOOLKIND_PT_FOR_PT
148#undef BTH_PGMPOOLKIND_ROOT
149#undef PGM_BTH_NAME
150#undef PGM_GST_TYPE
151#undef PGM_GST_NAME
152
153/* Guest - protected mode */
154#define PGM_GST_TYPE PGM_TYPE_PROT
155#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
156#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
157#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
158#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
159#include "PGMGstDefs.h"
160#include "PGMAllBth.h"
161#undef BTH_PGMPOOLKIND_PT_FOR_PT
162#undef BTH_PGMPOOLKIND_ROOT
163#undef PGM_BTH_NAME
164#undef PGM_GST_TYPE
165#undef PGM_GST_NAME
166
167/* Guest - 32-bit mode */
168#define PGM_GST_TYPE PGM_TYPE_32BIT
169#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
170#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
171#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
172#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
173#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
174#include "PGMGstDefs.h"
175#include "PGMAllBth.h"
176#undef BTH_PGMPOOLKIND_PT_FOR_BIG
177#undef BTH_PGMPOOLKIND_PT_FOR_PT
178#undef BTH_PGMPOOLKIND_ROOT
179#undef PGM_BTH_NAME
180#undef PGM_GST_TYPE
181#undef PGM_GST_NAME
182
183
184/* Guest - PAE mode */
185#define PGM_GST_TYPE PGM_TYPE_PAE
186#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
187#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
188#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
189#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
190#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
191#include "PGMGstDefs.h"
192#include "PGMAllGst.h"
193#include "PGMAllBth.h"
194#undef BTH_PGMPOOLKIND_PT_FOR_BIG
195#undef BTH_PGMPOOLKIND_PT_FOR_PT
196#undef BTH_PGMPOOLKIND_ROOT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205/*
206 * Shadow - AMD64 mode
207 */
208#define PGM_SHW_TYPE PGM_TYPE_AMD64
209#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
210#include "PGMAllShw.h"
211
212/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
213/** @todo retire this hack. */
214#define PGM_GST_TYPE PGM_TYPE_PROT
215#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
216#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
217#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
218#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
219#include "PGMGstDefs.h"
220#include "PGMAllBth.h"
221#undef BTH_PGMPOOLKIND_PT_FOR_PT
222#undef BTH_PGMPOOLKIND_ROOT
223#undef PGM_BTH_NAME
224#undef PGM_GST_TYPE
225#undef PGM_GST_NAME
226
227#ifdef VBOX_WITH_64_BITS_GUESTS
228/* Guest - AMD64 mode */
229# define PGM_GST_TYPE PGM_TYPE_AMD64
230# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
231# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
232# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
233# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
234# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
235# include "PGMGstDefs.h"
236# include "PGMAllGst.h"
237# include "PGMAllBth.h"
238# undef BTH_PGMPOOLKIND_PT_FOR_BIG
239# undef BTH_PGMPOOLKIND_PT_FOR_PT
240# undef BTH_PGMPOOLKIND_ROOT
241# undef PGM_BTH_NAME
242# undef PGM_GST_TYPE
243# undef PGM_GST_NAME
244#endif /* VBOX_WITH_64_BITS_GUESTS */
245
246#undef PGM_SHW_TYPE
247#undef PGM_SHW_NAME
248
249
250/*
251 * Shadow - 32-bit nested paging mode.
252 */
253#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
254#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
255#include "PGMAllShw.h"
256
257/* Guest - real mode */
258#define PGM_GST_TYPE PGM_TYPE_REAL
259#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
260#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
261#include "PGMGstDefs.h"
262#include "PGMAllBth.h"
263#undef PGM_BTH_NAME
264#undef PGM_GST_TYPE
265#undef PGM_GST_NAME
266
267/* Guest - protected mode */
268#define PGM_GST_TYPE PGM_TYPE_PROT
269#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
270#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
271#include "PGMGstDefs.h"
272#include "PGMAllBth.h"
273#undef PGM_BTH_NAME
274#undef PGM_GST_TYPE
275#undef PGM_GST_NAME
276
277/* Guest - 32-bit mode */
278#define PGM_GST_TYPE PGM_TYPE_32BIT
279#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
280#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
281#include "PGMGstDefs.h"
282#include "PGMAllBth.h"
283#undef PGM_BTH_NAME
284#undef PGM_GST_TYPE
285#undef PGM_GST_NAME
286
287/* Guest - PAE mode */
288#define PGM_GST_TYPE PGM_TYPE_PAE
289#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
290#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
291#include "PGMGstDefs.h"
292#include "PGMAllBth.h"
293#undef PGM_BTH_NAME
294#undef PGM_GST_TYPE
295#undef PGM_GST_NAME
296
297#ifdef VBOX_WITH_64_BITS_GUESTS
298/* Guest - AMD64 mode */
299# define PGM_GST_TYPE PGM_TYPE_AMD64
300# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
301# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
302# include "PGMGstDefs.h"
303# include "PGMAllBth.h"
304# undef PGM_BTH_NAME
305# undef PGM_GST_TYPE
306# undef PGM_GST_NAME
307#endif /* VBOX_WITH_64_BITS_GUESTS */
308
309#undef PGM_SHW_TYPE
310#undef PGM_SHW_NAME
311
312
313/*
314 * Shadow - PAE nested paging mode.
315 */
316#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
317#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
318#include "PGMAllShw.h"
319
320/* Guest - real mode */
321#define PGM_GST_TYPE PGM_TYPE_REAL
322#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
323#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
324#include "PGMGstDefs.h"
325#include "PGMAllBth.h"
326#undef PGM_BTH_NAME
327#undef PGM_GST_TYPE
328#undef PGM_GST_NAME
329
330/* Guest - protected mode */
331#define PGM_GST_TYPE PGM_TYPE_PROT
332#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
333#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
334#include "PGMGstDefs.h"
335#include "PGMAllBth.h"
336#undef PGM_BTH_NAME
337#undef PGM_GST_TYPE
338#undef PGM_GST_NAME
339
340/* Guest - 32-bit mode */
341#define PGM_GST_TYPE PGM_TYPE_32BIT
342#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
343#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
344#include "PGMGstDefs.h"
345#include "PGMAllBth.h"
346#undef PGM_BTH_NAME
347#undef PGM_GST_TYPE
348#undef PGM_GST_NAME
349
350/* Guest - PAE mode */
351#define PGM_GST_TYPE PGM_TYPE_PAE
352#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
353#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
354#include "PGMGstDefs.h"
355#include "PGMAllBth.h"
356#undef PGM_BTH_NAME
357#undef PGM_GST_TYPE
358#undef PGM_GST_NAME
359
360#ifdef VBOX_WITH_64_BITS_GUESTS
361/* Guest - AMD64 mode */
362# define PGM_GST_TYPE PGM_TYPE_AMD64
363# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
365# include "PGMGstDefs.h"
366# include "PGMAllBth.h"
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370#endif /* VBOX_WITH_64_BITS_GUESTS */
371
372#undef PGM_SHW_TYPE
373#undef PGM_SHW_NAME
374
375
376/*
377 * Shadow - AMD64 nested paging mode.
378 */
379#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
380#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
381#include "PGMAllShw.h"
382
383/* Guest - real mode */
384#define PGM_GST_TYPE PGM_TYPE_REAL
385#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
386#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
387#include "PGMGstDefs.h"
388#include "PGMAllBth.h"
389#undef PGM_BTH_NAME
390#undef PGM_GST_TYPE
391#undef PGM_GST_NAME
392
393/* Guest - protected mode */
394#define PGM_GST_TYPE PGM_TYPE_PROT
395#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
396#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
397#include "PGMGstDefs.h"
398#include "PGMAllBth.h"
399#undef PGM_BTH_NAME
400#undef PGM_GST_TYPE
401#undef PGM_GST_NAME
402
403/* Guest - 32-bit mode */
404#define PGM_GST_TYPE PGM_TYPE_32BIT
405#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
406#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
407#include "PGMGstDefs.h"
408#include "PGMAllBth.h"
409#undef PGM_BTH_NAME
410#undef PGM_GST_TYPE
411#undef PGM_GST_NAME
412
413/* Guest - PAE mode */
414#define PGM_GST_TYPE PGM_TYPE_PAE
415#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
416#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
417#include "PGMGstDefs.h"
418#include "PGMAllBth.h"
419#undef PGM_BTH_NAME
420#undef PGM_GST_TYPE
421#undef PGM_GST_NAME
422
423#ifdef VBOX_WITH_64_BITS_GUESTS
424/* Guest - AMD64 mode */
425# define PGM_GST_TYPE PGM_TYPE_AMD64
426# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
427# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
428# include "PGMGstDefs.h"
429# include "PGMAllBth.h"
430# undef PGM_BTH_NAME
431# undef PGM_GST_TYPE
432# undef PGM_GST_NAME
433#endif /* VBOX_WITH_64_BITS_GUESTS */
434
435#undef PGM_SHW_TYPE
436#undef PGM_SHW_NAME
437
438
439/*
440 * Shadow - EPT.
441 */
442#define PGM_SHW_TYPE PGM_TYPE_EPT
443#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
444#include "PGMAllShw.h"
445
446/* Guest - real mode */
447#define PGM_GST_TYPE PGM_TYPE_REAL
448#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
449#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
450#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
451#include "PGMGstDefs.h"
452#include "PGMAllBth.h"
453#undef BTH_PGMPOOLKIND_PT_FOR_PT
454#undef PGM_BTH_NAME
455#undef PGM_GST_TYPE
456#undef PGM_GST_NAME
457
458/* Guest - protected mode */
459#define PGM_GST_TYPE PGM_TYPE_PROT
460#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
461#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
462#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
463#include "PGMGstDefs.h"
464#include "PGMAllBth.h"
465#undef BTH_PGMPOOLKIND_PT_FOR_PT
466#undef PGM_BTH_NAME
467#undef PGM_GST_TYPE
468#undef PGM_GST_NAME
469
470/* Guest - 32-bit mode */
471#define PGM_GST_TYPE PGM_TYPE_32BIT
472#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
473#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
474#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
475#include "PGMGstDefs.h"
476#include "PGMAllBth.h"
477#undef BTH_PGMPOOLKIND_PT_FOR_PT
478#undef PGM_BTH_NAME
479#undef PGM_GST_TYPE
480#undef PGM_GST_NAME
481
482/* Guest - PAE mode */
483#define PGM_GST_TYPE PGM_TYPE_PAE
484#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
485#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
486#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
487#include "PGMGstDefs.h"
488#include "PGMAllBth.h"
489#undef BTH_PGMPOOLKIND_PT_FOR_PT
490#undef PGM_BTH_NAME
491#undef PGM_GST_TYPE
492#undef PGM_GST_NAME
493
494#ifdef VBOX_WITH_64_BITS_GUESTS
495/* Guest - AMD64 mode */
496# define PGM_GST_TYPE PGM_TYPE_AMD64
497# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
498# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
499# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
500# include "PGMGstDefs.h"
501# include "PGMAllBth.h"
502# undef BTH_PGMPOOLKIND_PT_FOR_PT
503# undef PGM_BTH_NAME
504# undef PGM_GST_TYPE
505# undef PGM_GST_NAME
506#endif /* VBOX_WITH_64_BITS_GUESTS */
507
508#undef PGM_SHW_TYPE
509#undef PGM_SHW_NAME
510
511
512/*
513 * Shadow - NEM / None.
514 */
515#define PGM_SHW_TYPE PGM_TYPE_NONE
516#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
517#include "PGMAllShw.h"
518
519/* Guest - real mode */
520#define PGM_GST_TYPE PGM_TYPE_REAL
521#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
522#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
523#include "PGMGstDefs.h"
524#include "PGMAllBth.h"
525#undef PGM_BTH_NAME
526#undef PGM_GST_TYPE
527#undef PGM_GST_NAME
528
529/* Guest - protected mode */
530#define PGM_GST_TYPE PGM_TYPE_PROT
531#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
532#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
533#include "PGMGstDefs.h"
534#include "PGMAllBth.h"
535#undef PGM_BTH_NAME
536#undef PGM_GST_TYPE
537#undef PGM_GST_NAME
538
539/* Guest - 32-bit mode */
540#define PGM_GST_TYPE PGM_TYPE_32BIT
541#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
542#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
543#include "PGMGstDefs.h"
544#include "PGMAllBth.h"
545#undef PGM_BTH_NAME
546#undef PGM_GST_TYPE
547#undef PGM_GST_NAME
548
549/* Guest - PAE mode */
550#define PGM_GST_TYPE PGM_TYPE_PAE
551#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
552#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
553#include "PGMGstDefs.h"
554#include "PGMAllBth.h"
555#undef PGM_BTH_NAME
556#undef PGM_GST_TYPE
557#undef PGM_GST_NAME
558
559#ifdef VBOX_WITH_64_BITS_GUESTS
560/* Guest - AMD64 mode */
561# define PGM_GST_TYPE PGM_TYPE_AMD64
562# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
563# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
564# include "PGMGstDefs.h"
565# include "PGMAllBth.h"
566# undef PGM_BTH_NAME
567# undef PGM_GST_TYPE
568# undef PGM_GST_NAME
569#endif /* VBOX_WITH_64_BITS_GUESTS */
570
571#undef PGM_SHW_TYPE
572#undef PGM_SHW_NAME
573
574
575
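/*
 * Note: the three tables below (guest, shadow and combined guest+shadow mode
 * data) collect the worker functions instantiated by the template includes
 * above.  Entry 0 of each table is a deliberate null entry and the remaining
 * entries are laid out by PGM_TYPE_XXX value; the combined table additionally
 * holds one block of guest-mode entries per shadow paging mode.  The runtime
 * index values (e.g. idxShadowModeData and idxBothModeData used below) are
 * presumably computed elsewhere in PGM when the paging mode changes; that
 * calculation is not shown in this file.
 */
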
576/**
577 * Guest mode data array.
578 */
579PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
580{
581 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
582 {
583 PGM_TYPE_REAL,
584 PGM_GST_NAME_REAL(GetPage),
585 PGM_GST_NAME_REAL(ModifyPage),
586 PGM_GST_NAME_REAL(Enter),
587 PGM_GST_NAME_REAL(Exit),
588#ifdef IN_RING3
589 PGM_GST_NAME_REAL(Relocate),
590#endif
591 },
592 {
593 PGM_TYPE_PROT,
594 PGM_GST_NAME_PROT(GetPage),
595 PGM_GST_NAME_PROT(ModifyPage),
596 PGM_GST_NAME_PROT(Enter),
597 PGM_GST_NAME_PROT(Exit),
598#ifdef IN_RING3
599 PGM_GST_NAME_PROT(Relocate),
600#endif
601 },
602 {
603 PGM_TYPE_32BIT,
604 PGM_GST_NAME_32BIT(GetPage),
605 PGM_GST_NAME_32BIT(ModifyPage),
606 PGM_GST_NAME_32BIT(Enter),
607 PGM_GST_NAME_32BIT(Exit),
608#ifdef IN_RING3
609 PGM_GST_NAME_32BIT(Relocate),
610#endif
611 },
612 {
613 PGM_TYPE_PAE,
614 PGM_GST_NAME_PAE(GetPage),
615 PGM_GST_NAME_PAE(ModifyPage),
616 PGM_GST_NAME_PAE(Enter),
617 PGM_GST_NAME_PAE(Exit),
618#ifdef IN_RING3
619 PGM_GST_NAME_PAE(Relocate),
620#endif
621 },
622#ifdef VBOX_WITH_64_BITS_GUESTS
623 {
624 PGM_TYPE_AMD64,
625 PGM_GST_NAME_AMD64(GetPage),
626 PGM_GST_NAME_AMD64(ModifyPage),
627 PGM_GST_NAME_AMD64(Enter),
628 PGM_GST_NAME_AMD64(Exit),
629# ifdef IN_RING3
630 PGM_GST_NAME_AMD64(Relocate),
631# endif
632 },
633#endif
634};
635
636
637/**
638 * The shadow mode data array.
639 */
640PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
641{
642 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
643 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
644 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
645 {
646 PGM_TYPE_32BIT,
647 PGM_SHW_NAME_32BIT(GetPage),
648 PGM_SHW_NAME_32BIT(ModifyPage),
649 PGM_SHW_NAME_32BIT(Enter),
650 PGM_SHW_NAME_32BIT(Exit),
651#ifdef IN_RING3
652 PGM_SHW_NAME_32BIT(Relocate),
653#endif
654 },
655 {
656 PGM_TYPE_PAE,
657 PGM_SHW_NAME_PAE(GetPage),
658 PGM_SHW_NAME_PAE(ModifyPage),
659 PGM_SHW_NAME_PAE(Enter),
660 PGM_SHW_NAME_PAE(Exit),
661#ifdef IN_RING3
662 PGM_SHW_NAME_PAE(Relocate),
663#endif
664 },
665 {
666 PGM_TYPE_AMD64,
667 PGM_SHW_NAME_AMD64(GetPage),
668 PGM_SHW_NAME_AMD64(ModifyPage),
669 PGM_SHW_NAME_AMD64(Enter),
670 PGM_SHW_NAME_AMD64(Exit),
671#ifdef IN_RING3
672 PGM_SHW_NAME_AMD64(Relocate),
673#endif
674 },
675 {
676 PGM_TYPE_NESTED_32BIT,
677 PGM_SHW_NAME_NESTED_32BIT(GetPage),
678 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
679 PGM_SHW_NAME_NESTED_32BIT(Enter),
680 PGM_SHW_NAME_NESTED_32BIT(Exit),
681#ifdef IN_RING3
682 PGM_SHW_NAME_NESTED_32BIT(Relocate),
683#endif
684 },
685 {
686 PGM_TYPE_NESTED_PAE,
687 PGM_SHW_NAME_NESTED_PAE(GetPage),
688 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
689 PGM_SHW_NAME_NESTED_PAE(Enter),
690 PGM_SHW_NAME_NESTED_PAE(Exit),
691#ifdef IN_RING3
692 PGM_SHW_NAME_NESTED_PAE(Relocate),
693#endif
694 },
695 {
696 PGM_TYPE_NESTED_AMD64,
697 PGM_SHW_NAME_NESTED_AMD64(GetPage),
698 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
699 PGM_SHW_NAME_NESTED_AMD64(Enter),
700 PGM_SHW_NAME_NESTED_AMD64(Exit),
701#ifdef IN_RING3
702 PGM_SHW_NAME_NESTED_AMD64(Relocate),
703#endif
704 },
705 {
706 PGM_TYPE_EPT,
707 PGM_SHW_NAME_EPT(GetPage),
708 PGM_SHW_NAME_EPT(ModifyPage),
709 PGM_SHW_NAME_EPT(Enter),
710 PGM_SHW_NAME_EPT(Exit),
711#ifdef IN_RING3
712 PGM_SHW_NAME_EPT(Relocate),
713#endif
714 },
715 {
716 PGM_TYPE_NONE,
717 PGM_SHW_NAME_NONE(GetPage),
718 PGM_SHW_NAME_NONE(ModifyPage),
719 PGM_SHW_NAME_NONE(Enter),
720 PGM_SHW_NAME_NONE(Exit),
721#ifdef IN_RING3
722 PGM_SHW_NAME_NONE(Relocate),
723#endif
724 },
725};
726
727
728/**
729 * The guest+shadow mode data array.
730 */
731PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
732{
733#if !defined(IN_RING3) && !defined(VBOX_STRICT)
734# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
735# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
736 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
737
738#elif !defined(IN_RING3) && defined(VBOX_STRICT)
739# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
740# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
741 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
742
743#elif defined(IN_RING3) && !defined(VBOX_STRICT)
744# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
745# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
746 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
747
748#elif defined(IN_RING3) && defined(VBOX_STRICT)
749# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
750# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
751 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
752
753#else
754# error "Misconfig."
755#endif
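 /*
  * Note: the entry layout differs by build context - the ring-3 variants of
  * PGMMODEDATABTH_ENTRY omit the Trap0eHandler member (in this file guest
  * page faults are only handled in ring-0, see PGMTrap0eHandler below), and
  * AssertCR3 is only present in strict builds.  This presumably mirrors
  * conditionally compiled members of the PGMMODEDATABTH structure.
  */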
756
757 /* 32-bit shadow paging mode: */
758 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
759 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
760 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
761 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
762 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
763 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
764 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
765 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
766 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
767 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
768 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
769
770 /* PAE shadow paging mode: */
771 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
772 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
773 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
774 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
776 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
777 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
782
783 /* AMD64 shadow paging mode: */
784 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
785 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
786 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
787 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
788 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
789#ifdef VBOX_WITH_64_BITS_GUESTS
790 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
791#else
792 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
793#endif
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
795 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
799
800 /* 32-bit nested paging mode: */
801 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
802 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
803 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
804 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
805 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
806#ifdef VBOX_WITH_64_BITS_GUESTS
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
808#else
809 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
810#endif
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
816
817 /* PAE nested paging mode: */
818 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
819 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
822 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
823#ifdef VBOX_WITH_64_BITS_GUESTS
824 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
825#else
826 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
827#endif
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
833
834 /* AMD64 nested paging mode: */
835 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
836 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
840#ifdef VBOX_WITH_64_BITS_GUESTS
841 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
842#else
843 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
844#endif
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
850
851 /* EPT nested paging mode: */
852 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
853 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
857#ifdef VBOX_WITH_64_BITS_GUESTS
858 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
859#else
860 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
861#endif
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
867
868 /* NONE / NEM: */
869 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
870 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
874#ifdef VBOX_WITH_64_BITS_GUESTS
875 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
876#else
877 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
878#endif
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
884
885
886#undef PGMMODEDATABTH_ENTRY
887#undef PGMMODEDATABTH_NULL_ENTRY
888};
889
890
891#ifdef IN_RING0
892/**
893 * #PF Handler.
894 *
895 * @returns VBox status code (appropriate for trap handling and GC return).
896 * @param pVCpu The cross context virtual CPU structure.
897 * @param uErr The trap error code.
898 * @param pRegFrame Trap register frame.
899 * @param pvFault The fault address.
900 */
901VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
902{
903 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
904
905 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
906 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
907 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
908
909
910# ifdef VBOX_WITH_STATISTICS
911 /*
912 * Error code stats.
913 */
914 if (uErr & X86_TRAP_PF_US)
915 {
916 if (!(uErr & X86_TRAP_PF_P))
917 {
918 if (uErr & X86_TRAP_PF_RW)
919 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
920 else
921 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
922 }
923 else if (uErr & X86_TRAP_PF_RW)
924 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
925 else if (uErr & X86_TRAP_PF_RSVD)
926 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
927 else if (uErr & X86_TRAP_PF_ID)
928 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
929 else
930 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
931 }
932 else
933 { /* Supervisor */
934 if (!(uErr & X86_TRAP_PF_P))
935 {
936 if (uErr & X86_TRAP_PF_RW)
937 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
938 else
939 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
940 }
941 else if (uErr & X86_TRAP_PF_RW)
942 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
943 else if (uErr & X86_TRAP_PF_ID)
944 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
945 else if (uErr & X86_TRAP_PF_RSVD)
946 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
947 }
948# endif /* VBOX_WITH_STATISTICS */
949
950 /*
951 * Call the worker.
952 */
953 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
954 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
955 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
956 bool fLockTaken = false;
957 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
958 if (fLockTaken)
959 {
960 PGM_LOCK_ASSERT_OWNER(pVM);
961 PGM_UNLOCK(pVM);
962 }
963 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
964
965 /*
966 * Return code tweaks.
967 */
968 if (rc != VINF_SUCCESS)
969 {
970 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
971 rc = VINF_SUCCESS;
972
973 /* Note: hack alert for difficult to reproduce problem. */
974 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
975 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
976 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
977 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
978 {
979 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
980 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
981 rc = VINF_SUCCESS;
982 }
983 }
984
985 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
986 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
987 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
988 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
989 return rc;
990}
991#endif /* IN_RING0 */
992
993
994/**
995 * Prefetch a page
996 *
997 * Typically used to sync commonly used pages before entering raw mode
998 * after a CR3 reload.
999 *
1000 * @returns VBox status code suitable for scheduling.
1001 * @retval VINF_SUCCESS on success.
1002 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @param GCPtrPage Page to prefetch.
1005 */
1006VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1007{
1008 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1009
1010 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1011 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1012 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1013 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1014
1015 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1016 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1017 return rc;
1018}
1019
1020
1021/**
1022 * Emulation of the invlpg instruction (HC only actually).
1023 *
1024 * @returns Strict VBox status code, special care required.
1025 * @retval VINF_PGM_SYNC_CR3 - handled.
1026 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1027 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1028 *
1029 * @param pVCpu The cross context virtual CPU structure.
1030 * @param GCPtrPage Page to invalidate.
1031 *
1032 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1033 * safe, but there could be edge cases!
1034 *
1035 * @todo Flush page or page directory only if necessary!
1036 * @todo VBOXSTRICTRC
1037 */
1038VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1039{
1040 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1041 int rc;
1042 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1043
1044 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1045
1046 /*
1047 * Call paging mode specific worker.
1048 */
1049 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1050 PGM_LOCK_VOID(pVM);
1051
1052 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1053 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1054 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1055 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1056
1057 PGM_UNLOCK(pVM);
1058 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1059
1060 /* Ignore all irrelevant error codes. */
1061 if ( rc == VERR_PAGE_NOT_PRESENT
1062 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1063 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1064 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1065 rc = VINF_SUCCESS;
1066
1067 return rc;
1068}
1069
1070
1071/**
1072 * Executes an instruction using the interpreter.
1073 *
1074 * @returns VBox status code (appropriate for trap handling and GC return).
1075 * @param pVM The cross context VM structure.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 * @param pRegFrame Register frame.
1078 * @param pvFault Fault address.
1079 */
1080VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1081{
1082 NOREF(pVM);
1083 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1084 if (rc == VERR_EM_INTERPRETER)
1085 rc = VINF_EM_RAW_EMULATE_INSTR;
1086 if (rc != VINF_SUCCESS)
1087 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1088 return rc;
1089}
1090
1091
1092/**
1093 * Gets effective page information (from the VMM page directory).
1094 *
1095 * @returns VBox status code.
1096 * @param pVCpu The cross context virtual CPU structure.
1097 * @param GCPtr Guest Context virtual address of the page.
1098 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1099 * @param pHCPhys Where to store the HC physical address of the page.
1100 * This is page aligned.
1101 * @remark You should use PGMMapGetPage() for pages in a mapping.
1102 */
1103VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1104{
1105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1106 PGM_LOCK_VOID(pVM);
1107
1108 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1109 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1110 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1111 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1112
1113 PGM_UNLOCK(pVM);
1114 return rc;
1115}
1116
1117
1118/**
1119 * Modify page flags for a range of pages in the shadow context.
1120 *
1121 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1122 *
1123 * @returns VBox status code.
1124 * @param pVCpu The cross context virtual CPU structure.
1125 * @param GCPtr Virtual address of the first page in the range.
1126 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1127 * @param fMask The AND mask - page flags X86_PTE_*.
1128 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1129 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1130 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1131 */
1132DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1133{
1134 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1135 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1136
1137 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1138
1139 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1140 PGM_LOCK_VOID(pVM);
1141
1142 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1143 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1144 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1145 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1146
1147 PGM_UNLOCK(pVM);
1148 return rc;
1149}
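
/*
 * Note (illustrative): pdmShwModifyPage computes the new page flags roughly
 * as uNew = (uOld & fMask) | fFlags, per the AND/OR contract documented
 * above.  The PGMShwMakePageXxx wrappers below show typical combinations,
 * e.g. clearing X86_PTE_RW to make a page read-only.
 */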
1150
1151
1152/**
1153 * Changing the page flags for a single page in the shadow page tables so as to
1154 * make it read-only.
1155 *
1156 * @returns VBox status code.
1157 * @param pVCpu The cross context virtual CPU structure.
1158 * @param GCPtr Virtual address of the first page in the range.
1159 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1160 */
1161VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1162{
1163 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1164}
1165
1166
1167/**
1168 * Changing the page flags for a single page in the shadow page tables so as to
1169 * make it writable.
1170 *
1171 * The caller must know with 101% certainty that the guest page tables map this
1172 * page as writable too. This function will deal with shared, zero and
1173 * write-monitored pages.
1174 *
1175 * @returns VBox status code.
1176 * @param pVCpu The cross context virtual CPU structure.
1177 * @param GCPtr Virtual address of the first page in the range.
1178 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1179 */
1180VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1181{
1182 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1183 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1184 return VINF_SUCCESS;
1185}
1186
1187
1188/**
1189 * Changing the page flags for a single page in the shadow page tables so as to
1190 * make it not present.
1191 *
1192 * @returns VBox status code.
1193 * @param pVCpu The cross context virtual CPU structure.
1194 * @param GCPtr Virtual address of the first page in the range.
1195 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1196 */
1197VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1198{
1199 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1200}
1201
1202
1203/**
1204 * Changing the page flags for a single page in the shadow page tables so as to
1205 * make it supervisor and writable.
1206 *
1207 * This is for dealing with CR0.WP=0 and read-only user pages.
1208 *
1209 * @returns VBox status code.
1210 * @param pVCpu The cross context virtual CPU structure.
1211 * @param GCPtr Virtual address of the first page in the range.
1212 * @param fBigPage Whether or not this is a big page. If it is, we have to
1213 * change the shadow PDE as well. If it isn't, the caller
1214 * has checked that the shadow PDE doesn't need changing.
1215 * We ASSUME 4KB pages backing the big page here!
1216 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1217 */
1218int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1219{
1220 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1221 if (rc == VINF_SUCCESS && fBigPage)
1222 {
1223 /* this is a bit ugly... */
1224 switch (pVCpu->pgm.s.enmShadowMode)
1225 {
1226 case PGMMODE_32_BIT:
1227 {
1228 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1229 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1230 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1231 pPde->u |= X86_PDE_RW;
1232 Log(("-> PDE=%#llx (32)\n", pPde->u));
1233 break;
1234 }
1235 case PGMMODE_PAE:
1236 case PGMMODE_PAE_NX:
1237 {
1238 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1239 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1240 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1241 pPde->u |= X86_PDE_RW;
1242 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1243 break;
1244 }
1245 default:
1246 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1247 }
1248 }
1249 return rc;
1250}
1251
1252
1253/**
1254 * Gets the shadow page directory for the specified address, PAE.
1255 *
1256 * @returns Pointer to the shadow PD.
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param GCPtr The address.
1259 * @param uGstPdpe Guest PDPT entry. Valid.
1260 * @param ppPD Receives address of page directory
1261 */
1262int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1263{
1264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1265 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1266 PPGMPOOLPAGE pShwPage;
1267 int rc;
1268 PGM_LOCK_ASSERT_OWNER(pVM);
1269
1270
1271 /* Allocate page directory if not present. */
1272 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1273 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1274 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1275 X86PGPAEUINT const uPdpe = pPdpe->u;
1276 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1277 {
1278 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1279 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1280 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1281
1282 pgmPoolCacheUsed(pPool, pShwPage);
1283
1284 /* Update the entry if necessary. */
1285 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1286 if (uPdpeNew == uPdpe)
1287 { /* likely */ }
1288 else
1289 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1290 }
1291 else
1292 {
1293 RTGCPTR64 GCPdPt;
1294 PGMPOOLKIND enmKind;
1295 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1296 {
1297 /* AMD-V nested paging or real/protected mode without paging. */
1298 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1299 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1300 }
1301 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1302 {
1303 if (uGstPdpe & X86_PDPE_P)
1304 {
1305 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1306 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1307 }
1308 else
1309 {
1310 /* PD not present; guest must reload CR3 to change it.
1311 * No need to monitor anything in this case. */
1312 /** @todo r=bird: WTF is hit?!? */
1313 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1314 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1315 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1316 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1317 }
1318 }
1319 else
1320 {
1321 GCPdPt = CPUMGetGuestCR3(pVCpu);
1322 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1323 }
1324
1325 /* Create a reference back to the PDPT by using the index in its shadow page. */
1326 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1327 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1328 &pShwPage);
1329 AssertRCReturn(rc, rc);
1330
1331 /* Hook it up. */
1332 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1333 }
1334 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1335
1336 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1337 return VINF_SUCCESS;
1338}
1339
1340
1341/**
1342 * Gets the pointer to the shadow page directory entry for an address, PAE.
1343 *
1344 * @returns Pointer to the PDE.
1345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1346 * @param GCPtr The address.
1347 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1348 */
1349DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1350{
1351 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1352 PGM_LOCK_ASSERT_OWNER(pVM);
1353
1354 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1355 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1356 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1357 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1358 if (!(uPdpe & X86_PDPE_P))
1359 {
1360 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1361 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1362 }
1363 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1364
1365 /* Fetch the pgm pool shadow descriptor. */
1366 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1367 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1368
1369 *ppShwPde = pShwPde;
1370 return VINF_SUCCESS;
1371}
1372
1373
1374/**
1375 * Syncs the SHADOW page directory pointer for the specified address.
1376 *
1377 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1378 *
1379 * The caller is responsible for making sure the guest has a valid PD before
1380 * calling this function.
1381 *
1382 * @returns VBox status code.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param GCPtr The address.
1385 * @param uGstPml4e Guest PML4 entry (valid).
1386 * @param uGstPdpe Guest PDPT entry (valid).
1387 * @param ppPD Receives address of page directory
1388 */
1389static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1390{
1391 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1392 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1393 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1394 int rc;
1395
1396 PGM_LOCK_ASSERT_OWNER(pVM);
1397
1398 /*
1399 * PML4.
1400 */
1401 PPGMPOOLPAGE pShwPage;
1402 {
1403 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1404 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1405 X86PGPAEUINT const uPml4e = pPml4e->u;
1406
1407 /* Allocate page directory pointer table if not present. */
1408 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1409 {
1410 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1411 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1412
1413 pgmPoolCacheUsed(pPool, pShwPage);
1414
1415 /* Update the entry if needed. */
1416 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1417 | (uPml4e & PGM_PML4_FLAGS);
1418 if (uPml4e == uPml4eNew)
1419 { /* likely */ }
1420 else
1421 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1422 }
1423 else
1424 {
1425 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1426
1427 RTGCPTR64 GCPml4;
1428 PGMPOOLKIND enmKind;
1429 if (fNestedPagingOrNoGstPaging)
1430 {
1431 /* AMD-V nested paging or real/protected mode without paging */
1432 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1433 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1434 }
1435 else
1436 {
1437 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1438 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1439 }
1440
1441 /* Create a reference back to the PDPT by using the index in its shadow page. */
1442 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1443 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1444 &pShwPage);
1445 AssertRCReturn(rc, rc);
1446
1447 /* Hook it up. */
1448 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1449 | (uPml4e & PGM_PML4_FLAGS));
1450 }
1451 }
1452
1453 /*
1454 * PDPT.
1455 */
1456 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1457 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1458 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1459 X86PGPAEUINT const uPdpe = pPdpe->u;
1460
1461 /* Allocate page directory if not present. */
1462 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1463 {
1464 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1465 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1466
1467 pgmPoolCacheUsed(pPool, pShwPage);
1468
1469 /* Update the entry if needed. */
1470 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1471 | (uPdpe & PGM_PDPT_FLAGS);
1472 if (uPdpe == uPdpeNew)
1473 { /* likely */ }
1474 else
1475 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1476 }
1477 else
1478 {
1479 RTGCPTR64 GCPdPt;
1480 PGMPOOLKIND enmKind;
1481 if (fNestedPagingOrNoGstPaging)
1482 {
1483 /* AMD-V nested paging or real/protected mode without paging */
1484 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1485 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1486 }
1487 else
1488 {
1489 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1490 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1491 }
1492
1493 /* Create a reference back to the PDPT by using the index in its shadow page. */
1494 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1495 pShwPage->idx, iPdPt, false /*fLockPage*/,
1496 &pShwPage);
1497 AssertRCReturn(rc, rc);
1498
1499 /* Hook it up. */
1500 ASMAtomicWriteU64(&pPdpe->u,
1501 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1502 }
1503
1504 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1505 return VINF_SUCCESS;
1506}
1507
1508
1509/**
1510 * Gets the SHADOW page directory pointer for the specified address (long mode).
1511 *
1512 * @returns VBox status code.
1513 * @param pVCpu The cross context virtual CPU structure.
1514 * @param GCPtr The address.
1515 * @param ppPml4e Receives the address of the page map level 4 entry.
1516 * @param ppPdpt Receives the address of the page directory pointer table.
1517 * @param ppPD Receives the address of the page directory.
1518 */
1519DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1520{
1521 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1522 PGM_LOCK_ASSERT_OWNER(pVM);
1523
1524 /*
1525 * PML4
1526 */
1527 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1528 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1529 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1530 if (ppPml4e)
1531 *ppPml4e = (PX86PML4E)pPml4e;
1532 X86PGPAEUINT const uPml4e = pPml4e->u;
1533 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1534 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1535 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1536
1537 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1538 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1539 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1540
1541 /*
1542 * PDPT
1543 */
1544 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1545 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1546 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1547 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1548 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1549
1550 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1551 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1552
1553 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1554 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1555 return VINF_SUCCESS;
1556}
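
/*
 * Illustrative sketch (not part of the original source, not built): how a caller
 * holding the PGM lock would typically consume pgmShwGetLongModePDPtr to reach a
 * shadow PDE.  The helper name and locals are hypothetical.
 */
#if 0
static int pgmShwExampleGetLongModePde(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT *puShwPde)
{
    PX86PML4E pPml4e = NULL;
    PX86PDPT  pPdpt  = NULL;
    PX86PDPAE pPD    = NULL;
    int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, &pPml4e, &pPdpt, &pPD);
    if (RT_SUCCESS(rc))
    {
        /* Index the page directory the same way the Log4 statement above does. */
        unsigned const iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        *puShwPde = pPD->a[iPd].u;          /* check X86_PDE_P etc. as needed */
    }
    return rc;
}
#endif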
1557
1558
1559/**
1560 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1561 * backing pages in case the PDPT or PML4 entry is missing.
1562 *
1563 * @returns VBox status code.
1564 * @param pVCpu The cross context virtual CPU structure.
1565 * @param GCPtr The address.
1566 * @param ppPdpt Receives the address of the PDPT.
1567 * @param ppPD Receives the address of the page directory.
1568 */
1569static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1570{
1571 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1572 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1573 int rc;
1574
1575 Assert(pVM->pgm.s.fNestedPaging);
1576 PGM_LOCK_ASSERT_OWNER(pVM);
1577
1578 /*
1579 * PML4 level.
1580 */
1581 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1582 Assert(pPml4);
1583
1584 /* Allocate page directory pointer table if not present. */
1585 PPGMPOOLPAGE pShwPage;
1586 {
1587 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1588 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1589 EPTPML4E Pml4e;
1590 Pml4e.u = pPml4e->u;
1591 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1592 {
1593 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1594 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1595 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1596 &pShwPage);
1597 AssertRCReturn(rc, rc);
1598
1599 /* Hook up the new PDPT now. */
1600 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1601 }
1602 else
1603 {
1604 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1605 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1606
1607 pgmPoolCacheUsed(pPool, pShwPage);
1608
1609 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1610 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1611 { }
1612 else
1613 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1614 }
1615 }
1616
1617 /*
1618 * PDPT level.
1619 */
1620 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1621 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1622 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1623
1624 if (ppPdpt)
1625 *ppPdpt = pPdpt;
1626
1627 /* Allocate page directory if not present. */
1628 EPTPDPTE Pdpe;
1629 Pdpe.u = pPdpe->u;
1630 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1631 {
1632 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1633 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1634 pShwPage->idx, iPdPt, false /*fLockPage*/,
1635 &pShwPage);
1636 AssertRCReturn(rc, rc);
1637
1638 /* Hook up the new PD now. */
1639 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1640 }
1641 else
1642 {
1643 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1644 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1645
1646 pgmPoolCacheUsed(pPool, pShwPage);
1647
1648 /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need to sync). */
1649 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1650 { }
1651 else
1652 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1653 }
1654
1655 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1656 return VINF_SUCCESS;
1657}
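
/*
 * Illustrative sketch (not part of the original source, not built): looking up a
 * shadow EPT PDE via pgmShwGetEPTPDPtr, which allocates any missing PML4E/PDPTE
 * levels on the way.  The helper name is hypothetical and the EPT_PD_SHIFT /
 * EPT_PD_MASK index macros are assumed to be the usual ones.
 */
#if 0
static int pgmShwExampleGetEptPde(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t *puShwPde)
{
    PEPTPD pPD = NULL;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPhys, NULL /*ppPdpt*/, &pPD);
    if (RT_SUCCESS(rc))
    {
        unsigned const iPd = (GCPhys >> EPT_PD_SHIFT) & EPT_PD_MASK;
        *puShwPde = pPD->a[iPd].u;          /* check EPT_E_READ / EPT_E_PG_MASK as needed */
    }
    return rc;
}
#endif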
1658
1659
1660#ifdef IN_RING0
1661/**
1662 * Synchronizes a range of nested page table entries.
1663 *
1664 * The caller must own the PGM lock.
1665 *
1666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1667 * @param GCPhys Where to start.
1668 * @param cPages The number of pages whose entries should be synced.
1669 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1670 * host paging mode for AMD-V).
1671 */
1672int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1673{
1674 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1675
1676/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1677 int rc;
1678 switch (enmShwPagingMode)
1679 {
1680 case PGMMODE_32_BIT:
1681 {
1682 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1683 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1684 break;
1685 }
1686
1687 case PGMMODE_PAE:
1688 case PGMMODE_PAE_NX:
1689 {
1690 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1691 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1692 break;
1693 }
1694
1695 case PGMMODE_AMD64:
1696 case PGMMODE_AMD64_NX:
1697 {
1698 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1699 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1700 break;
1701 }
1702
1703 case PGMMODE_EPT:
1704 {
1705 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1706 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1707 break;
1708 }
1709
1710 default:
1711 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1712 }
1713 return rc;
1714}
1715#endif /* IN_RING0 */
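
/*
 * Illustrative ring-0 sketch (not part of the original source, not built): the
 * typical caller pattern around pgmShwSyncNestedPageLocked, which asserts PGM
 * lock ownership.  The helper name is hypothetical; the real callers are
 * presumably the nested-paging fault handlers.
 */
#if 0
static int pgmExampleSyncOneNestedPage(PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, PGMMODE enmShwPagingMode)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);
    int rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
    PGM_UNLOCK(pVM);
    return rc;
}
#endif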
1716
1717
1718/**
1719 * Gets effective Guest OS page information.
1720 *
1721 * When GCPtr is in a big page, the function will return as if it was a normal
1722 * 4KB page. If distinguishing between big and normal pages becomes necessary
1723 * at a later point, a dedicated PGMGstGetPage() variant will be created for
1724 * that purpose.
1725 *
1726 * @returns VBox status code.
1727 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1728 * @param GCPtr Guest Context virtual address of the page.
1729 * @param pWalk Where to store the page walk information.
1730 */
1731VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1732{
1733 VMCPU_ASSERT_EMT(pVCpu);
1734 Assert(pWalk);
1735 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1736 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1737 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1738 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1739}
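
/*
 * Illustrative sketch (not part of the original source, not built): translating a
 * guest-virtual address to a guest-physical one with PGMGstGetPage.  The helper
 * name is hypothetical; the page-offset recombination assumes a page-aligned
 * translation in pWalk->GCPhys.
 */
#if 0
static int pgmExampleGstVirtToPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc))
        *pGCPhys = (Walk.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK) | (GCPtr & GUEST_PAGE_OFFSET_MASK);
    return rc;
}
#endif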
1740
1741
1742/**
1743 * Maps the guest CR3.
1744 *
1745 * @returns VBox status code.
1746 * @param pVCpu The cross context virtual CPU structure.
1747 * @param GCPhysCr3 The guest CR3 value.
1748 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1749 */
1750DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1751{
1752 /** @todo this needs some reworking wrt. locking? */
1753 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1754 PGM_LOCK_VOID(pVM);
1755 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1756 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1757
1758 RTHCPTR HCPtrGuestCr3;
1759 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1760 PGM_UNLOCK(pVM);
1761
1762 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1763 return rc;
1764}
1765
1766
1767/**
1768 * Unmaps the guest CR3.
1769 *
1770 * @returns VBox status code.
1771 * @param pVCpu The cross context virtual CPU structure.
1772 */
1773DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1774{
1775 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1776 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1777 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
1778 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1779}
1780
1781
1782/**
1783 * Performs a guest page table walk.
1784 *
1785 * The guest should be in paged protected mode or long mode when making a call
1786 * to this function.
1787 *
1788 * @returns VBox status code.
1789 * @retval VINF_SUCCESS on success.
1790 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1791 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1792 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1793 *
1794 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1795 * @param GCPtr The guest virtual address to walk by.
1796 * @param pWalk Where to return the walk result. This is valid for some
1797 * error codes as well.
1798 * @param pGstWalk The guest mode specific page walk information.
1799 */
1800int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1801{
1802 VMCPU_ASSERT_EMT(pVCpu);
1803 switch (pVCpu->pgm.s.enmGuestMode)
1804 {
1805 case PGMMODE_32_BIT:
1806 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1807 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1808
1809 case PGMMODE_PAE:
1810 case PGMMODE_PAE_NX:
1811 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1812 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1813
1814 case PGMMODE_AMD64:
1815 case PGMMODE_AMD64_NX:
1816 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1817 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1818
1819 case PGMMODE_REAL:
1820 case PGMMODE_PROTECTED:
1821 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1822 return VERR_PGM_NOT_USED_IN_MODE;
1823
1824 case PGMMODE_EPT:
1825 case PGMMODE_NESTED_32BIT:
1826 case PGMMODE_NESTED_PAE:
1827 case PGMMODE_NESTED_AMD64:
1828 default:
1829 AssertFailed();
1830 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1831 return VERR_PGM_NOT_USED_IN_MODE;
1832 }
1833}
1834
1835
1836#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1837/**
1838 * Performs a guest second-level address translation (SLAT).
1839 *
1840 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1841 * function.
1842 *
1843 * @returns VBox status code.
1844 * @retval VINF_SUCCESS on success.
1845 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1846 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1847 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1848 *
1849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1850 * @param GCPhysNested The nested-guest physical address being translated
1851 * (input).
1852 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1853 * valid. This indicates the SLAT is being performed
1854 * as part of translating a nested-guest linear address.
1855 * @param GCPtrNested The nested-guest virtual address that initiated the
1856 * SLAT. If none, pass NIL_RTGCPTR.
1857 * @param pWalk Where to return the walk result. This is valid for
1858 * some error codes as well.
1859 * @param pGstWalk The second-level paging-mode specific walk
1860 * information.
1861 */
1862static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1863 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1864{
1865 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1866 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1867 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1868 switch (pVCpu->pgm.s.enmGuestSlatMode)
1869 {
1870 case PGMSLAT_EPT:
1871 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1872 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1873
1874 default:
1875 AssertFailed();
1876 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1877 return VERR_PGM_NOT_USED_IN_MODE;
1878 }
1879}
1880
1881
1882/**
1883 * Performs a guest second-level address translation (SLAT) for a nested-guest
1884 * physical address.
1885 *
1886 * This version requires the SLAT mode to be provided by the caller because we could
1887 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1888 * register values.
1889 *
1890 * @returns VBox status code.
1891 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1892 * @param enmSlatMode The second-level paging mode to use.
1893 * @param GCPhysNested The nested-guest physical address to translate.
1894 * @param pWalk Where to store the walk result.
1895 * @param pGstWalk Where to store the second-level paging-mode specific
1896 * walk information.
1897 */
1898static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1899 PPGMPTWALKGST pGstWalk)
1900{
1901 AssertPtr(pWalk);
1902 AssertPtr(pGstWalk);
1903 switch (enmSlatMode)
1904 {
1905 case PGMSLAT_EPT:
1906 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1907 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, NIL_RTGCPTR, pWalk,
1908 &pGstWalk->u.Ept);
1909
1910 default:
1911 AssertFailed();
1912 return VERR_PGM_NOT_USED_IN_MODE;
1913 }
1914}
1915#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1916
1917
1918/**
1919 * Tries to continue the previous walk.
1920 *
1921 * @note Requires the caller to hold the PGM lock from the first
1922 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1923 * we cannot use the pointers.
1924 *
1925 * @returns VBox status code.
1926 * @retval VINF_SUCCESS on success.
1927 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1928 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1929 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1930 *
1931 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1932 * @param GCPtr The guest virtual address to walk by.
1933 * @param pWalk Pointer to the previous walk result and where to return
1934 * the result of this walk. This is valid for some error
1935 * codes as well.
1936 * @param pGstWalk The guest-mode specific walk information.
1937 */
1938int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1939{
1940 /*
1941 * We can only handle successful walks.
1942 * We also limit ourselves to the next page.
1943 */
1944 if ( pWalk->fSucceeded
1945 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
1946 {
1947 Assert(pWalk->uLevel == 0);
1948 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1949 {
1950 /*
1951 * AMD64
1952 */
1953 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1954 {
1955 /*
1956 * We fall back to a full walk if the PDE table changes, if any
1957 * reserved bits are set, or if the effective page access changes.
1958 */
1959 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1960 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1961 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1962 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1963
1964 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1965 {
1966 if (pGstWalk->u.Amd64.pPte)
1967 {
1968 X86PTEPAE Pte;
1969 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1970 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1971 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1972 {
1973 pWalk->GCPtr = GCPtr;
1974 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1975 pGstWalk->u.Amd64.Pte.u = Pte.u;
1976 pGstWalk->u.Amd64.pPte++;
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 }
1981 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1982 {
1983 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1984 if (pGstWalk->u.Amd64.pPde)
1985 {
1986 X86PDEPAE Pde;
1987 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1988 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1989 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1990 {
1991 /* Get the new PTE and check out the first entry. */
1992 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1993 &pGstWalk->u.Amd64.pPt);
1994 if (RT_SUCCESS(rc))
1995 {
1996 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1997 X86PTEPAE Pte;
1998 Pte.u = pGstWalk->u.Amd64.pPte->u;
1999 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2000 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2001 {
2002 pWalk->GCPtr = GCPtr;
2003 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2004 pGstWalk->u.Amd64.Pte.u = Pte.u;
2005 pGstWalk->u.Amd64.Pde.u = Pde.u;
2006 pGstWalk->u.Amd64.pPde++;
2007 return VINF_SUCCESS;
2008 }
2009 }
2010 }
2011 }
2012 }
2013 }
2014 else if (!pWalk->fGigantPage)
2015 {
2016 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2017 {
2018 pWalk->GCPtr = GCPtr;
2019 pWalk->GCPhys += GUEST_PAGE_SIZE;
2020 return VINF_SUCCESS;
2021 }
2022 }
2023 else
2024 {
2025 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2026 {
2027 pWalk->GCPtr = GCPtr;
2028 pWalk->GCPhys += GUEST_PAGE_SIZE;
2029 return VINF_SUCCESS;
2030 }
2031 }
2032 }
2033 }
2034 /* Cases we don't handle; do a full walk. */
2035 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2036}
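
/*
 * Illustrative sketch (not part of the original source, not built): walking a
 * small contiguous range page by page, reusing the previous walk state as the
 * note above requires, i.e. with the PGM lock held from the first pgmGstPtWalk
 * call to the last pgmGstPtWalkNext call.  The helper name is hypothetical.
 */
#if 0
static int pgmExampleWalkRange(PVMCPUCC pVCpu, RTGCPTR GCPtrFirst, uint32_t cPages)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);
    PGMPTWALK    Walk;
    PGMPTWALKGST GstWalk;
    int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk, &GstWalk);
    for (uint32_t iPage = 1; RT_SUCCESS(rc) && iPage < cPages; iPage++)
        rc = pgmGstPtWalkNext(pVCpu, GCPtrFirst + (RTGCPTR)iPage * GUEST_PAGE_SIZE, &Walk, &GstWalk);
    PGM_UNLOCK(pVM);
    return rc;
}
#endif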
2037
2038
2039/**
2040 * Modify page flags for a range of pages in the guest's tables.
2041 *
2042 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2043 *
2044 * @returns VBox status code.
2045 * @param pVCpu The cross context virtual CPU structure.
2046 * @param GCPtr Virtual address of the first page in the range.
2047 * @param cb Size (in bytes) of the range to apply the modification to.
2048 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2049 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2050 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2051 */
2052VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2053{
2054 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2055 VMCPU_ASSERT_EMT(pVCpu);
2056
2057 /*
2058 * Validate input.
2059 */
2060 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2061 Assert(cb);
2062
2063 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2064
2065 /*
2066 * Adjust input.
2067 */
2068 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2069 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2070 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2071
2072 /*
2073 * Call worker.
2074 */
2075 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2076 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2077 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2078 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2079
2080 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2081 return rc;
2082}
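
/*
 * Illustrative sketch (not part of the original source, not built): using
 * PGMGstModifyPage to write-protect a range in the guest page tables.  Note the
 * explicit 64-bit cast when inverting the AND mask, as the warning above asks
 * for.  The helper name is hypothetical.
 */
#if 0
static int pgmExampleWriteProtectRange(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb)
{
    /* Clear X86_PTE_RW, set no flags. */
    return PGMGstModifyPage(pVCpu, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif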
2083
2084
2085/**
2086 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2087 *
2088 * @returns @c true if the PDPEs are valid, @c false otherwise.
2089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2090 * @param paPaePdpes The PAE PDPEs to validate.
2091 *
2092 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2093 */
2094VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2095{
2096 Assert(paPaePdpes);
2097 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2098 {
2099 X86PDPE const PaePdpe = paPaePdpes[i];
2100 if ( !(PaePdpe.u & X86_PDPE_P)
2101 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2102 { /* likely */ }
2103 else
2104 return false;
2105 }
2106 return true;
2107}
2108
2109
2110/**
2111 * Performs the lazy mapping of the 32-bit guest PD.
2112 *
2113 * @returns VBox status code.
2114 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2115 * @param ppPd Where to return the pointer to the mapping. This is
2116 * always set.
2117 */
2118int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2119{
2120 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2121 PGM_LOCK_VOID(pVM);
2122
2123 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2124
2125 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2126 PPGMPAGE pPage;
2127 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2128 if (RT_SUCCESS(rc))
2129 {
2130 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2131 if (RT_SUCCESS(rc))
2132 {
2133# ifdef IN_RING3
2134 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2135 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2136# else
2137 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2138 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2139# endif
2140 PGM_UNLOCK(pVM);
2141 return VINF_SUCCESS;
2142 }
2143 AssertRC(rc);
2144 }
2145 PGM_UNLOCK(pVM);
2146
2147 *ppPd = NULL;
2148 return rc;
2149}
2150
2151
2152/**
2153 * Performs the lazy mapping of the PAE guest PDPT.
2154 *
2155 * @returns VBox status code.
2156 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2157 * @param ppPdpt Where to return the pointer to the mapping. This is
2158 * always set.
2159 */
2160int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2161{
2162 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2163 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2164 PGM_LOCK_VOID(pVM);
2165
2166 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2167 PPGMPAGE pPage;
2168 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2169 * guest-physical address here. */
2170 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2171 if (RT_SUCCESS(rc))
2172 {
2173 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2174 if (RT_SUCCESS(rc))
2175 {
2176# ifdef IN_RING3
2177 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2178 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2179# else
2180 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2181 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2182# endif
2183 PGM_UNLOCK(pVM);
2184 return VINF_SUCCESS;
2185 }
2186 AssertRC(rc);
2187 }
2188
2189 PGM_UNLOCK(pVM);
2190 *ppPdpt = NULL;
2191 return rc;
2192}
2193
2194
2195/**
2196 * Performs the lazy mapping / updating of a PAE guest PD.
2197 *
2198 * @returns VBox status code.
2199 * @retval VINF_SUCCESS on success.
2200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2201 * @param iPdpt Which PD entry to map (0..3).
2202 * @param ppPd Where to return the pointer to the mapping. This is
2203 * always set.
2204 */
2205int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2206{
2207 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2208 PGM_LOCK_VOID(pVM);
2209
2210 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2211 Assert(pGuestPDPT);
2212 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2213 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2214 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2215
2216 PPGMPAGE pPage;
2217 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2218 if (RT_SUCCESS(rc))
2219 {
2220 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2221 AssertRC(rc);
2222 if (RT_SUCCESS(rc))
2223 {
2224# ifdef IN_RING3
2225 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2226 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2227# else
2228 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2229 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2230# endif
2231 if (fChanged)
2232 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2233 PGM_UNLOCK(pVM);
2234 return VINF_SUCCESS;
2235 }
2236 }
2237
2238 /* Invalid page or some failure, invalidate the entry. */
2239 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2240 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2241 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2242
2243 PGM_UNLOCK(pVM);
2244 return rc;
2245}
2246
2247
2248/**
2249 * Performs the lazy mapping of the AMD64 guest PML4 table.
2250 *
2251 * @returns VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2253 * @param ppPml4 Where to return the pointer to the mapping. This will
2254 * always be set.
2255 */
2256int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2257{
2258 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2259 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2260 PGM_LOCK_VOID(pVM);
2261
2262 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2263 PPGMPAGE pPage;
2264 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2265 if (RT_SUCCESS(rc))
2266 {
2267 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2268 if (RT_SUCCESS(rc))
2269 {
2270# ifdef IN_RING3
2271 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2272 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2273# else
2274 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2275 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2276# endif
2277 PGM_UNLOCK(pVM);
2278 return VINF_SUCCESS;
2279 }
2280 }
2281
2282 PGM_UNLOCK(pVM);
2283 *ppPml4 = NULL;
2284 return rc;
2285}
2286
2287
2288#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2289 /**
2290 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2291 *
2292 * @returns VBox status code.
2293 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2294 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2295 * always be set.
2296 */
2297int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2298{
2299 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2300 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2301 PGM_LOCK_VOID(pVM);
2302
2303 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2304 PPGMPAGE pPage;
2305 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2306 if (RT_SUCCESS(rc))
2307 {
2308 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2309 if (RT_SUCCESS(rc))
2310 {
2311# ifdef IN_RING3
2312 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2313 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2314# else
2315 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2316 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2317# endif
2318 PGM_UNLOCK(pVM);
2319 return VINF_SUCCESS;
2320 }
2321 }
2322
2323 PGM_UNLOCK(pVM);
2324 *ppEptPml4 = NULL;
2325 return rc;
2326}
2327#endif
2328
2329
2330/**
2331 * Gets the current CR3 register value for the shadow memory context.
2332 * @returns CR3 value.
2333 * @param pVCpu The cross context virtual CPU structure.
2334 */
2335VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2336{
2337 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2338 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2339 return pPoolPage->Core.Key;
2340}
2341
2342
2343/**
2344 * Forces lazy remapping of the guest's PAE page-directory structures.
2345 *
2346 * @param pVCpu The cross context virtual CPU structure.
2347 */
2348static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2349{
2350 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2351 {
2352 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2353 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2354 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2355 }
2356}
2357
2358
2359/**
2360 * Gets the CR3 mask corresponding to the given paging mode.
2361 *
2362 * @returns The CR3 mask.
2363 * @param enmMode The paging mode.
2364 */
2365DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
2366{
2367 /** @todo This work can be optimized either by storing the masks in
2368 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
2369 * store the result when entering guest mode since we currently use it only
2370 * for enmGuestMode. */
2371 switch (enmMode)
2372 {
2373 case PGMMODE_PAE:
2374 case PGMMODE_PAE_NX:
2375 return X86_CR3_PAE_PAGE_MASK;
2376 case PGMMODE_AMD64:
2377 case PGMMODE_AMD64_NX:
2378 return X86_CR3_AMD64_PAGE_MASK;
2379 case PGMMODE_EPT:
2380 return X86_CR3_EPT_PAGE_MASK;
2381 default:
2382 return X86_CR3_PAGE_MASK;
2383 }
2384}
2385
2386
2387/**
2388 * Gets the masked CR3 value according to the current guest paging mode.
2389 *
2390 * @returns The masked PGM CR3 value.
2391 * @param pVCpu The cross context virtual CPU structure.
2392 * @param uCr3 The raw guest CR3 value.
2393 */
2394DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2395{
2396 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
2397 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
2398 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2399 return GCPhysCR3;
2400}
2401
2402
2403#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2404/**
2405 * Performs second-level address translation for the given CR3 and updates the
2406 * nested-guest CR3 when successful.
2407 *
2408 * @returns VBox status code.
2409 * @param pVCpu The cross context virtual CPU structure.
2410 * @param uCr3 The masked nested-guest CR3 value.
2411 * @param pGCPhysCr3 Where to store the translated CR3.
2412 *
2413 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2414 * mindful of this in code that's hyper sensitive to the order of
2415 * operations.
2416 */
2417static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2418{
2419 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2420 {
2421 PGMPTWALK Walk;
2422 PGMPTWALKGST GstWalk;
2423 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2424 if (RT_SUCCESS(rc))
2425 {
2426 /* Update nested-guest CR3. */
2427 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2428
2429 /* Pass back the translated result. */
2430 *pGCPhysCr3 = Walk.GCPhys;
2431 return VINF_SUCCESS;
2432 }
2433
2434 /* Translation failed. */
2435 *pGCPhysCr3 = NIL_RTGCPHYS;
2436 return rc;
2437 }
2438
2439 /*
2440 * If the nested-guest CR3 has not changed, then the previously
2441 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2442 */
2443 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2444 return VINF_SUCCESS;
2445}
2446#endif
2447
2448
2449/**
2450 * Performs and schedules necessary updates following a CR3 load or reload.
2451 *
2452 * This will normally involve mapping the guest PD or nPDPT.
2453 *
2454 * @returns VBox status code.
2455 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2456 * safely be ignored and overridden since the FF will be set too then.
2457 * @param pVCpu The cross context virtual CPU structure.
2458 * @param cr3 The new cr3.
2459 * @param fGlobal Indicates whether this is a global flush or not.
2460 */
2461VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2462{
2463 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2464 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2465
2466 VMCPU_ASSERT_EMT(pVCpu);
2467
2468 /*
2469 * Always flag the necessary updates; necessary for hardware acceleration
2470 */
2471 /** @todo optimize this, it shouldn't always be necessary. */
2472 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2473 if (fGlobal)
2474 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2475
2476 /*
2477 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2478 */
2479 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2480 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2481#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2482 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2483 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2484 {
2485 LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
2486 RTGCPHYS GCPhysOut;
2487 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2488 if (RT_SUCCESS(rc))
2489 GCPhysCR3 = GCPhysOut;
2490 else
2491 {
2492 /* CR3 SLAT translation failed but we try to pretend it
2493 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2494 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2495 int const rc2 = pgmGstUnmapCr3(pVCpu);
2496 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2497 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2498 return rc2;
2499 }
2500 }
2501#endif
2502
2503 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2504 int rc = VINF_SUCCESS;
2505 if (GCPhysOldCR3 != GCPhysCR3)
2506 {
2507 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2508 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2509 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2510
2511 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2512 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2513 if (RT_LIKELY(rc == VINF_SUCCESS))
2514 { }
2515 else
2516 {
2517 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2518 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2519 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2520 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2521 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2522 }
2523
2524 if (fGlobal)
2525 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2526 else
2527 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2528 }
2529 else
2530 {
2531#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2532 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2533 if (pPool->cDirtyPages)
2534 {
2535 PGM_LOCK_VOID(pVM);
2536 pgmPoolResetDirtyPages(pVM);
2537 PGM_UNLOCK(pVM);
2538 }
2539#endif
2540 if (fGlobal)
2541 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2542 else
2543 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2544
2545 /*
2546 * Flush PAE PDPTEs.
2547 */
2548 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2549 pgmGstFlushPaePdpes(pVCpu);
2550 }
2551
2552 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2553 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2554 return rc;
2555}
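
/*
 * Illustrative sketch (not part of the original source, not built): how a MOV CR3
 * path might treat the VINF_PGM_SYNC_CR3 status, which the doc comment above says
 * can safely be ignored because the force-action flag is set as well.  The helper
 * name is hypothetical.
 */
#if 0
static int pgmExampleHandleMovCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, bool fGlobalFlush)
{
    int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobalFlush);
    if (rc == VINF_PGM_SYNC_CR3)
        rc = VINF_SUCCESS;      /* VMCPU_FF_PGM_SYNC_CR3 is pending and will trigger the sync later */
    return rc;
}
#endif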
2556
2557
2558/**
2559 * Performs and schedules necessary updates following a CR3 load or reload when
2560 * using nested or extended paging.
2561 *
2562 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2563 * TLB and triggering a SyncCR3.
2564 *
2565 * This will normally involve mapping the guest PD or nPDPT.
2566 *
2567 * @returns VBox status code.
2568 * @retval VINF_SUCCESS.
2569 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2570 * paging modes). This can safely be ignored and overridden since the
2571 * FF will be set too then.
2572 * @param pVCpu The cross context virtual CPU structure.
2573 * @param cr3 The new CR3.
2574 */
2575VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2576{
2577 VMCPU_ASSERT_EMT(pVCpu);
2578
2579 /* We assume we're only called in nested paging mode. */
2580 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2581
2582 /*
2583 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2584 */
2585 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2586 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2587#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2588 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2589 {
2590 LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
2591 RTGCPHYS GCPhysOut;
2592 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2593 if (RT_SUCCESS(rc))
2594 GCPhysCR3 = GCPhysOut;
2595 else
2596 {
2597 /* CR3 SLAT translation failed but we try to pretend it
2598 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2599 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2600 int const rc2 = pgmGstUnmapCr3(pVCpu);
2601 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2602 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2603 return rc2;
2604 }
2605 }
2606#endif
2607
2608 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2609 int rc = VINF_SUCCESS;
2610 if (GCPhysOldCR3 != GCPhysCR3)
2611 {
2612 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2613 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2614 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2615
2616 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2617 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2618
2619 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2620 }
2621 /*
2622 * Flush PAE PDPTEs.
2623 */
2624 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2625 pgmGstFlushPaePdpes(pVCpu);
2626
2627 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2628 return rc;
2629}
2630
2631
2632/**
2633 * Synchronize the paging structures.
2634 *
2635 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2636 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2637 * in several places, most importantly whenever the CR3 is loaded.
2638 *
2639 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2640 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2641 * the VMM into guest context.
2642 * @param pVCpu The cross context virtual CPU structure.
2643 * @param cr0 Guest context CR0 register
2644 * @param cr3 Guest context CR3 register
2645 * @param cr4 Guest context CR4 register
2646 * @param fGlobal Including global page directories or not
2647 */
2648VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2649{
2650 int rc;
2651
2652 VMCPU_ASSERT_EMT(pVCpu);
2653
2654 /*
2655 * The pool may have pending stuff and even require a return to ring-3 to
2656 * clear the whole thing.
2657 */
2658 rc = pgmPoolSyncCR3(pVCpu);
2659 if (rc != VINF_SUCCESS)
2660 return rc;
2661
2662 /*
2663 * We might be called when we shouldn't.
2664 *
2665 * The mode switching will ensure that the PD is resynced after every mode
2666 * switch. So, if we find ourselves here when in protected or real mode
2667 * we can safely clear the FF and return immediately.
2668 */
2669 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2670 {
2671 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2672 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2673 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2674 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2675 return VINF_SUCCESS;
2676 }
2677
2678 /* If global pages are not supported, then all flushes are global. */
2679 if (!(cr4 & X86_CR4_PGE))
2680 fGlobal = true;
2681 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2682 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2683
2684 /*
2685 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2686 * This should be done before SyncCR3.
2687 */
2688 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2689 {
2690 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2691
2692 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2693 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2694#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2695 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2696 {
2697 RTGCPHYS GCPhysOut;
2698 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2699 if (RT_SUCCESS(rc2))
2700 GCPhysCR3 = GCPhysOut;
2701 else
2702 {
2703 /* CR3 SLAT translation failed but we try to pretend it
2704 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2705 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2706 rc2 = pgmGstUnmapCr3(pVCpu);
2707 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2708 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2709 return rc2;
2710 }
2711 }
2712#endif
2713 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2714 if (GCPhysOldCR3 != GCPhysCR3)
2715 {
2716 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2717 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2718 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2719 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2720 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2721 }
2722
2723 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2724 if ( rc == VINF_PGM_SYNC_CR3
2725 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2726 {
2727 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2728#ifdef IN_RING3
2729 rc = pgmPoolSyncCR3(pVCpu);
2730#else
2731 if (rc == VINF_PGM_SYNC_CR3)
2732 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2733 return VINF_PGM_SYNC_CR3;
2734#endif
2735 }
2736 AssertRCReturn(rc, rc);
2737 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2738 }
2739
2740 /*
2741 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2742 */
2743 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2744
2745 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2746 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2747 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2748 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2749
2750 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2751 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2752 if (rc == VINF_SUCCESS)
2753 {
2754 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2755 {
2756 /* Go back to ring 3 if a pgm pool sync is again pending. */
2757 return VINF_PGM_SYNC_CR3;
2758 }
2759
2760 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2761 {
2762 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2763 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2764 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2765 }
2766 }
2767
2768 /*
2769 * Now flush the CR3 (guest context).
2770 */
2771 if (rc == VINF_SUCCESS)
2772 PGM_INVL_VCPU_TLBS(pVCpu);
2773 return rc;
2774}
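
/*
 * Illustrative sketch (not part of the original source, not built): a force-action
 * driven caller of PGMSyncCR3.  The helper name is hypothetical, cr0/cr3/cr4 are
 * assumed to be the current guest control register values, and deriving fGlobal
 * from the global FF is an assumption as well.
 */
#if 0
static int pgmExampleServiceSyncCr3Ff(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4)
{
    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        return PGMSyncCR3(pVCpu, cr0, cr3, cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) /*fGlobal*/);
    return VINF_SUCCESS;
}
#endif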
2775
2776
2777/**
2778 * Maps all the PAE PDPE entries.
2779 *
2780 * @returns VBox status code.
2781 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2782 * @param paPaePdpes The new PAE PDPE values.
2783 *
2784 * @remarks This function may be invoked during the process of changing the guest
2785 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2786 * reflect PAE paging just yet.
2787 */
2788VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2789{
2790 Assert(paPaePdpes);
2791 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2792 {
2793 X86PDPE const PaePdpe = paPaePdpes[i];
2794
2795 /*
2796 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2797 * is deferred.[1] Also, different situations require different handling of invalid
2798 * PDPE entries. Here we assume the caller has already validated or doesn't require
2799 * validation of the PDPEs.
2800 *
2801 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2802 */
2803 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2804 {
2805 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2806 RTHCPTR HCPtr;
2807 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2808
2809 PGM_LOCK_VOID(pVM);
2810 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2811 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2812 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2813 PGM_UNLOCK(pVM);
2814 if (RT_SUCCESS(rc))
2815 {
2816#ifdef IN_RING3
2817 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2818 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2819#else
2820 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2821 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2822#endif
2823 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2824 continue;
2825 }
2826 AssertMsgFailed(("PGMPhysMapPaePdpes: rc2=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2827 }
2828 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2829 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2830 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2831 }
2832
2833 return VINF_SUCCESS;
2834}
2835
2836
2837/**
2838 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2839 *
2840 * @returns VBox status code.
2841 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2842 * @param cr3 The guest CR3 value.
2843 *
2844 * @remarks This function may be invoked during the process of changing the guest
2845 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2846 * PAE paging just yet.
2847 */
2848VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2849{
2850 /*
2851 * Read the page-directory-pointer table (PDPT) at CR3.
2852 */
2853 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2854 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2855
2856#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2857 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2858 {
2859 RTGCPHYS GCPhysOut;
2860 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2861 if (RT_SUCCESS(rc))
2862 GCPhysCR3 = GCPhysOut;
2863 else
2864 {
2865 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2866 return rc;
2867 }
2868 }
2869#endif
2870
2871 RTHCPTR HCPtrGuestCr3;
2872 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
2873 if (RT_SUCCESS(rc))
2874 {
2875 /*
2876 * Validate the page-directory-pointer table entries (PDPE).
2877 */
2878 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2879 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2880 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2881 {
2882 /*
2883 * Map the PDPT.
2884 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2885 * that PGMFlushTLB will be called soon and only a change to CR3 then
2886 * will cause the shadow page tables to be updated.
2887 */
2888#ifdef IN_RING3
2889 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2890 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2891#else
2892 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2893 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2894#endif
2895
2896 /*
2897 * Update CPUM.
2898 * We do this prior to mapping the PDPEs to keep the order consistent
2899 * with what's used in HM. In practice, it doesn't really matter.
2900 */
2901 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2902
2903 /*
2904 * Map the PDPEs.
2905 */
2906 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2907 if (RT_SUCCESS(rc))
2908 {
2909#ifdef IN_RING3
2910 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
2911 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
2912#else
2913 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
2914 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
2915#endif
2916 }
2917 }
2918 else
2919 rc = VERR_PGM_PAE_PDPE_RSVD;
2920 }
2921 return rc;
2922}
2923
2924
2925/**
2926 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2927 *
2928 * @returns VBox status code, with the following informational code for
2929 * VM scheduling.
2930 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2931 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2932 *
2933 * @param pVCpu The cross context virtual CPU structure.
2934 * @param cr0 The new cr0.
2935 * @param cr4 The new cr4.
2936 * @param efer The new extended feature enable register.
2937 * @param fForce Whether to force a mode change.
2938 */
2939VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2940{
2941 VMCPU_ASSERT_EMT(pVCpu);
2942
2943 /*
2944 * Calc the new guest mode.
2945 *
2946 * Note! We check PG before PE and without requiring PE because of the
2947 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2948 */
2949 PGMMODE enmGuestMode;
2950 if (cr0 & X86_CR0_PG)
2951 {
2952 if (!(cr4 & X86_CR4_PAE))
2953 {
2954 bool const fPse = !!(cr4 & X86_CR4_PSE);
2955 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2956 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2957 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2958 enmGuestMode = PGMMODE_32_BIT;
2959 }
2960 else if (!(efer & MSR_K6_EFER_LME))
2961 {
2962 if (!(efer & MSR_K6_EFER_NXE))
2963 enmGuestMode = PGMMODE_PAE;
2964 else
2965 enmGuestMode = PGMMODE_PAE_NX;
2966 }
2967 else
2968 {
2969 if (!(efer & MSR_K6_EFER_NXE))
2970 enmGuestMode = PGMMODE_AMD64;
2971 else
2972 enmGuestMode = PGMMODE_AMD64_NX;
2973 }
2974 }
2975 else if (!(cr0 & X86_CR0_PE))
2976 enmGuestMode = PGMMODE_REAL;
2977 else
2978 enmGuestMode = PGMMODE_PROTECTED;
2979
2980 /*
2981 * Did it change?
2982 */
2983 if ( !fForce
2984 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2985 return VINF_SUCCESS;
2986
2987 /* Flush the TLB */
2988 PGM_INVL_VCPU_TLBS(pVCpu);
2989 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2990}
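
/*
 * Worked examples of the guest mode calculation above (illustrative only):
 *      CR0.PG=0, CR0.PE=0                              -> PGMMODE_REAL
 *      CR0.PG=0, CR0.PE=1                              -> PGMMODE_PROTECTED
 *      CR0.PG=1, CR4.PAE=0                             -> PGMMODE_32_BIT
 *      CR0.PG=1, CR4.PAE=1, EFER.LME=0, EFER.NXE=1     -> PGMMODE_PAE_NX
 *      CR0.PG=1, CR4.PAE=1, EFER.LME=1, EFER.NXE=0     -> PGMMODE_AMD64
 */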
2991
2992
2993/**
2994 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2995 *
2996 * @returns PGM_TYPE_*.
2997 * @param pgmMode The mode value to convert.
2998 */
2999DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3000{
3001 switch (pgmMode)
3002 {
3003 case PGMMODE_REAL: return PGM_TYPE_REAL;
3004 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3005 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3006 case PGMMODE_PAE:
3007 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3008 case PGMMODE_AMD64:
3009 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3010 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3011 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3012 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3013 case PGMMODE_EPT: return PGM_TYPE_EPT;
3014 case PGMMODE_NONE: return PGM_TYPE_NONE;
3015 default:
3016 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3017 }
3018}
3019
3020
3021/**
3022 * Calculates the shadow paging mode.
3023 *
3024 * @returns The shadow paging mode.
3025 * @param pVM The cross context VM structure.
3026 * @param enmGuestMode The guest mode.
3027 * @param enmHostMode The host mode.
3028 * @param enmShadowMode The current shadow mode.
3029 */
3030static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3031{
3032 switch (enmGuestMode)
3033 {
3034 /*
3035 * When switching to real or protected mode we don't change
3036 * anything since it's likely that we'll switch back pretty soon.
3037 *
3038 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
3039 * and this code is supposed to determine which shadow paging mode and switcher to
3040 * use during init.
3041 */
3042 case PGMMODE_REAL:
3043 case PGMMODE_PROTECTED:
3044 if ( enmShadowMode != PGMMODE_INVALID
3045 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
3046 break; /* (no change) */
3047
3048 switch (enmHostMode)
3049 {
3050 case SUPPAGINGMODE_32_BIT:
3051 case SUPPAGINGMODE_32_BIT_GLOBAL:
3052 enmShadowMode = PGMMODE_32_BIT;
3053 break;
3054
3055 case SUPPAGINGMODE_PAE:
3056 case SUPPAGINGMODE_PAE_NX:
3057 case SUPPAGINGMODE_PAE_GLOBAL:
3058 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3059 enmShadowMode = PGMMODE_PAE;
3060 break;
3061
3062 case SUPPAGINGMODE_AMD64:
3063 case SUPPAGINGMODE_AMD64_GLOBAL:
3064 case SUPPAGINGMODE_AMD64_NX:
3065 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3066 enmShadowMode = PGMMODE_PAE;
3067 break;
3068
3069 default:
3070 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3071 }
3072 break;
3073
3074 case PGMMODE_32_BIT:
3075 switch (enmHostMode)
3076 {
3077 case SUPPAGINGMODE_32_BIT:
3078 case SUPPAGINGMODE_32_BIT_GLOBAL:
3079 enmShadowMode = PGMMODE_32_BIT;
3080 break;
3081
3082 case SUPPAGINGMODE_PAE:
3083 case SUPPAGINGMODE_PAE_NX:
3084 case SUPPAGINGMODE_PAE_GLOBAL:
3085 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3086 enmShadowMode = PGMMODE_PAE;
3087 break;
3088
3089 case SUPPAGINGMODE_AMD64:
3090 case SUPPAGINGMODE_AMD64_GLOBAL:
3091 case SUPPAGINGMODE_AMD64_NX:
3092 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3093 enmShadowMode = PGMMODE_PAE;
3094 break;
3095
3096 default:
3097 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3098 }
3099 break;
3100
3101 case PGMMODE_PAE:
3102 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3103 switch (enmHostMode)
3104 {
3105 case SUPPAGINGMODE_32_BIT:
3106 case SUPPAGINGMODE_32_BIT_GLOBAL:
3107 enmShadowMode = PGMMODE_PAE;
3108 break;
3109
3110 case SUPPAGINGMODE_PAE:
3111 case SUPPAGINGMODE_PAE_NX:
3112 case SUPPAGINGMODE_PAE_GLOBAL:
3113 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3114 enmShadowMode = PGMMODE_PAE;
3115 break;
3116
3117 case SUPPAGINGMODE_AMD64:
3118 case SUPPAGINGMODE_AMD64_GLOBAL:
3119 case SUPPAGINGMODE_AMD64_NX:
3120 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3121 enmShadowMode = PGMMODE_PAE;
3122 break;
3123
3124 default:
3125 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3126 }
3127 break;
3128
3129 case PGMMODE_AMD64:
3130 case PGMMODE_AMD64_NX:
3131 switch (enmHostMode)
3132 {
3133 case SUPPAGINGMODE_32_BIT:
3134 case SUPPAGINGMODE_32_BIT_GLOBAL:
3135 enmShadowMode = PGMMODE_AMD64;
3136 break;
3137
3138 case SUPPAGINGMODE_PAE:
3139 case SUPPAGINGMODE_PAE_NX:
3140 case SUPPAGINGMODE_PAE_GLOBAL:
3141 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3142 enmShadowMode = PGMMODE_AMD64;
3143 break;
3144
3145 case SUPPAGINGMODE_AMD64:
3146 case SUPPAGINGMODE_AMD64_GLOBAL:
3147 case SUPPAGINGMODE_AMD64_NX:
3148 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3149 enmShadowMode = PGMMODE_AMD64;
3150 break;
3151
3152 default:
3153 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3154 }
3155 break;
3156
3157 default:
3158 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3159 }
3160
3161 /*
3162 * Override the shadow mode when NEM or nested paging is active.
3163 */
3164 if (VM_IS_NEM_ENABLED(pVM))
3165 {
3166 pVM->pgm.s.fNestedPaging = true;
3167 enmShadowMode = PGMMODE_NONE;
3168 }
3169 else
3170 {
3171 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3172 pVM->pgm.s.fNestedPaging = fNestedPaging;
3173 if (fNestedPaging)
3174 {
3175 if (HMIsVmxActive(pVM))
3176 enmShadowMode = PGMMODE_EPT;
3177 else
3178 {
3179 /* The nested SVM paging depends on the host one. */
3180 Assert(HMIsSvmActive(pVM));
3181 if ( enmGuestMode == PGMMODE_AMD64
3182 || enmGuestMode == PGMMODE_AMD64_NX)
3183 enmShadowMode = PGMMODE_NESTED_AMD64;
3184 else
3185 switch (pVM->pgm.s.enmHostMode)
3186 {
3187 case SUPPAGINGMODE_32_BIT:
3188 case SUPPAGINGMODE_32_BIT_GLOBAL:
3189 enmShadowMode = PGMMODE_NESTED_32BIT;
3190 break;
3191
3192 case SUPPAGINGMODE_PAE:
3193 case SUPPAGINGMODE_PAE_GLOBAL:
3194 case SUPPAGINGMODE_PAE_NX:
3195 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3196 enmShadowMode = PGMMODE_NESTED_PAE;
3197 break;
3198
3199 case SUPPAGINGMODE_AMD64:
3200 case SUPPAGINGMODE_AMD64_GLOBAL:
3201 case SUPPAGINGMODE_AMD64_NX:
3202 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3203 enmShadowMode = PGMMODE_NESTED_AMD64;
3204 break;
3205
3206 default:
3207 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3208 }
3209 }
3210 }
3211#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3212 else
3213 {
3214 /* Nested paging is a requirement for nested VT-x. */
3215 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3216 }
3217#endif
3218 }
3219
3220 return enmShadowMode;
3221}
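
/*
 * Worked examples of the shadow mode calculation above (illustrative only):
 *      Guest 32-bit, VT-x with nested paging               -> PGMMODE_EPT
 *      Guest PAE,    AMD-V nested paging, AMD64 host       -> PGMMODE_NESTED_AMD64
 *      Guest AMD64,  no nested paging, any host            -> PGMMODE_AMD64
 *      Any guest mode with NEM active                      -> PGMMODE_NONE
 */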
3222
3223
3224/**
3225 * Performs the actual mode change.
3226 * This is called by PGMChangeMode and pgmR3InitPaging().
3227 *
3228 * @returns VBox status code. May suspend or power off the VM on error, but this
3229 * will trigger using FFs and not informational status codes.
3230 *
3231 * @param pVM The cross context VM structure.
3232 * @param pVCpu The cross context virtual CPU structure.
3233 * @param enmGuestMode The new guest mode. This is assumed to be different from
3234 * the current mode.
3235 */
3236VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3237{
3238 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3239 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3240
3241 /*
3242 * Calc the shadow mode and switcher.
3243 */
3244 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3245
3246 /*
3247 * Exit old mode(s).
3248 */
3249 /* shadow */
3250 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3251 {
3252 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3253 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3254 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3255 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3256 {
3257 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3258 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3259 }
3260 }
3261 else
3262 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3263
3264 /* guest */
3265 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3266 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3267 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3268 {
3269 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3270 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3271 }
3272 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3273 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3274 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3275
3276 /*
3277 * Change the paging mode data indexes.
3278 */
3279 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3280 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3281 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3282 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3283 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3284 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3285 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3286#ifdef IN_RING3
3287 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3288#endif
3289
3290 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3291 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3292 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3293 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3294 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3295 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3296 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3297#ifdef IN_RING3
3298 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3299#endif
3300
3301 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3302 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3303 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3304 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3305 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3306 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3307 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3308 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3309 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3310 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3311#ifdef VBOX_STRICT
3312 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3313#endif
3314
3315 /*
3316 * Enter new shadow mode (if changed).
3317 */
3318 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3319 {
3320 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3321 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3322 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3323 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3324 }
3325
3326 /*
3327 * Always flag the necessary updates
3328 */
3329 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3330
3331 /*
3332 * Enter the new guest and shadow+guest modes.
3333 */
3334 /* Calc the new CR3 value. */
3335 RTGCPHYS GCPhysCR3;
3336 switch (enmGuestMode)
3337 {
3338 case PGMMODE_REAL:
3339 case PGMMODE_PROTECTED:
3340 GCPhysCR3 = NIL_RTGCPHYS;
3341 break;
3342
3343 case PGMMODE_32_BIT:
3344 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3345 break;
3346
3347 case PGMMODE_PAE_NX:
3348 case PGMMODE_PAE:
3349 if (!pVM->cpum.ro.GuestFeatures.fPae)
3350#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3351 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3352 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3353#else
3354 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3355
3356#endif
3357 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3358 break;
3359
3360#ifdef VBOX_WITH_64_BITS_GUESTS
3361 case PGMMODE_AMD64_NX:
3362 case PGMMODE_AMD64:
3363 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3364 break;
3365#endif
3366 default:
3367 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3368 }
3369
3370#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3371 /*
3372 * If a nested-guest is using EPT paging:
3373 * - Update the second-level address translation (SLAT) mode.
3374 * - Indicate that the CR3 is nested-guest physical address.
3375 */
3376 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
3377 {
3378 if (PGMMODE_WITH_PAGING(enmGuestMode))
3379 {
3380 /*
3381 * Translate CR3 to its guest-physical address.
3382 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3383 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3384 */
3385 PGMPTWALK Walk;
3386 PGMPTWALKGST GstWalk;
3387 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3388 if (RT_SUCCESS(rc))
3389 { /* likely */ }
3390 else
3391 {
3392 /*
3393 * SLAT failed but we avoid reporting this to the caller because the caller
3394 * is not supposed to fail. The only time the caller needs to indicate a
3395 * failure to software is when PAE paging is used by the nested-guest, but
3396 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3397 * In all other cases, the failure will be indicated when CR3 tries to be
3398                 * In all other cases, the failure will be reported when the CR3 translation
3399                 * is attempted on the next linear-address memory access.
3400 */
3401 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3402
3403 /* Trying to coax PGM to succeed for the time being... */
3404 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3405 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3406 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3407 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3408 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3409 return VINF_SUCCESS;
3410 }
3411 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3412 GCPhysCR3 = Walk.GCPhys;
3413 }
3414 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3415 }
3416 else
3417 {
3418 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3419 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3420 }
3421#endif
3422
3423 /*
3424 * Enter the new guest mode.
3425 */
3426 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3427 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3428 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3429
3430 /* Set the new guest CR3 (and nested-guest CR3). */
3431 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3432
3433 /* status codes. */
3434 AssertRC(rc);
3435 AssertRC(rc2);
3436 if (RT_SUCCESS(rc))
3437 {
3438 rc = rc2;
3439 if (RT_SUCCESS(rc)) /* no informational status codes. */
3440 rc = VINF_SUCCESS;
3441 }
3442
3443 /*
3444 * Notify HM.
3445 */
3446 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3447 return rc;
3448}
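/*
 * A minimal usage sketch, assuming a caller along the lines of PGMChangeMode:
 * the caller derives the new guest paging mode from the guest control
 * registers and only invokes PGMHCChangeMode when it differs from the current
 * mode. The helper name below is hypothetical.
 */
#if 0
static int examplePgmMaybeChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmNewGuestMode)
{
    /* PGMHCChangeMode assumes the new mode differs from the current one. */
    if (enmNewGuestMode == PGMGetGuestMode(pVCpu))
        return VINF_SUCCESS;
    return PGMHCChangeMode(pVM, pVCpu, enmNewGuestMode);
}
#endif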
3449
3450
3451/**
3452 * Called by CPUM or REM when CR0.WP changes to 1.
3453 *
3454 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3455 * @thread EMT
3456 */
3457VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3458{
3459 /*
3460 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3461 *
3462 * Use the counter to judge whether there might be pool pages with active
3463 * hacks in them. If there are, we will be running the risk of messing up
3464 * the guest by allowing it to write to read-only pages. Thus, we have to
3465 * clear the page pool ASAP if there is the slightest chance.
3466 */
3467 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3468 {
3469 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3470
3471 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3472 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3473 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3474 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3475 }
3476}
3477
3478
3479/**
3480 * Gets the current guest paging mode.
3481 *
3482 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3483 *
3484 * @returns The current paging mode.
3485 * @param pVCpu The cross context virtual CPU structure.
3486 */
3487VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3488{
3489 return pVCpu->pgm.s.enmGuestMode;
3490}
3491
3492
3493/**
3494 * Gets the current shadow paging mode.
3495 *
3496 * @returns The current paging mode.
3497 * @param pVCpu The cross context virtual CPU structure.
3498 */
3499VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3500{
3501 return pVCpu->pgm.s.enmShadowMode;
3502}
3503
3504
3505/**
3506 * Gets the current host paging mode.
3507 *
3508 * @returns The current paging mode.
3509 * @param pVM The cross context VM structure.
3510 */
3511VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3512{
3513 switch (pVM->pgm.s.enmHostMode)
3514 {
3515 case SUPPAGINGMODE_32_BIT:
3516 case SUPPAGINGMODE_32_BIT_GLOBAL:
3517 return PGMMODE_32_BIT;
3518
3519 case SUPPAGINGMODE_PAE:
3520 case SUPPAGINGMODE_PAE_GLOBAL:
3521 return PGMMODE_PAE;
3522
3523 case SUPPAGINGMODE_PAE_NX:
3524 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3525 return PGMMODE_PAE_NX;
3526
3527 case SUPPAGINGMODE_AMD64:
3528 case SUPPAGINGMODE_AMD64_GLOBAL:
3529 return PGMMODE_AMD64;
3530
3531 case SUPPAGINGMODE_AMD64_NX:
3532 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3533 return PGMMODE_AMD64_NX;
3534
3535 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3536 }
3537
3538 return PGMMODE_INVALID;
3539}
3540
3541
3542/**
3543 * Get mode name.
3544 *
3545 * @returns read-only name string.
3546 * @param   enmMode     The mode whose name is desired.
3547 */
3548VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3549{
3550 switch (enmMode)
3551 {
3552 case PGMMODE_REAL: return "Real";
3553 case PGMMODE_PROTECTED: return "Protected";
3554 case PGMMODE_32_BIT: return "32-bit";
3555 case PGMMODE_PAE: return "PAE";
3556 case PGMMODE_PAE_NX: return "PAE+NX";
3557 case PGMMODE_AMD64: return "AMD64";
3558 case PGMMODE_AMD64_NX: return "AMD64+NX";
3559 case PGMMODE_NESTED_32BIT: return "Nested-32";
3560 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3561 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3562 case PGMMODE_EPT: return "EPT";
3563 case PGMMODE_NONE: return "None";
3564 default: return "unknown mode value";
3565 }
3566}
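/*
 * A small illustrative sketch: the getters above can be combined with
 * PGMGetModeName for logging the current paging configuration. The helper
 * name is hypothetical.
 */
#if 0
static void examplePgmLogModes(PVMCPU pVCpu)
{
    Log(("Guest mode: %s, shadow mode: %s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu))));
}
#endif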
3567
3568
3569#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3570/**
3571 * Gets the SLAT mode name.
3572 *
3573 * @returns The read-only SLAT mode descriptive string.
3574 * @param enmSlatMode The SLAT mode value.
3575 */
3576VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3577{
3578 switch (enmSlatMode)
3579 {
3580 case PGMSLAT_DIRECT: return "Direct";
3581 case PGMSLAT_EPT: return "EPT";
3582 case PGMSLAT_32BIT: return "32-bit";
3583 case PGMSLAT_PAE: return "PAE";
3584 case PGMSLAT_AMD64: return "AMD64";
3585 default: return "Unknown";
3586 }
3587}
3588#endif
3589
3590
3591/**
3592 * Gets the physical address represented in the guest CR3 as PGM sees it.
3593 *
3594 * This is mainly for logging and debugging.
3595 *
3596 * @returns PGM's guest CR3 value.
3597 * @param pVCpu The cross context virtual CPU structure.
3598 */
3599VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3600{
3601 return pVCpu->pgm.s.GCPhysCR3;
3602}
3603
3604
3605
3606/**
3607 * Notification from CPUM that the EFER.NXE bit has changed.
3608 *
3609 * @param pVCpu The cross context virtual CPU structure of the CPU for
3610 * which EFER changed.
3611 * @param fNxe The new NXE state.
3612 */
3613VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3614{
3615/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3616 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3617
3618 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
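    /*
     * When NXE is enabled the NX bit becomes a legal guest attribute: drop it
     * from the must-be-zero (MBZ) masks and add it to the set of bits that get
     * propagated into the shadow page tables; disabling NXE does the reverse.
     */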
3619 if (fNxe)
3620 {
3621 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3622 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3623 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3624 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3625 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3626 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3627 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3628 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3629 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3630 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3631 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3632
3633 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3634 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3635 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3636 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3637 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3638 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3639 }
3640 else
3641 {
3642 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3643 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3644 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3645 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3646        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3647 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3648 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3649 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3650 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3651 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3652 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3653
3654 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3655 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3656 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3657 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3658 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3659 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3660 }
3661}
3662
3663
3664/**
3665 * Check if any PGM pool pages are marked dirty (not monitored).
3666 *
3667 * @returns true if any pool pages are marked dirty, false otherwise.
3668 * @param pVM The cross context VM structure.
3669 */
3670VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3671{
3672 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3673}
3674
3675
3676/**
3677 * Check if this VCPU currently owns the PGM lock.
3678 *
3679 * @returns bool owner/not owner
3680 * @param pVM The cross context VM structure.
3681 */
3682VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3683{
3684 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3685}
3686
3687
3688/**
3689 * Enable or disable large page usage
3690 *
3691 * @returns VBox status code.
3692 * @param pVM The cross context VM structure.
3693 * @param fUseLargePages Use/not use large pages
3694 */
3695VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3696{
3697 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3698
3699 pVM->pgm.s.fUseLargePages = fUseLargePages;
3700 return VINF_SUCCESS;
3701}
3702
3703
3704/**
3705 * Acquire the PGM lock.
3706 *
3707 * @returns VBox status code
3708 * @param pVM The cross context VM structure.
3709 * @param fVoid Set if the caller cannot handle failure returns.
3710 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3711 */
3712#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3713int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3714#else
3715int pgmLock(PVMCC pVM, bool fVoid)
3716#endif
3717{
3718#if defined(VBOX_STRICT)
3719 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3720#else
3721 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3722#endif
3723 if (RT_SUCCESS(rc))
3724 return rc;
3725 if (fVoid)
3726 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3727 else
3728 AssertRC(rc);
3729 return rc;
3730}
3731
3732
3733/**
3734 * Release the PGM lock.
3735 *
3737 * @param pVM The cross context VM structure.
3738 */
3739void pgmUnlock(PVMCC pVM)
3740{
3741 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3742 pVM->pgm.s.cDeprecatedPageLocks = 0;
3743 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3744 if (rc == VINF_SEM_NESTED)
3745 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3746}
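/*
 * A minimal sketch of the usual pattern, assuming the PGM_LOCK_VOID and
 * PGM_UNLOCK wrappers used elsewhere in this file: enter the PGM critical
 * section, touch PGM state, leave it again. The helper name is hypothetical.
 */
#if 0
static void examplePgmTouchStateUnderLock(PVMCC pVM)
{
    PGM_LOCK_VOID(pVM);             /* enter the PGM critical section */
    Assert(PGMIsLockOwner(pVM));    /* ownership can be asserted while holding it */
    /* ... read or update PGM state here ... */
    PGM_UNLOCK(pVM);                /* and leave it again */
}
#endif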
3747
3748
3749#if !defined(IN_R0) || defined(LOG_ENABLED)
3750
3751/** Format handler for PGMPAGE.
3752 * @copydoc FNRTSTRFORMATTYPE */
3753static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3754 const char *pszType, void const *pvValue,
3755 int cchWidth, int cchPrecision, unsigned fFlags,
3756 void *pvUser)
3757{
3758 size_t cch;
3759 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3760 if (RT_VALID_PTR(pPage))
3761 {
3762 char szTmp[64+80];
3763
3764 cch = 0;
3765
3766 /* The single char state stuff. */
3767 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3768 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3769
3770# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3771 if (IS_PART_INCLUDED(5))
3772 {
3773 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3774 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3775 }
3776
3777 /* The type. */
3778 if (IS_PART_INCLUDED(4))
3779 {
3780 szTmp[cch++] = ':';
3781 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3782 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3783 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3784 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3785 }
3786
3787 /* The numbers. */
3788 if (IS_PART_INCLUDED(3))
3789 {
3790 szTmp[cch++] = ':';
3791 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3792 }
3793
3794 if (IS_PART_INCLUDED(2))
3795 {
3796 szTmp[cch++] = ':';
3797 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3798 }
3799
3800 if (IS_PART_INCLUDED(6))
3801 {
3802 szTmp[cch++] = ':';
3803 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3804 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3805 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3806 }
3807# undef IS_PART_INCLUDED
3808
3809 cch = pfnOutput(pvArgOutput, szTmp, cch);
3810 }
3811 else
3812 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3813 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3814 return cch;
3815}
3816
3817
3818/** Format handler for PGMRAMRANGE.
3819 * @copydoc FNRTSTRFORMATTYPE */
3820static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3821 const char *pszType, void const *pvValue,
3822 int cchWidth, int cchPrecision, unsigned fFlags,
3823 void *pvUser)
3824{
3825 size_t cch;
3826 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3827 if (RT_VALID_PTR(pRam))
3828 {
3829 char szTmp[80];
3830 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3831 cch = pfnOutput(pvArgOutput, szTmp, cch);
3832 }
3833 else
3834 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3835 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3836 return cch;
3837}
3838
3839/** Format type handlers to be registered/deregistered. */
3840static const struct
3841{
3842 char szType[24];
3843 PFNRTSTRFORMATTYPE pfnHandler;
3844} g_aPgmFormatTypes[] =
3845{
3846 { "pgmpage", pgmFormatTypeHandlerPage },
3847 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3848};
3849
3850#endif /* !IN_R0 || LOG_ENABLED */
3851
3852/**
3853 * Registers the global string format types.
3854 *
3855 * This should be called at module load time or in some other manner that ensures
3856 * that it's called exactly one time.
3857 *
3858 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3859 */
3860VMMDECL(int) PGMRegisterStringFormatTypes(void)
3861{
3862#if !defined(IN_R0) || defined(LOG_ENABLED)
3863 int rc = VINF_SUCCESS;
3864 unsigned i;
3865 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3866 {
3867 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3868# ifdef IN_RING0
3869 if (rc == VERR_ALREADY_EXISTS)
3870 {
3871 /* in case of cleanup failure in ring-0 */
3872 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3873 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3874 }
3875# endif
3876 }
3877 if (RT_FAILURE(rc))
3878 while (i-- > 0)
3879 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3880
3881 return rc;
3882#else
3883 return VINF_SUCCESS;
3884#endif
3885}
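/*
 * Once registered, the custom types are used from IPRT format strings via the
 * %R[type] specifier, for instance in log statements (illustrative sketch,
 * hypothetical helper name):
 */
#if 0
static void examplePgmLogPage(PCPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("page=%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}
#endif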
3886
3887
3888/**
3889 * Deregisters the global string format types.
3890 *
3891 * This should be called at module unload time or in some other manner that
3892 * ensures that it's called exactly one time.
3893 */
3894VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3895{
3896#if !defined(IN_R0) || defined(LOG_ENABLED)
3897 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3898 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3899#endif
3900}
3901
3902
3903#ifdef VBOX_STRICT
3904/**
3905 * Asserts that everything related to the guest CR3 is correctly shadowed.
3906 *
3907 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3908 * and assert the correctness of the guest CR3 mapping before asserting that the
3909 * shadow page tables are in sync with the guest page tables.
3910 *
3911 * @returns Number of conflicts.
3912 * @param pVM The cross context VM structure.
3913 * @param pVCpu The cross context virtual CPU structure.
3914 * @param cr3 The current guest CR3 register value.
3915 * @param cr4 The current guest CR4 register value.
3916 */
3917VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3918{
3919 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3920
3921 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3922 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3923 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3924
3925 PGM_LOCK_VOID(pVM);
3926 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3927 PGM_UNLOCK(pVM);
3928
3929 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3930 return cErrors;
3931}
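/*
 * A strict-build usage sketch (hypothetical wrapper name): validate the shadow
 * structures against the current guest CR3/CR4 and expect no inconsistencies.
 */
# if 0
static void examplePgmStrictCheckCr3(PVMCC pVM, PVMCPUCC pVCpu)
{
    unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
    Assert(cErrors == 0);
}
# endif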
3932#endif /* VBOX_STRICT */
3933
3934
3935/**
3936 * Updates PGM's copy of the guest's EPT pointer.
3937 *
3938 * @param pVCpu The cross context virtual CPU structure.
3939 * @param uEptPtr The EPT pointer.
3940 *
3941 * @remarks This can be called as part of VM-entry so we might be in the midst of
3942 * switching to VMX non-root mode.
3943 */
3944VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3945{
3946 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3947 PGM_LOCK_VOID(pVM);
3948 pVCpu->pgm.s.uEptPtr = uEptPtr;
3949 PGM_UNLOCK(pVM);
3950}
3951