VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@93554

Last change on this file since 93554 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

1/* $Id: PGMAll.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/sup.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/stam.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/hm_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*********************************************************************************************************************************
48* Internal Functions *
49*********************************************************************************************************************************/
50DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
51DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
52DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
53#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
54static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
55 PPGMPTWALKGST pGstWalk);
56static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
57static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
58#endif
59static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
60static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
61
62
63#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
64/* Guest - EPT SLAT is identical for all guest paging modes. */
65# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
66# define PGM_GST_TYPE PGM_TYPE_EPT
67# include "PGMGstDefs.h"
68# include "PGMAllGstSlatEpt.cpp.h"
69# undef PGM_GST_TYPE
70#endif
71
72
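/*
 * Note on the blocks that follow: the template headers (PGMAllShw.h,
 * PGMAllGst.h and PGMAllBth.h) are included repeatedly with different
 * PGM_SHW_TYPE / PGM_GST_TYPE settings, and the PGM_SHW_NAME / PGM_GST_NAME /
 * PGM_BTH_NAME macros mangle the generated function names.  Each shadow/guest
 * paging mode combination therefore gets its own set of worker functions,
 * which the mode data tables further down (g_aPgmGuestModeData,
 * g_aPgmShadowModeData and g_aPgmBothModeData) then point to.
 */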
73/*
74 * Shadow - 32-bit mode
75 */
76#define PGM_SHW_TYPE PGM_TYPE_32BIT
77#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
78#include "PGMAllShw.h"
79
80/* Guest - real mode */
81#define PGM_GST_TYPE PGM_TYPE_REAL
82#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
83#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
84#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
85#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
86#include "PGMGstDefs.h"
87#include "PGMAllGst.h"
88#include "PGMAllBth.h"
89#undef BTH_PGMPOOLKIND_PT_FOR_PT
90#undef BTH_PGMPOOLKIND_ROOT
91#undef PGM_BTH_NAME
92#undef PGM_GST_TYPE
93#undef PGM_GST_NAME
94
95/* Guest - protected mode */
96#define PGM_GST_TYPE PGM_TYPE_PROT
97#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
98#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
99#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
100#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
101#include "PGMGstDefs.h"
102#include "PGMAllGst.h"
103#include "PGMAllBth.h"
104#undef BTH_PGMPOOLKIND_PT_FOR_PT
105#undef BTH_PGMPOOLKIND_ROOT
106#undef PGM_BTH_NAME
107#undef PGM_GST_TYPE
108#undef PGM_GST_NAME
109
110/* Guest - 32-bit mode */
111#define PGM_GST_TYPE PGM_TYPE_32BIT
112#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
113#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
114#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
115#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
116#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
117#include "PGMGstDefs.h"
118#include "PGMAllGst.h"
119#include "PGMAllBth.h"
120#undef BTH_PGMPOOLKIND_PT_FOR_BIG
121#undef BTH_PGMPOOLKIND_PT_FOR_PT
122#undef BTH_PGMPOOLKIND_ROOT
123#undef PGM_BTH_NAME
124#undef PGM_GST_TYPE
125#undef PGM_GST_NAME
126
127#undef PGM_SHW_TYPE
128#undef PGM_SHW_NAME
129
130
131/*
132 * Shadow - PAE mode
133 */
134#define PGM_SHW_TYPE PGM_TYPE_PAE
135#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
136#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
137#include "PGMAllShw.h"
138
139/* Guest - real mode */
140#define PGM_GST_TYPE PGM_TYPE_REAL
141#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
142#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
143#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
144#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
145#include "PGMGstDefs.h"
146#include "PGMAllBth.h"
147#undef BTH_PGMPOOLKIND_PT_FOR_PT
148#undef BTH_PGMPOOLKIND_ROOT
149#undef PGM_BTH_NAME
150#undef PGM_GST_TYPE
151#undef PGM_GST_NAME
152
153/* Guest - protected mode */
154#define PGM_GST_TYPE PGM_TYPE_PROT
155#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
156#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
157#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
158#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
159#include "PGMGstDefs.h"
160#include "PGMAllBth.h"
161#undef BTH_PGMPOOLKIND_PT_FOR_PT
162#undef BTH_PGMPOOLKIND_ROOT
163#undef PGM_BTH_NAME
164#undef PGM_GST_TYPE
165#undef PGM_GST_NAME
166
167/* Guest - 32-bit mode */
168#define PGM_GST_TYPE PGM_TYPE_32BIT
169#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
170#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
171#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
172#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
173#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
174#include "PGMGstDefs.h"
175#include "PGMAllBth.h"
176#undef BTH_PGMPOOLKIND_PT_FOR_BIG
177#undef BTH_PGMPOOLKIND_PT_FOR_PT
178#undef BTH_PGMPOOLKIND_ROOT
179#undef PGM_BTH_NAME
180#undef PGM_GST_TYPE
181#undef PGM_GST_NAME
182
183
184/* Guest - PAE mode */
185#define PGM_GST_TYPE PGM_TYPE_PAE
186#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
187#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
188#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
189#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
190#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
191#include "PGMGstDefs.h"
192#include "PGMAllGst.h"
193#include "PGMAllBth.h"
194#undef BTH_PGMPOOLKIND_PT_FOR_BIG
195#undef BTH_PGMPOOLKIND_PT_FOR_PT
196#undef BTH_PGMPOOLKIND_ROOT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205/*
206 * Shadow - AMD64 mode
207 */
208#define PGM_SHW_TYPE PGM_TYPE_AMD64
209#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
210#include "PGMAllShw.h"
211
212/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
213/** @todo retire this hack. */
214#define PGM_GST_TYPE PGM_TYPE_PROT
215#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
216#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
217#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
218#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
219#include "PGMGstDefs.h"
220#include "PGMAllBth.h"
221#undef BTH_PGMPOOLKIND_PT_FOR_PT
222#undef BTH_PGMPOOLKIND_ROOT
223#undef PGM_BTH_NAME
224#undef PGM_GST_TYPE
225#undef PGM_GST_NAME
226
227#ifdef VBOX_WITH_64_BITS_GUESTS
228/* Guest - AMD64 mode */
229# define PGM_GST_TYPE PGM_TYPE_AMD64
230# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
231# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
232# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
233# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
234# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
235# include "PGMGstDefs.h"
236# include "PGMAllGst.h"
237# include "PGMAllBth.h"
238# undef BTH_PGMPOOLKIND_PT_FOR_BIG
239# undef BTH_PGMPOOLKIND_PT_FOR_PT
240# undef BTH_PGMPOOLKIND_ROOT
241# undef PGM_BTH_NAME
242# undef PGM_GST_TYPE
243# undef PGM_GST_NAME
244#endif /* VBOX_WITH_64_BITS_GUESTS */
245
246#undef PGM_SHW_TYPE
247#undef PGM_SHW_NAME
248
249
250/*
251 * Shadow - 32-bit nested paging mode.
252 */
253#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
254#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
255#include "PGMAllShw.h"
256
257/* Guest - real mode */
258#define PGM_GST_TYPE PGM_TYPE_REAL
259#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
260#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
261#include "PGMGstDefs.h"
262#include "PGMAllBth.h"
263#undef PGM_BTH_NAME
264#undef PGM_GST_TYPE
265#undef PGM_GST_NAME
266
267/* Guest - protected mode */
268#define PGM_GST_TYPE PGM_TYPE_PROT
269#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
270#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
271#include "PGMGstDefs.h"
272#include "PGMAllBth.h"
273#undef PGM_BTH_NAME
274#undef PGM_GST_TYPE
275#undef PGM_GST_NAME
276
277/* Guest - 32-bit mode */
278#define PGM_GST_TYPE PGM_TYPE_32BIT
279#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
280#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
281#include "PGMGstDefs.h"
282#include "PGMAllBth.h"
283#undef PGM_BTH_NAME
284#undef PGM_GST_TYPE
285#undef PGM_GST_NAME
286
287/* Guest - PAE mode */
288#define PGM_GST_TYPE PGM_TYPE_PAE
289#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
290#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
291#include "PGMGstDefs.h"
292#include "PGMAllBth.h"
293#undef PGM_BTH_NAME
294#undef PGM_GST_TYPE
295#undef PGM_GST_NAME
296
297#ifdef VBOX_WITH_64_BITS_GUESTS
298/* Guest - AMD64 mode */
299# define PGM_GST_TYPE PGM_TYPE_AMD64
300# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
301# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
302# include "PGMGstDefs.h"
303# include "PGMAllBth.h"
304# undef PGM_BTH_NAME
305# undef PGM_GST_TYPE
306# undef PGM_GST_NAME
307#endif /* VBOX_WITH_64_BITS_GUESTS */
308
309#undef PGM_SHW_TYPE
310#undef PGM_SHW_NAME
311
312
313/*
314 * Shadow - PAE nested paging mode.
315 */
316#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
317#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
318#include "PGMAllShw.h"
319
320/* Guest - real mode */
321#define PGM_GST_TYPE PGM_TYPE_REAL
322#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
323#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
324#include "PGMGstDefs.h"
325#include "PGMAllBth.h"
326#undef PGM_BTH_NAME
327#undef PGM_GST_TYPE
328#undef PGM_GST_NAME
329
330/* Guest - protected mode */
331#define PGM_GST_TYPE PGM_TYPE_PROT
332#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
333#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
334#include "PGMGstDefs.h"
335#include "PGMAllBth.h"
336#undef PGM_BTH_NAME
337#undef PGM_GST_TYPE
338#undef PGM_GST_NAME
339
340/* Guest - 32-bit mode */
341#define PGM_GST_TYPE PGM_TYPE_32BIT
342#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
343#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
344#include "PGMGstDefs.h"
345#include "PGMAllBth.h"
346#undef PGM_BTH_NAME
347#undef PGM_GST_TYPE
348#undef PGM_GST_NAME
349
350/* Guest - PAE mode */
351#define PGM_GST_TYPE PGM_TYPE_PAE
352#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
353#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
354#include "PGMGstDefs.h"
355#include "PGMAllBth.h"
356#undef PGM_BTH_NAME
357#undef PGM_GST_TYPE
358#undef PGM_GST_NAME
359
360#ifdef VBOX_WITH_64_BITS_GUESTS
361/* Guest - AMD64 mode */
362# define PGM_GST_TYPE PGM_TYPE_AMD64
363# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
365# include "PGMGstDefs.h"
366# include "PGMAllBth.h"
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370#endif /* VBOX_WITH_64_BITS_GUESTS */
371
372#undef PGM_SHW_TYPE
373#undef PGM_SHW_NAME
374
375
376/*
377 * Shadow - AMD64 nested paging mode.
378 */
379#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
380#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
381#include "PGMAllShw.h"
382
383/* Guest - real mode */
384#define PGM_GST_TYPE PGM_TYPE_REAL
385#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
386#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
387#include "PGMGstDefs.h"
388#include "PGMAllBth.h"
389#undef PGM_BTH_NAME
390#undef PGM_GST_TYPE
391#undef PGM_GST_NAME
392
393/* Guest - protected mode */
394#define PGM_GST_TYPE PGM_TYPE_PROT
395#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
396#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
397#include "PGMGstDefs.h"
398#include "PGMAllBth.h"
399#undef PGM_BTH_NAME
400#undef PGM_GST_TYPE
401#undef PGM_GST_NAME
402
403/* Guest - 32-bit mode */
404#define PGM_GST_TYPE PGM_TYPE_32BIT
405#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
406#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
407#include "PGMGstDefs.h"
408#include "PGMAllBth.h"
409#undef PGM_BTH_NAME
410#undef PGM_GST_TYPE
411#undef PGM_GST_NAME
412
413/* Guest - PAE mode */
414#define PGM_GST_TYPE PGM_TYPE_PAE
415#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
416#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
417#include "PGMGstDefs.h"
418#include "PGMAllBth.h"
419#undef PGM_BTH_NAME
420#undef PGM_GST_TYPE
421#undef PGM_GST_NAME
422
423#ifdef VBOX_WITH_64_BITS_GUESTS
424/* Guest - AMD64 mode */
425# define PGM_GST_TYPE PGM_TYPE_AMD64
426# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
427# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
428# include "PGMGstDefs.h"
429# include "PGMAllBth.h"
430# undef PGM_BTH_NAME
431# undef PGM_GST_TYPE
432# undef PGM_GST_NAME
433#endif /* VBOX_WITH_64_BITS_GUESTS */
434
435#undef PGM_SHW_TYPE
436#undef PGM_SHW_NAME
437
438
439/*
440 * Shadow - EPT.
441 */
442#define PGM_SHW_TYPE PGM_TYPE_EPT
443#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
444#include "PGMAllShw.h"
445
446/* Guest - real mode */
447#define PGM_GST_TYPE PGM_TYPE_REAL
448#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
449#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
450#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
451#include "PGMGstDefs.h"
452#include "PGMAllBth.h"
453#undef BTH_PGMPOOLKIND_PT_FOR_PT
454#undef PGM_BTH_NAME
455#undef PGM_GST_TYPE
456#undef PGM_GST_NAME
457
458/* Guest - protected mode */
459#define PGM_GST_TYPE PGM_TYPE_PROT
460#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
461#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
462#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
463#include "PGMGstDefs.h"
464#include "PGMAllBth.h"
465#undef BTH_PGMPOOLKIND_PT_FOR_PT
466#undef PGM_BTH_NAME
467#undef PGM_GST_TYPE
468#undef PGM_GST_NAME
469
470/* Guest - 32-bit mode */
471#define PGM_GST_TYPE PGM_TYPE_32BIT
472#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
473#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
474#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
475#include "PGMGstDefs.h"
476#include "PGMAllBth.h"
477#undef BTH_PGMPOOLKIND_PT_FOR_PT
478#undef PGM_BTH_NAME
479#undef PGM_GST_TYPE
480#undef PGM_GST_NAME
481
482/* Guest - PAE mode */
483#define PGM_GST_TYPE PGM_TYPE_PAE
484#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
485#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
486#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
487#include "PGMGstDefs.h"
488#include "PGMAllBth.h"
489#undef BTH_PGMPOOLKIND_PT_FOR_PT
490#undef PGM_BTH_NAME
491#undef PGM_GST_TYPE
492#undef PGM_GST_NAME
493
494#ifdef VBOX_WITH_64_BITS_GUESTS
495/* Guest - AMD64 mode */
496# define PGM_GST_TYPE PGM_TYPE_AMD64
497# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
498# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
499# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
500# include "PGMGstDefs.h"
501# include "PGMAllBth.h"
502# undef BTH_PGMPOOLKIND_PT_FOR_PT
503# undef PGM_BTH_NAME
504# undef PGM_GST_TYPE
505# undef PGM_GST_NAME
506#endif /* VBOX_WITH_64_BITS_GUESTS */
507
508#undef PGM_SHW_TYPE
509#undef PGM_SHW_NAME
510
511
512/*
513 * Shadow - NEM / None.
514 */
515#define PGM_SHW_TYPE PGM_TYPE_NONE
516#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
517#include "PGMAllShw.h"
518
519/* Guest - real mode */
520#define PGM_GST_TYPE PGM_TYPE_REAL
521#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
522#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
523#include "PGMGstDefs.h"
524#include "PGMAllBth.h"
525#undef PGM_BTH_NAME
526#undef PGM_GST_TYPE
527#undef PGM_GST_NAME
528
529/* Guest - protected mode */
530#define PGM_GST_TYPE PGM_TYPE_PROT
531#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
532#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
533#include "PGMGstDefs.h"
534#include "PGMAllBth.h"
535#undef PGM_BTH_NAME
536#undef PGM_GST_TYPE
537#undef PGM_GST_NAME
538
539/* Guest - 32-bit mode */
540#define PGM_GST_TYPE PGM_TYPE_32BIT
541#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
542#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
543#include "PGMGstDefs.h"
544#include "PGMAllBth.h"
545#undef PGM_BTH_NAME
546#undef PGM_GST_TYPE
547#undef PGM_GST_NAME
548
549/* Guest - PAE mode */
550#define PGM_GST_TYPE PGM_TYPE_PAE
551#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
552#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
553#include "PGMGstDefs.h"
554#include "PGMAllBth.h"
555#undef PGM_BTH_NAME
556#undef PGM_GST_TYPE
557#undef PGM_GST_NAME
558
559#ifdef VBOX_WITH_64_BITS_GUESTS
560/* Guest - AMD64 mode */
561# define PGM_GST_TYPE PGM_TYPE_AMD64
562# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
563# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
564# include "PGMGstDefs.h"
565# include "PGMAllBth.h"
566# undef PGM_BTH_NAME
567# undef PGM_GST_TYPE
568# undef PGM_GST_NAME
569#endif /* VBOX_WITH_64_BITS_GUESTS */
570
571#undef PGM_SHW_TYPE
572#undef PGM_SHW_NAME
573
574
575
576/**
577 * Guest mode data array.
578 */
579PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
580{
581 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
582 {
583 PGM_TYPE_REAL,
584 PGM_GST_NAME_REAL(GetPage),
585 PGM_GST_NAME_REAL(ModifyPage),
586 PGM_GST_NAME_REAL(Enter),
587 PGM_GST_NAME_REAL(Exit),
588#ifdef IN_RING3
589 PGM_GST_NAME_REAL(Relocate),
590#endif
591 },
592 {
593 PGM_TYPE_PROT,
594 PGM_GST_NAME_PROT(GetPage),
595 PGM_GST_NAME_PROT(ModifyPage),
596 PGM_GST_NAME_PROT(Enter),
597 PGM_GST_NAME_PROT(Exit),
598#ifdef IN_RING3
599 PGM_GST_NAME_PROT(Relocate),
600#endif
601 },
602 {
603 PGM_TYPE_32BIT,
604 PGM_GST_NAME_32BIT(GetPage),
605 PGM_GST_NAME_32BIT(ModifyPage),
606 PGM_GST_NAME_32BIT(Enter),
607 PGM_GST_NAME_32BIT(Exit),
608#ifdef IN_RING3
609 PGM_GST_NAME_32BIT(Relocate),
610#endif
611 },
612 {
613 PGM_TYPE_PAE,
614 PGM_GST_NAME_PAE(GetPage),
615 PGM_GST_NAME_PAE(ModifyPage),
616 PGM_GST_NAME_PAE(Enter),
617 PGM_GST_NAME_PAE(Exit),
618#ifdef IN_RING3
619 PGM_GST_NAME_PAE(Relocate),
620#endif
621 },
622#ifdef VBOX_WITH_64_BITS_GUESTS
623 {
624 PGM_TYPE_AMD64,
625 PGM_GST_NAME_AMD64(GetPage),
626 PGM_GST_NAME_AMD64(ModifyPage),
627 PGM_GST_NAME_AMD64(Enter),
628 PGM_GST_NAME_AMD64(Exit),
629# ifdef IN_RING3
630 PGM_GST_NAME_AMD64(Relocate),
631# endif
632 },
633#endif
634};
635
636
637/**
638 * The shadow mode data array.
639 */
640PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
641{
642 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
643 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
644 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
645 {
646 PGM_TYPE_32BIT,
647 PGM_SHW_NAME_32BIT(GetPage),
648 PGM_SHW_NAME_32BIT(ModifyPage),
649 PGM_SHW_NAME_32BIT(Enter),
650 PGM_SHW_NAME_32BIT(Exit),
651#ifdef IN_RING3
652 PGM_SHW_NAME_32BIT(Relocate),
653#endif
654 },
655 {
656 PGM_TYPE_PAE,
657 PGM_SHW_NAME_PAE(GetPage),
658 PGM_SHW_NAME_PAE(ModifyPage),
659 PGM_SHW_NAME_PAE(Enter),
660 PGM_SHW_NAME_PAE(Exit),
661#ifdef IN_RING3
662 PGM_SHW_NAME_PAE(Relocate),
663#endif
664 },
665 {
666 PGM_TYPE_AMD64,
667 PGM_SHW_NAME_AMD64(GetPage),
668 PGM_SHW_NAME_AMD64(ModifyPage),
669 PGM_SHW_NAME_AMD64(Enter),
670 PGM_SHW_NAME_AMD64(Exit),
671#ifdef IN_RING3
672 PGM_SHW_NAME_AMD64(Relocate),
673#endif
674 },
675 {
676 PGM_TYPE_NESTED_32BIT,
677 PGM_SHW_NAME_NESTED_32BIT(GetPage),
678 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
679 PGM_SHW_NAME_NESTED_32BIT(Enter),
680 PGM_SHW_NAME_NESTED_32BIT(Exit),
681#ifdef IN_RING3
682 PGM_SHW_NAME_NESTED_32BIT(Relocate),
683#endif
684 },
685 {
686 PGM_TYPE_NESTED_PAE,
687 PGM_SHW_NAME_NESTED_PAE(GetPage),
688 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
689 PGM_SHW_NAME_NESTED_PAE(Enter),
690 PGM_SHW_NAME_NESTED_PAE(Exit),
691#ifdef IN_RING3
692 PGM_SHW_NAME_NESTED_PAE(Relocate),
693#endif
694 },
695 {
696 PGM_TYPE_NESTED_AMD64,
697 PGM_SHW_NAME_NESTED_AMD64(GetPage),
698 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
699 PGM_SHW_NAME_NESTED_AMD64(Enter),
700 PGM_SHW_NAME_NESTED_AMD64(Exit),
701#ifdef IN_RING3
702 PGM_SHW_NAME_NESTED_AMD64(Relocate),
703#endif
704 },
705 {
706 PGM_TYPE_EPT,
707 PGM_SHW_NAME_EPT(GetPage),
708 PGM_SHW_NAME_EPT(ModifyPage),
709 PGM_SHW_NAME_EPT(Enter),
710 PGM_SHW_NAME_EPT(Exit),
711#ifdef IN_RING3
712 PGM_SHW_NAME_EPT(Relocate),
713#endif
714 },
715 {
716 PGM_TYPE_NONE,
717 PGM_SHW_NAME_NONE(GetPage),
718 PGM_SHW_NAME_NONE(ModifyPage),
719 PGM_SHW_NAME_NONE(Enter),
720 PGM_SHW_NAME_NONE(Exit),
721#ifdef IN_RING3
722 PGM_SHW_NAME_NONE(Relocate),
723#endif
724 },
725};
726
727
728/**
729 * The guest+shadow mode data array.
730 */
731PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
732{
733#if !defined(IN_RING3) && !defined(VBOX_STRICT)
734# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
735# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
736 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
737
738#elif !defined(IN_RING3) && defined(VBOX_STRICT)
739# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
740# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
741 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
742
743#elif defined(IN_RING3) && !defined(VBOX_STRICT)
744# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
745# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
746 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
747
748#elif defined(IN_RING3) && defined(VBOX_STRICT)
749# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
750# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
751 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
752
753#else
754# error "Misconfig."
755#endif
756
757 /* 32-bit shadow paging mode: */
758 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
759 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
760 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
761 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
762 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
763 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
764 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
765 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
766 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
767 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
768 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
769
770 /* PAE shadow paging mode: */
771 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
772 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
773 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
774 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
776 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
777 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
782
783 /* AMD64 shadow paging mode: */
784 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
785 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
786 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
787 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
788 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
789#ifdef VBOX_WITH_64_BITS_GUESTS
790 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
791#else
792 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
793#endif
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
795 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
799
800 /* 32-bit nested paging mode: */
801 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
802 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
803 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
804 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
805 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
806#ifdef VBOX_WITH_64_BITS_GUESTS
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
808#else
809 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
810#endif
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
816
817 /* PAE nested paging mode: */
818 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
819 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
822 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
823#ifdef VBOX_WITH_64_BITS_GUESTS
824 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
825#else
826 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
827#endif
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
833
834 /* AMD64 nested paging mode: */
835 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
836 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
840#ifdef VBOX_WITH_64_BITS_GUESTS
841 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
842#else
843 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
844#endif
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
850
851 /* EPT nested paging mode: */
852 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
853 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
857#ifdef VBOX_WITH_64_BITS_GUESTS
858 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
859#else
860 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
861#endif
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
867
868 /* NONE / NEM: */
869 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
870 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
874#ifdef VBOX_WITH_64_BITS_GUESTS
875 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
876#else
877 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
878#endif
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
884
885
886#undef PGMMODEDATABTH_ENTRY
887#undef PGMMODEDATABTH_NULL_ENTRY
888};
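/*
 * How the tables above are consumed (this mirrors the callers further down,
 * e.g. PGMPrefetchPage): the current mode indices are cached in the VCPU
 * state and used to pick the per-mode worker before calling it:
 *
 *     uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
 *     AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
 *     AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
 *     int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
 *
 * The same pattern, using idxShadowModeData and g_aPgmShadowModeData, is
 * used by PGMShwGetPage and pdmShwModifyPage below.
 */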
889
890
891#ifdef IN_RING0
892/**
893 * #PF Handler.
894 *
895 * @returns VBox status code (appropriate for trap handling and GC return).
896 * @param pVCpu The cross context virtual CPU structure.
897 * @param uErr The trap error code.
898 * @param pRegFrame Trap register frame.
899 * @param pvFault The fault address.
900 */
901VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
902{
903 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
904
905 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
906 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
907 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
908
909
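    /* For reference, the standard x86 #PF error code bits decoded below:
       X86_TRAP_PF_P    - set for protection violations on present pages, clear for not-present faults,
       X86_TRAP_PF_RW   - set for write accesses, clear for reads,
       X86_TRAP_PF_US   - set when the access originated in user mode,
       X86_TRAP_PF_RSVD - a reserved bit was set in a paging-structure entry,
       X86_TRAP_PF_ID   - the fault was caused by an instruction fetch. */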
910# ifdef VBOX_WITH_STATISTICS
911 /*
912 * Error code stats.
913 */
914 if (uErr & X86_TRAP_PF_US)
915 {
916 if (!(uErr & X86_TRAP_PF_P))
917 {
918 if (uErr & X86_TRAP_PF_RW)
919 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
920 else
921 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
922 }
923 else if (uErr & X86_TRAP_PF_RW)
924 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
925 else if (uErr & X86_TRAP_PF_RSVD)
926 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
927 else if (uErr & X86_TRAP_PF_ID)
928 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
929 else
930 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
931 }
932 else
933 { /* Supervisor */
934 if (!(uErr & X86_TRAP_PF_P))
935 {
936 if (uErr & X86_TRAP_PF_RW)
937 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
938 else
939 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
940 }
941 else if (uErr & X86_TRAP_PF_RW)
942 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
943 else if (uErr & X86_TRAP_PF_ID)
944 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
945 else if (uErr & X86_TRAP_PF_RSVD)
946 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
947 }
948# endif /* VBOX_WITH_STATISTICS */
949
950 /*
951 * Call the worker.
952 */
953 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
954 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
955 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
956 bool fLockTaken = false;
957 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
958 if (fLockTaken)
959 {
960 PGM_LOCK_ASSERT_OWNER(pVM);
961 PGM_UNLOCK(pVM);
962 }
963 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
964
965 /*
966 * Return code tweaks.
967 */
968 if (rc != VINF_SUCCESS)
969 {
970 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
971 rc = VINF_SUCCESS;
972
973 /* Note: hack alert for difficult to reproduce problem. */
974 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
975 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
976 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
977 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
978 {
979 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
980 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
981 rc = VINF_SUCCESS;
982 }
983 }
984
985 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
986 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
987 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
988 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
989 return rc;
990}
991#endif /* IN_RING0 */
992
993
994/**
995 * Prefetch a page
996 *
997 * Typically used to sync commonly used pages before entering raw mode
998 * after a CR3 reload.
999 *
1000 * @returns VBox status code suitable for scheduling.
1001 * @retval VINF_SUCCESS on success.
1002 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @param GCPtrPage Page to invalidate.
1005 */
1006VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1007{
1008 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1009
1010 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1011 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1012 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1013 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1014
1015 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1016 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1017 return rc;
1018}
1019
1020
1021/**
1022 * Emulation of the invlpg instruction (HC only actually).
1023 *
1024 * @returns Strict VBox status code, special care required.
1025 * @retval VINF_PGM_SYNC_CR3 - handled.
1026 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1027 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1028 *
1029 * @param pVCpu The cross context virtual CPU structure.
1030 * @param GCPtrPage Page to invalidate.
1031 *
1032 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1033 * safe, but there could be edge cases!
1034 *
1035 * @todo Flush page or page directory only if necessary!
1036 * @todo VBOXSTRICTRC
1037 */
1038VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1039{
1040 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1041 int rc;
1042 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1043
1044 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1045
1046 /*
1047 * Call paging mode specific worker.
1048 */
1049 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1050 PGM_LOCK_VOID(pVM);
1051
1052 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1053 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1054 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1055 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1056
1057 PGM_UNLOCK(pVM);
1058 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1059
1060 /* Ignore all irrelevant error codes. */
1061 if ( rc == VERR_PAGE_NOT_PRESENT
1062 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1063 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1064 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1065 rc = VINF_SUCCESS;
1066
1067 return rc;
1068}
1069
1070
1071/**
1072 * Executes an instruction using the interpreter.
1073 *
1074 * @returns VBox status code (appropriate for trap handling and GC return).
1075 * @param pVM The cross context VM structure.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 * @param pRegFrame Register frame.
1078 * @param pvFault Fault address.
1079 */
1080VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1081{
1082 NOREF(pVM);
1083 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1084 if (rc == VERR_EM_INTERPRETER)
1085 rc = VINF_EM_RAW_EMULATE_INSTR;
1086 if (rc != VINF_SUCCESS)
1087 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1088 return rc;
1089}
1090
1091
1092/**
1093 * Gets effective page information (from the VMM page directory).
1094 *
1095 * @returns VBox status code.
1096 * @param pVCpu The cross context virtual CPU structure.
1097 * @param GCPtr Guest Context virtual address of the page.
1098 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1099 * @param pHCPhys Where to store the HC physical address of the page.
1100 * This is page aligned.
1101 * @remark You should use PGMMapGetPage() for pages in a mapping.
1102 */
1103VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1104{
1105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1106 PGM_LOCK_VOID(pVM);
1107
1108 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1109 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1110 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1111 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1112
1113 PGM_UNLOCK(pVM);
1114 return rc;
1115}
1116
1117
1118/**
1119 * Modify page flags for a range of pages in the shadow context.
1120 *
1121 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1122 *
1123 * @returns VBox status code.
1124 * @param pVCpu The cross context virtual CPU structure.
1125 * @param GCPtr Virtual address of the first page in the range.
1126 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1127 * @param fMask The AND mask - page flags X86_PTE_*.
1128 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1129 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1130 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1131 */
1132DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1133{
1134 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1135 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1136
1137 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1138
1139 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1140 PGM_LOCK_VOID(pVM);
1141
1142 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1143 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1144 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1145 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1146
1147 PGM_UNLOCK(pVM);
1148 return rc;
1149}
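/*
 * Worked example of the fFlags/fMask semantics: the per-mode worker applies
 * roughly  uNew = (uOld & fMask) | fFlags  to the shadow PTE.  Hence the
 * wrappers below: PGMShwMakePageReadonly passes fFlags=0, fMask=~X86_PTE_RW
 * (clear R/W); PGMShwMakePageWritable passes fFlags=X86_PTE_RW, fMask=~0
 * (set R/W); PGMShwMakePageNotPresent passes fFlags=0, fMask=0 (clear
 * everything, including the present bit).
 */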
1150
1151
1152/**
1153 * Changes the page flags for a single page in the shadow page tables so as to
1154 * make it read-only.
1155 *
1156 * @returns VBox status code.
1157 * @param pVCpu The cross context virtual CPU structure.
1158 * @param GCPtr Virtual address of the first page in the range.
1159 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1160 */
1161VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1162{
1163 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1164}
1165
1166
1167/**
1168 * Changes the page flags for a single page in the shadow page tables so as to
1169 * make it writable.
1170 *
1171 * The caller must know with 101% certainty that the guest page tables map this
1172 * page as writable too. This function will deal with shared, zero and
1173 * write-monitored pages.
1174 *
1175 * @returns VBox status code.
1176 * @param pVCpu The cross context virtual CPU structure.
1177 * @param GCPtr Virtual address of the first page in the range.
1178 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1179 */
1180VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1181{
1182 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1183 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1184 return VINF_SUCCESS;
1185}
1186
1187
1188/**
1189 * Changes the page flags for a single page in the shadow page tables so as to
1190 * make it not present.
1191 *
1192 * @returns VBox status code.
1193 * @param pVCpu The cross context virtual CPU structure.
1194 * @param GCPtr Virtual address of the first page in the range.
1195 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1196 */
1197VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1198{
1199 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1200}
1201
1202
1203/**
1204 * Changes the page flags for a single page in the shadow page tables so as to
1205 * make it supervisor and writable.
1206 *
1207 * This is for dealing with CR0.WP=0 and read-only user pages.
1208 *
1209 * @returns VBox status code.
1210 * @param pVCpu The cross context virtual CPU structure.
1211 * @param GCPtr Virtual address of the first page in the range.
1212 * @param fBigPage Whether or not this is a big page. If it is, we have to
1213 * change the shadow PDE as well. If it isn't, the caller
1214 * has checked that the shadow PDE doesn't need changing.
1215 * We ASSUME 4KB pages backing the big page here!
1216 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1217 */
1218int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1219{
1220 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1221 if (rc == VINF_SUCCESS && fBigPage)
1222 {
1223 /* this is a bit ugly... */
1224 switch (pVCpu->pgm.s.enmShadowMode)
1225 {
1226 case PGMMODE_32_BIT:
1227 {
1228 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1229 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1230 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1231 pPde->u |= X86_PDE_RW;
1232 Log(("-> PDE=%#llx (32)\n", pPde->u));
1233 break;
1234 }
1235 case PGMMODE_PAE:
1236 case PGMMODE_PAE_NX:
1237 {
1238 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1239 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1240 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1241 pPde->u |= X86_PDE_RW;
1242 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1243 break;
1244 }
1245 default:
1246 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1247 }
1248 }
1249 return rc;
1250}
1251
1252
1253/**
1254 * Gets the shadow page directory for the specified address, PAE.
1255 *
1256 * @returns Pointer to the shadow PD.
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param GCPtr The address.
1259 * @param uGstPdpe Guest PDPT entry. Valid.
1260 * @param ppPD Receives address of page directory
1261 */
1262int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1263{
1264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1265 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1266 PPGMPOOLPAGE pShwPage;
1267 int rc;
1268 PGM_LOCK_ASSERT_OWNER(pVM);
1269
1270
1271 /* Allocate page directory if not present. */
1272 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1273 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1274 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1275 X86PGPAEUINT const uPdpe = pPdpe->u;
1276 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1277 {
1278 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1279 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1280 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1281
1282 pgmPoolCacheUsed(pPool, pShwPage);
1283
1284 /* Update the entry if necessary. */
1285 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1286 if (uPdpeNew == uPdpe)
1287 { /* likely */ }
1288 else
1289 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1290 }
1291 else
1292 {
1293 RTGCPTR64 GCPdPt;
1294 PGMPOOLKIND enmKind;
1295 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1296 {
1297 /* AMD-V nested paging or real/protected mode without paging. */
1298 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1299 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1300 }
1301 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1302 {
1303 if (uGstPdpe & X86_PDPE_P)
1304 {
1305 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1306 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1307 }
1308 else
1309 {
1310 /* PD not present; guest must reload CR3 to change it.
1311 * No need to monitor anything in this case. */
1312 /** @todo r=bird: WTF is hit?!? */
1313 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1314 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1315 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1316 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1317 }
1318 }
1319 else
1320 {
1321 GCPdPt = CPUMGetGuestCR3(pVCpu);
1322 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1323 }
1324
1325 /* Create a reference back to the PDPT by using the index in its shadow page. */
1326 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1327 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1328 &pShwPage);
1329 AssertRCReturn(rc, rc);
1330
1331 /* Hook it up. */
1332 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1333 }
1334 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1335
1336 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1337 return VINF_SUCCESS;
1338}
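/*
 * Summary of pgmShwSyncPaePDPtr above: when the shadow PDPT entry is already
 * present, the existing pool page is looked up (pgmPoolGetPage), marked as
 * recently used (pgmPoolCacheUsed) and the entry is rewritten only if the
 * guest P/A bits changed; otherwise a new page directory is allocated from
 * the pool (pgmPoolAlloc), keyed on the guest PD address (or a pseudo-address
 * for nested paging and unpaged guest modes), and hooked into the PDPT entry
 * with an atomic write.
 */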
1339
1340
1341/**
1342 * Gets the pointer to the shadow page directory entry for an address, PAE.
1343 *
1344 * @returns Pointer to the PDE.
1345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1346 * @param GCPtr The address.
1347 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1348 */
1349DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1350{
1351 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1352 PGM_LOCK_ASSERT_OWNER(pVM);
1353
1354 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1355 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1356 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1357 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1358 if (!(uPdpe & X86_PDPE_P))
1359 {
1360 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1361 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1362 }
1363 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1364
1365 /* Fetch the pgm pool shadow descriptor. */
1366 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1367 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1368
1369 *ppShwPde = pShwPde;
1370 return VINF_SUCCESS;
1371}
1372
1373
1374/**
1375 * Syncs the SHADOW page directory pointer for the specified address.
1376 *
1377 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1378 *
1379 * The caller is responsible for making sure the guest has a valid PD before
1380 * calling this function.
1381 *
1382 * @returns VBox status code.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param GCPtr The address.
1385 * @param uGstPml4e Guest PML4 entry (valid).
1386 * @param uGstPdpe Guest PDPT entry (valid).
1387 * @param ppPD Receives address of page directory
1388 */
1389static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1390{
1391 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1392 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1393 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1394 int rc;
1395
1396 PGM_LOCK_ASSERT_OWNER(pVM);
1397
1398 /*
1399 * PML4.
1400 */
1401 PPGMPOOLPAGE pShwPage;
1402 {
1403 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1404 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1405 X86PGPAEUINT const uPml4e = pPml4e->u;
1406
1407 /* Allocate page directory pointer table if not present. */
1408 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1409 {
1410 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1411 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1412
1413 pgmPoolCacheUsed(pPool, pShwPage);
1414
1415 /* Update the entry if needed. */
1416 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1417 | (uPml4e & PGM_PML4_FLAGS);
1418 if (uPml4e == uPml4eNew)
1419 { /* likely */ }
1420 else
1421 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1422 }
1423 else
1424 {
1425 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1426
1427 RTGCPTR64 GCPml4;
1428 PGMPOOLKIND enmKind;
1429 if (fNestedPagingOrNoGstPaging)
1430 {
1431 /* AMD-V nested paging or real/protected mode without paging */
1432 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1433 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1434 }
1435 else
1436 {
1437 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1438 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1439 }
1440
1441 /* Create a reference back to the PDPT by using the index in its shadow page. */
1442 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1443 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1444 &pShwPage);
1445 AssertRCReturn(rc, rc);
1446
1447 /* Hook it up. */
1448 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1449 | (uPml4e & PGM_PML4_FLAGS));
1450 }
1451 }
1452
1453 /*
1454 * PDPT.
1455 */
1456 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1457 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1458 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1459 X86PGPAEUINT const uPdpe = pPdpe->u;
1460
1461 /* Allocate page directory if not present. */
1462 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1463 {
1464 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1465 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1466
1467 pgmPoolCacheUsed(pPool, pShwPage);
1468
1469 /* Update the entry if needed. */
1470 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1471 | (uPdpe & PGM_PDPT_FLAGS);
1472 if (uPdpe == uPdpeNew)
1473 { /* likely */ }
1474 else
1475 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1476 }
1477 else
1478 {
1479 RTGCPTR64 GCPdPt;
1480 PGMPOOLKIND enmKind;
1481 if (fNestedPagingOrNoGstPaging)
1482 {
1483 /* AMD-V nested paging or real/protected mode without paging */
1484 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1485 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1486 }
1487 else
1488 {
1489 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1490 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1491 }
1492
1493 /* Create a reference back to the PDPT by using the index in its shadow page. */
1494 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1495 pShwPage->idx, iPdPt, false /*fLockPage*/,
1496 &pShwPage);
1497 AssertRCReturn(rc, rc);
1498
1499 /* Hook it up. */
1500 ASMAtomicWriteU64(&pPdpe->u,
1501 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1502 }
1503
1504 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1505 return VINF_SUCCESS;
1506}
1507
1508
1509/**
1510 * Gets the SHADOW page directory pointer for the specified address (long mode).
1511 *
1512 * @returns VBox status code.
1513 * @param pVCpu The cross context virtual CPU structure.
1514 * @param GCPtr The address.
1515 * @param ppPml4e Receives the address of the page map level 4 entry.
1516 * @param ppPdpt Receives the address of the page directory pointer table.
1517 * @param ppPD Receives the address of the page directory.
1518 */
1519DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1520{
1521 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1522 PGM_LOCK_ASSERT_OWNER(pVM);
1523
1524 /*
1525 * PML4
1526 */
1527 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1528 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1529 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1530 if (ppPml4e)
1531 *ppPml4e = (PX86PML4E)pPml4e;
1532 X86PGPAEUINT const uPml4e = pPml4e->u;
1533 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1534    if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1535 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1536
1537 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1538 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1539 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1540
1541 /*
1542 * PDPT
1543 */
1544 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1545 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1546 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1547    if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1548 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1549
1550 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1551 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1552
1553 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1554 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1555 return VINF_SUCCESS;
1556}
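/*
 * Editor's illustrative sketch -- not part of the original file.  It shows how a caller
 * typically turns the page directory returned by pgmShwGetLongModePDPtr() into the shadow
 * PDE for the address, mirroring the index arithmetic in the Log4 statement above.  The
 * helper name pgmShwSketchGetLongModePde is hypothetical, and the caller is assumed to own
 * the PGM lock (the callee asserts it).
 */
DECLINLINE(int) pgmShwSketchGetLongModePde(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PDEPAE *pPde)
{
    PX86PDPT  pPdpt = NULL;
    PX86PDPAE pPd   = NULL;
    int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL /*ppPml4e*/, &pPdpt, &pPd);
    if (RT_SUCCESS(rc))
    {
        /* Bits 29:21 of the address select one of the 512 PDEs in the page directory. */
        unsigned const iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        *pPde = pPd->a[iPd];
    }
    return rc;
}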
1557
1558
1559/**
1560 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1561 * backing pages in case the PDPT or PML4 entry is missing.
1562 *
1563 * @returns VBox status code.
1564 * @param pVCpu The cross context virtual CPU structure.
1565 * @param GCPtr The address.
1566 * @param   ppPdpt      Receives the address of the PDPT.
1567 * @param   ppPD        Receives the address of the page directory.
1568 */
1569static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1570{
1571 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1572 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1573 int rc;
1574
1575 Assert(pVM->pgm.s.fNestedPaging);
1576 PGM_LOCK_ASSERT_OWNER(pVM);
1577
1578 /*
1579 * PML4 level.
1580 */
1581 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1582 Assert(pPml4);
1583
1584 /* Allocate page directory pointer table if not present. */
1585 PPGMPOOLPAGE pShwPage;
1586 {
1587 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1588 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1589 EPTPML4E Pml4e;
1590 Pml4e.u = pPml4e->u;
1591 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1592 {
1593 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1594 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1595 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1596 &pShwPage);
1597 AssertRCReturn(rc, rc);
1598
1599 /* Hook up the new PDPT now. */
1600 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1601 }
1602 else
1603 {
1604 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1605 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1606
1607 pgmPoolCacheUsed(pPool, pShwPage);
1608
1609            /* Hook up the cached PDPT if needed (probably not, given 512*512 PTs to sync). */
1610 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1611 { }
1612 else
1613 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1614 }
1615 }
1616
1617 /*
1618 * PDPT level.
1619 */
1620 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1621 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1622 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1623
1624 if (ppPdpt)
1625 *ppPdpt = pPdpt;
1626
1627 /* Allocate page directory if not present. */
1628 EPTPDPTE Pdpe;
1629 Pdpe.u = pPdpe->u;
1630 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1631 {
1632 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1633 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1634 pShwPage->idx, iPdPt, false /*fLockPage*/,
1635 &pShwPage);
1636 AssertRCReturn(rc, rc);
1637
1638 /* Hook up the new PD now. */
1639 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1640 }
1641 else
1642 {
1643 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1644 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1645
1646 pgmPoolCacheUsed(pPool, pShwPage);
1647
1648        /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1649 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1650 { }
1651 else
1652 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1653 }
1654
1655 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1656 return VINF_SUCCESS;
1657}
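/*
 * Editor's illustrative sketch -- not part of the original file.  It shows how a caller
 * would pick the shadow EPT PDE out of the page directory returned above.  With the usual
 * 4-level EPT layout (PML4 index in bits 47:39, PDPT index in bits 38:30, PD index in bits
 * 29:21), an address built as (3<<39)|(5<<30)|(7<<21) yields iPml4=3, iPdPt=5 and iPd=7.
 * The helper name pgmShwSketchGetEptPde is hypothetical and it assumes the EPT_PD_SHIFT /
 * EPT_PD_MASK companions to the PML4/PDPT macros used above; the caller is assumed to own
 * the PGM lock.
 */
static int pgmShwSketchGetEptPde(PVMCPUCC pVCpu, RTGCPTR64 GCPtrNested, EPTPDE *pPde)
{
    PEPTPDPT pPdpt = NULL;
    PEPTPD   pPd   = NULL;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtrNested, &pPdpt, &pPd);
    if (RT_SUCCESS(rc))
    {
        /* Bits 29:21 select one of the 512 PDEs in the EPT page directory. */
        unsigned const iPd = (GCPtrNested >> EPT_PD_SHIFT) & EPT_PD_MASK;
        *pPde = pPd->a[iPd];
    }
    return rc;
}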
1658
1659
1660#ifdef IN_RING0
1661/**
1662 * Synchronizes a range of nested page table entries.
1663 *
1664 * The caller must own the PGM lock.
1665 *
1666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1667 * @param GCPhys Where to start.
1668 * @param cPages How many pages which entries should be synced.
1669 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1670 * host paging mode for AMD-V).
1671 */
1672int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1673{
1674 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1675
1676/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1677 int rc;
1678 switch (enmShwPagingMode)
1679 {
1680 case PGMMODE_32_BIT:
1681 {
1682 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1683 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1684 break;
1685 }
1686
1687 case PGMMODE_PAE:
1688 case PGMMODE_PAE_NX:
1689 {
1690 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1691 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1692 break;
1693 }
1694
1695 case PGMMODE_AMD64:
1696 case PGMMODE_AMD64_NX:
1697 {
1698 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1699 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1700 break;
1701 }
1702
1703 case PGMMODE_EPT:
1704 {
1705 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1706 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1707 break;
1708 }
1709
1710 default:
1711 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1712 }
1713 return rc;
1714}
1715#endif /* IN_RING0 */
1716
1717
1718/**
1719 * Gets effective Guest OS page information.
1720 *
1721 * When GCPtr is in a big page, the function will return as if it was a normal
1722 * 4KB page. Should distinguishing between big and normal pages become necessary
1723 * at a later point, a dedicated variant of PGMGstGetPage() can be added for that
1724 * purpose.
1725 *
1726 * @returns VBox status code.
1727 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1728 * @param GCPtr Guest Context virtual address of the page.
1729 * @param pWalk Where to store the page walk information.
1730 */
1731VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1732{
1733 VMCPU_ASSERT_EMT(pVCpu);
1734 Assert(pWalk);
1735 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1736 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1737 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1738 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1739}
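/*
 * Editor's illustrative sketch -- not part of the original file.  It shows the typical way
 * PGMGstGetPage() is consumed: do the walk and, on success, take the (defensively
 * page-aligned) guest-physical address from the walk result.  The helper name
 * pgmSketchGstQueryPageGCPhys is hypothetical.
 */
static int pgmSketchGstQueryPageGCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhysPage)
{
    PGMPTWALK Walk;
    RT_ZERO(Walk);
    int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc))
        *pGCPhysPage = Walk.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /* frame address of the backing page */
    else
        *pGCPhysPage = NIL_RTGCPHYS;
    return rc;
}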
1740
1741
1742/**
1743 * Maps the guest CR3.
1744 *
1745 * @returns VBox status code.
1746 * @param pVCpu The cross context virtual CPU structure.
1747 * @param GCPhysCr3 The guest CR3 value.
1748 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1749 */
1750DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1751{
1752 /** @todo this needs some reworking wrt. locking? */
1753 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1754 PGM_LOCK_VOID(pVM);
1755 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1756 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1757
1758 RTHCPTR HCPtrGuestCr3;
1759 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1760 PGM_UNLOCK(pVM);
1761
1762 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1763 return rc;
1764}
1765
1766
1767/**
1768 * Unmaps the guest CR3.
1769 *
1770 * @returns VBox status code.
1771 * @param pVCpu The cross context virtual CPU structure.
1772 */
1773DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1774{
1775 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1776 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1777 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
1778 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1779}
1780
1781
1782/**
1783 * Performs a guest page table walk.
1784 *
1785 * The guest should be in paged protect mode or long mode when making a call to
1786 * this function.
1787 *
1788 * @returns VBox status code.
1789 * @retval VINF_SUCCESS on success.
1790 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1791 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1792 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1793 *
1794 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1795 * @param GCPtr The guest virtual address to walk by.
1796 * @param pWalk Where to return the walk result. This is valid for some
1797 * error codes as well.
1798 * @param pGstWalk The guest mode specific page walk information.
1799 */
1800int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1801{
1802 VMCPU_ASSERT_EMT(pVCpu);
1803 switch (pVCpu->pgm.s.enmGuestMode)
1804 {
1805 case PGMMODE_32_BIT:
1806 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1807 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1808
1809 case PGMMODE_PAE:
1810 case PGMMODE_PAE_NX:
1811 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1812 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1813
1814 case PGMMODE_AMD64:
1815 case PGMMODE_AMD64_NX:
1816 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1817 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1818
1819 case PGMMODE_REAL:
1820 case PGMMODE_PROTECTED:
1821 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1822 return VERR_PGM_NOT_USED_IN_MODE;
1823
1824 case PGMMODE_EPT:
1825 case PGMMODE_NESTED_32BIT:
1826 case PGMMODE_NESTED_PAE:
1827 case PGMMODE_NESTED_AMD64:
1828 default:
1829 AssertFailed();
1830 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1831 return VERR_PGM_NOT_USED_IN_MODE;
1832 }
1833}
1834
1835
1836#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1837/**
1838 * Performs a guest second-level address translation (SLAT).
1839 *
1840 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1841 * function.
1842 *
1843 * @returns VBox status code.
1844 * @retval VINF_SUCCESS on success.
1845 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1846 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1847 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1848 *
1849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1850 * @param GCPhysNested The nested-guest physical address being translated
1851 * (input).
1852 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1853 * valid. This indicates the SLAT is caused when
1854 * translating a nested-guest linear address.
1855 * @param GCPtrNested The nested-guest virtual address that initiated the
1856 * SLAT. If none, pass NIL_RTGCPTR.
1857 * @param pWalk Where to return the walk result. This is valid for
1858 * some error codes as well.
1859 * @param pGstWalk The second-level paging-mode specific walk
1860 * information.
1861 */
1862static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1863 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1864{
1865 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1866 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1867 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1868 switch (pVCpu->pgm.s.enmGuestSlatMode)
1869 {
1870 case PGMSLAT_EPT:
1871 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1872 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1873
1874 default:
1875 AssertFailed();
1876 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1877 return VERR_PGM_NOT_USED_IN_MODE;
1878 }
1879}
1880
1881
1882/**
1883 * Performs a guest second-level address translation (SLAT) for a nested-guest
1884 * physical address.
1885 *
1886 * This version requires the SLAT mode to be provided by the caller because we could
1887 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1888 * register values.
1889 *
1890 * @returns VBox status code.
1891 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1892 * @param enmSlatMode The second-level paging mode to use.
1893 * @param GCPhysNested The nested-guest physical address to translate.
1894 * @param pWalk Where to store the walk result.
1895 * @param pGstWalk Where to store the second-level paging-mode specific
1896 * walk information.
1897 */
1898static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1899 PPGMPTWALKGST pGstWalk)
1900{
1901 AssertPtr(pWalk);
1902 AssertPtr(pGstWalk);
1903 switch (enmSlatMode)
1904 {
1905 case PGMSLAT_EPT:
1906 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1907 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, NIL_RTGCPTR, pWalk,
1908 &pGstWalk->u.Ept);
1909
1910 default:
1911 AssertFailed();
1912 return VERR_PGM_NOT_USED_IN_MODE;
1913 }
1914}
1915#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1916
1917
1918/**
1919 * Tries to continue the previous walk.
1920 *
1921 * @note Requires the caller to hold the PGM lock from the first
1922 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1923 * we cannot use the pointers.
1924 *
1925 * @returns VBox status code.
1926 * @retval VINF_SUCCESS on success.
1927 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1928 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1929 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1930 *
1931 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1932 * @param GCPtr The guest virtual address to walk by.
1933 * @param pWalk Pointer to the previous walk result and where to return
1934 * the result of this walk. This is valid for some error
1935 * codes as well.
1936 * @param pGstWalk The guest-mode specific walk information.
1937 */
1938int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1939{
1940 /*
1941     * We can only handle successful walks.
1942 * We also limit ourselves to the next page.
1943 */
1944 if ( pWalk->fSucceeded
1945 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
1946 {
1947 Assert(pWalk->uLevel == 0);
1948 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1949 {
1950 /*
1951 * AMD64
1952 */
1953 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1954 {
1955 /*
1956 * We fall back to full walk if the PDE table changes, if any
1957 * reserved bits are set, or if the effective page access changes.
1958 */
1959 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1960 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1961 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1962 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1963
1964 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1965 {
1966 if (pGstWalk->u.Amd64.pPte)
1967 {
1968 X86PTEPAE Pte;
1969 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1970 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1971 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1972 {
1973 pWalk->GCPtr = GCPtr;
1974 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1975 pGstWalk->u.Amd64.Pte.u = Pte.u;
1976 pGstWalk->u.Amd64.pPte++;
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 }
1981 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1982 {
1983 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1984 if (pGstWalk->u.Amd64.pPde)
1985 {
1986 X86PDEPAE Pde;
1987 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1988 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1989 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1990 {
1991 /* Get the new PTE and check out the first entry. */
1992 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1993 &pGstWalk->u.Amd64.pPt);
1994 if (RT_SUCCESS(rc))
1995 {
1996 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1997 X86PTEPAE Pte;
1998 Pte.u = pGstWalk->u.Amd64.pPte->u;
1999 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2000 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2001 {
2002 pWalk->GCPtr = GCPtr;
2003 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2004 pGstWalk->u.Amd64.Pte.u = Pte.u;
2005 pGstWalk->u.Amd64.Pde.u = Pde.u;
2006 pGstWalk->u.Amd64.pPde++;
2007 return VINF_SUCCESS;
2008 }
2009 }
2010 }
2011 }
2012 }
2013 }
2014 else if (!pWalk->fGigantPage)
2015 {
2016 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2017 {
2018 pWalk->GCPtr = GCPtr;
2019 pWalk->GCPhys += GUEST_PAGE_SIZE;
2020 return VINF_SUCCESS;
2021 }
2022 }
2023 else
2024 {
2025 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2026 {
2027 pWalk->GCPtr = GCPtr;
2028 pWalk->GCPhys += GUEST_PAGE_SIZE;
2029 return VINF_SUCCESS;
2030 }
2031 }
2032 }
2033 }
2034 /* Case we don't handle. Do full walk. */
2035 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2036}
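/*
 * Editor's illustrative sketch -- not part of the original file.  It shows the intended
 * pgmGstPtWalk() / pgmGstPtWalkNext() pattern over a run of consecutive pages: the PGM lock
 * is held for the whole run (see the @note above) and the walk state is reused for each
 * following page.  The helper name pgmSketchGstWalkPages is hypothetical.
 */
static int pgmSketchGstWalkPages(PVMCPUCC pVCpu, RTGCPTR GCPtrFirst, uint32_t cPages)
{
    PGMPTWALK    Walk;
    PGMPTWALKGST GstWalk;
    PVMCC        pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_VOID(pVM);
    int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk, &GstWalk);
    for (uint32_t iPage = 1; iPage < cPages && RT_SUCCESS(rc); iPage++)
    {
        /* Advance by exactly one guest page; anything else makes pgmGstPtWalkNext do a full walk. */
        RTGCPTR const GCPtr = GCPtrFirst + iPage * (RTGCPTR)GUEST_PAGE_SIZE;
        rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &GstWalk);
    }
    PGM_UNLOCK(pVM);
    return rc;
}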
2037
2038
2039/**
2040 * Modifies page flags for a range of pages in the guest's tables.
2041 *
2042 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2043 *
2044 * @returns VBox status code.
2045 * @param pVCpu The cross context virtual CPU structure.
2046 * @param GCPtr Virtual address of the first page in the range.
2047 * @param cb Size (in bytes) of the range to apply the modification to.
2048 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2049 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2050 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2051 */
2052VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2053{
2054 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2055 VMCPU_ASSERT_EMT(pVCpu);
2056
2057 /*
2058 * Validate input.
2059 */
2060 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2061 Assert(cb);
2062
2063 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2064
2065 /*
2066 * Adjust input.
2067 */
2068 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2069 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2070 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2071
2072 /*
2073 * Call worker.
2074 */
2075 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2076 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2077 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2078 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2079
2080 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2081 return rc;
2082}
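/*
 * Editor's illustrative sketch -- not part of the original file.  It shows how the
 * fFlags/fMask pair is meant to be used, here to write-protect a range: OR in nothing and
 * AND with everything except R/W.  Note the 64-bit cast before ~'ing, which is what the
 * CAREFUL warning above is about.  The helper name pgmSketchGstWriteProtect is hypothetical.
 */
static int pgmSketchGstWriteProtect(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb)
{
    /* new PTE flags = (old & fMask) | fFlags  ->  clears X86_PTE_RW, sets nothing. */
    return PGMGstModifyPage(pVCpu, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}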
2083
2084
2085/**
2086 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2087 *
2088 * @returns @c true if the PDPE is valid, @c false otherwise.
2089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2090 * @param paPaePdpes The PAE PDPEs to validate.
2091 *
2092 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2093 */
2094VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2095{
2096 Assert(paPaePdpes);
2097 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2098 {
2099 X86PDPE const PaePdpe = paPaePdpes[i];
2100 if ( !(PaePdpe.u & X86_PDPE_P)
2101 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2102 { /* likely */ }
2103 else
2104 return false;
2105 }
2106 return true;
2107}
2108
2109
2110/**
2111 * Performs the lazy mapping of the 32-bit guest PD.
2112 *
2113 * @returns VBox status code.
2114 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2115 * @param ppPd Where to return the pointer to the mapping. This is
2116 * always set.
2117 */
2118int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2119{
2120 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2121 PGM_LOCK_VOID(pVM);
2122
2123 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2124
2125 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2126 PPGMPAGE pPage;
2127 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2128 if (RT_SUCCESS(rc))
2129 {
2130 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2131 if (RT_SUCCESS(rc))
2132 {
2133# ifdef IN_RING3
2134 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2135 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2136# else
2137 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2138 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2139# endif
2140 PGM_UNLOCK(pVM);
2141 return VINF_SUCCESS;
2142 }
2143 AssertRC(rc);
2144 }
2145 PGM_UNLOCK(pVM);
2146
2147 *ppPd = NULL;
2148 return rc;
2149}
2150
2151
2152/**
2153 * Performs the lazy mapping of the PAE guest PDPT.
2154 *
2155 * @returns VBox status code.
2156 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2157 * @param ppPdpt Where to return the pointer to the mapping. This is
2158 * always set.
2159 */
2160int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2161{
2162 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2163 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2164 PGM_LOCK_VOID(pVM);
2165
2166 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2167 PPGMPAGE pPage;
2168 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2169 * guest-physical address here. */
2170 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2171 if (RT_SUCCESS(rc))
2172 {
2173 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2174 if (RT_SUCCESS(rc))
2175 {
2176# ifdef IN_RING3
2177 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2178 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2179# else
2180 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2181 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2182# endif
2183 PGM_UNLOCK(pVM);
2184 return VINF_SUCCESS;
2185 }
2186 AssertRC(rc);
2187 }
2188
2189 PGM_UNLOCK(pVM);
2190 *ppPdpt = NULL;
2191 return rc;
2192}
2193
2194
2195/**
2196 * Performs the lazy mapping / updating of a PAE guest PD.
2197 *
2198 * @returns VBox status code.
2199 * @retval  VINF_SUCCESS on success; the mapping is returned via @a ppPd.
2200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2201 * @param iPdpt Which PD entry to map (0..3).
2202 * @param ppPd Where to return the pointer to the mapping. This is
2203 * always set.
2204 */
2205int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2206{
2207 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2208 PGM_LOCK_VOID(pVM);
2209
2210 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2211 Assert(pGuestPDPT);
2212 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2213 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2214 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2215
2216 PPGMPAGE pPage;
2217 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2218 if (RT_SUCCESS(rc))
2219 {
2220 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2221 AssertRC(rc);
2222 if (RT_SUCCESS(rc))
2223 {
2224# ifdef IN_RING3
2225 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2226 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2227# else
2228 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2229 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2230# endif
2231 if (fChanged)
2232 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2233 PGM_UNLOCK(pVM);
2234 return VINF_SUCCESS;
2235 }
2236 }
2237
2238 /* Invalid page or some failure, invalidate the entry. */
2239 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2240 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2241 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2242
2243 PGM_UNLOCK(pVM);
2244 return rc;
2245}
2246
2247
2248 * Performs the lazy mapping of the AMD64 guest PML4 table.
2249 * Performs the lazy mapping of the 32-bit guest PD.
2250 *
2251 * @returns VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2253 * @param ppPml4 Where to return the pointer to the mapping. This will
2254 * always be set.
2255 */
2256int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2257{
2258 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2259 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2260 PGM_LOCK_VOID(pVM);
2261
2262 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2263 PPGMPAGE pPage;
2264 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2265 if (RT_SUCCESS(rc))
2266 {
2267 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2268 if (RT_SUCCESS(rc))
2269 {
2270# ifdef IN_RING3
2271 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2272 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2273# else
2274 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2275 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2276# endif
2277 PGM_UNLOCK(pVM);
2278 return VINF_SUCCESS;
2279 }
2280 }
2281
2282 PGM_UNLOCK(pVM);
2283 *ppPml4 = NULL;
2284 return rc;
2285}
2286
2287
2288#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2289/**
2290 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2291 *
2292 * @returns VBox status code.
2293 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2294 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2295 * always be set.
2296 */
2297int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2298{
2299 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2300 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2301 PGM_LOCK_VOID(pVM);
2302
2303 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2304 PPGMPAGE pPage;
2305 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2306 if (RT_SUCCESS(rc))
2307 {
2308 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2309 if (RT_SUCCESS(rc))
2310 {
2311# ifdef IN_RING3
2312 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2313 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2314# else
2315 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2316 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2317# endif
2318 PGM_UNLOCK(pVM);
2319 return VINF_SUCCESS;
2320 }
2321 }
2322
2323 PGM_UNLOCK(pVM);
2324 *ppEptPml4 = NULL;
2325 return rc;
2326}
2327#endif
2328
2329
2330/**
2331 * Gets the current CR3 register value for the shadow memory context.
2332 * @returns CR3 value.
2333 * @param pVCpu The cross context virtual CPU structure.
2334 */
2335VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2336{
2337 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2338 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2339 return pPoolPage->Core.Key;
2340}
2341
2342
2343/**
2344 * Forces lazy remapping of the guest's PAE page-directory structures.
2345 *
2346 * @param pVCpu The cross context virtual CPU structure.
2347 */
2348static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2349{
2350 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2351 {
2352 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2353 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2354 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2355 }
2356}
2357
2358
2359/**
2360 * Gets the CR3 mask corresponding to the given paging mode.
2361 *
2362 * @returns The CR3 mask.
2363 * @param enmMode The paging mode.
2364 */
2365DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
2366{
2367 /** @todo This work can be optimized either by storing the masks in
2368 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
2369 * store the result when entering guest mode since we currently use it only
2370 * for enmGuestMode. */
2371 switch (enmMode)
2372 {
2373 case PGMMODE_PAE:
2374 case PGMMODE_PAE_NX:
2375 return X86_CR3_PAE_PAGE_MASK;
2376 case PGMMODE_AMD64:
2377 case PGMMODE_AMD64_NX:
2378 return X86_CR3_AMD64_PAGE_MASK;
2379 case PGMMODE_EPT:
2380 return X86_CR3_EPT_PAGE_MASK;
2381 default:
2382 return X86_CR3_PAGE_MASK;
2383 }
2384}
2385
2386
2387/**
2388 * Gets the masked CR3 value according to the current guest paging mode.
2389 *
2390 * @returns The masked PGM CR3 value.
2391 * @param pVCpu The cross context virtual CPU structure.
2392 * @param uCr3 The raw guest CR3 value.
2393 */
2394DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2395{
2396 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
2397 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
2398 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2399 return GCPhysCR3;
2400}
2401
2402
2403#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2404/**
2405 * Performs second-level address translation for the given CR3 and updates the
2406 * nested-guest CR3 when successful.
2407 *
2408 * @returns VBox status code.
2409 * @param pVCpu The cross context virtual CPU structure.
2410 * @param uCr3 The masked nested-guest CR3 value.
2411 * @param   pGCPhysCr3  Where to store the translated CR3.
2412 *
2413 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2414 * mindful of this in code that's hyper sensitive to the order of
2415 * operations.
2416 */
2417static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2418{
2419 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2420 {
2421 PGMPTWALK Walk;
2422 PGMPTWALKGST GstWalk;
2423 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2424 if (RT_SUCCESS(rc))
2425 {
2426 /* Update nested-guest CR3. */
2427 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2428
2429 /* Pass back the translated result. */
2430 *pGCPhysCr3 = Walk.GCPhys;
2431 return VINF_SUCCESS;
2432 }
2433
2434 /* Translation failed. */
2435 *pGCPhysCr3 = NIL_RTGCPHYS;
2436 return rc;
2437 }
2438
2439 /*
2440 * If the nested-guest CR3 has not changed, then the previously
2441 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2442 */
2443 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2444 return VINF_SUCCESS;
2445}
2446#endif
2447
2448
2449/**
2450 * Performs and schedules necessary updates following a CR3 load or reload.
2451 *
2452 * This will normally involve mapping the guest PD or nPDPT
2453 *
2454 * @returns VBox status code.
2455 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2456 * safely be ignored and overridden since the FF will be set too then.
2457 * @param pVCpu The cross context virtual CPU structure.
2458 * @param cr3 The new cr3.
2459 * @param fGlobal Indicates whether this is a global flush or not.
2460 */
2461VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2462{
2463 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2464 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2465
2466 VMCPU_ASSERT_EMT(pVCpu);
2467
2468 /*
2469 * Always flag the necessary updates; necessary for hardware acceleration
2470 */
2471 /** @todo optimize this, it shouldn't always be necessary. */
2472 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2473 if (fGlobal)
2474 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2475
2476 /*
2477 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2478 */
2479 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2480 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2481#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2482 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2483 {
2484 LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
2485 RTGCPHYS GCPhysOut;
2486 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2487 if (RT_SUCCESS(rc))
2488 GCPhysCR3 = GCPhysOut;
2489 else
2490 {
2491 /* CR3 SLAT translation failed but we try to pretend it
2492 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2493 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2494 int const rc2 = pgmGstUnmapCr3(pVCpu);
2495 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2496 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2497 return rc2;
2498 }
2499 }
2500#endif
2501
2502 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2503 int rc = VINF_SUCCESS;
2504 if (GCPhysOldCR3 != GCPhysCR3)
2505 {
2506 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2507 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2508 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2509
2510 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2511 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2512 if (RT_LIKELY(rc == VINF_SUCCESS))
2513 { }
2514 else
2515 {
2516 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2517 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2518 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2519 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2520 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2521 }
2522
2523 if (fGlobal)
2524 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2525 else
2526 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2527 }
2528 else
2529 {
2530#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2531 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2532 if (pPool->cDirtyPages)
2533 {
2534 PGM_LOCK_VOID(pVM);
2535 pgmPoolResetDirtyPages(pVM);
2536 PGM_UNLOCK(pVM);
2537 }
2538#endif
2539 if (fGlobal)
2540 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2541 else
2542 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2543
2544 /*
2545 * Flush PAE PDPTEs.
2546 */
2547 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2548 pgmGstFlushPaePdpes(pVCpu);
2549 }
2550
2551 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2552 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2553 return rc;
2554}
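/*
 * Editor's illustrative sketch -- not part of the original file.  It shows the call shape a
 * MOV CR3 style path would use and why the VINF_PGM_SYNC_CR3 status can be folded into
 * success: the corresponding force action flag is set as well, so the sync is performed
 * later anyway.  The helper name pgmSketchLoadGuestCr3 is hypothetical.
 */
static int pgmSketchLoadGuestCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, bool fGlobalFlush)
{
    int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobalFlush);
    if (rc == VINF_PGM_SYNC_CR3)
        rc = VINF_SUCCESS; /* Safe: VMCPU_FF_PGM_SYNC_CR3* is pending and will trigger the sync. */
    return rc;
}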
2555
2556
2557/**
2558 * Performs and schedules necessary updates following a CR3 load or reload when
2559 * using nested or extended paging.
2560 *
2561 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2562 * TLB and triggering a SyncCR3.
2563 *
2564 * This will normally involve mapping the guest PD or nPDPT
2565 *
2566 * @returns VBox status code.
2567 * @retval VINF_SUCCESS.
2568 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2569 * paging modes). This can safely be ignored and overridden since the
2570 * FF will be set too then.
2571 * @param pVCpu The cross context virtual CPU structure.
2572 * @param cr3 The new CR3.
2573 */
2574VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2575{
2576 VMCPU_ASSERT_EMT(pVCpu);
2577
2578 /* We assume we're only called in nested paging mode. */
2579 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2580
2581 /*
2582 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2583 */
2584 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2585 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2586#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2587 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2588 {
2589 LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
2590 RTGCPHYS GCPhysOut;
2591 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2592 if (RT_SUCCESS(rc))
2593 GCPhysCR3 = GCPhysOut;
2594 else
2595 {
2596 /* CR3 SLAT translation failed but we try to pretend it
2597 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2598 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2599 int const rc2 = pgmGstUnmapCr3(pVCpu);
2600 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2601 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2602 return rc2;
2603 }
2604 }
2605#endif
2606
2607 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2608 int rc = VINF_SUCCESS;
2609 if (GCPhysOldCR3 != GCPhysCR3)
2610 {
2611 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2612 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2613 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2614
2615 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2616 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2617
2618 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2619 }
2620 /*
2621 * Flush PAE PDPTEs.
2622 */
2623 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2624 pgmGstFlushPaePdpes(pVCpu);
2625
2626 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2627 return rc;
2628}
2629
2630
2631/**
2632 * Synchronize the paging structures.
2633 *
2634 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2635 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
2636 * in several places, most importantly whenever the CR3 is loaded.
2637 *
2638 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2639 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2640 * the VMM into guest context.
2641 * @param pVCpu The cross context virtual CPU structure.
2642 * @param cr0 Guest context CR0 register
2643 * @param cr3 Guest context CR3 register
2644 * @param cr4 Guest context CR4 register
2645 * @param fGlobal Including global page directories or not
2646 */
2647VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2648{
2649 int rc;
2650
2651 VMCPU_ASSERT_EMT(pVCpu);
2652
2653 /*
2654 * The pool may have pending stuff and even require a return to ring-3 to
2655 * clear the whole thing.
2656 */
2657 rc = pgmPoolSyncCR3(pVCpu);
2658 if (rc != VINF_SUCCESS)
2659 return rc;
2660
2661 /*
2662 * We might be called when we shouldn't.
2663 *
2664 * The mode switching will ensure that the PD is resynced after every mode
2665 * switch. So, if we find ourselves here when in protected or real mode
2666 * we can safely clear the FF and return immediately.
2667 */
2668 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2669 {
2670 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2671 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2672 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2673 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2674 return VINF_SUCCESS;
2675 }
2676
2677 /* If global pages are not supported, then all flushes are global. */
2678 if (!(cr4 & X86_CR4_PGE))
2679 fGlobal = true;
2680 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2681 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2682
2683 /*
2684 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2685 * This should be done before SyncCR3.
2686 */
2687 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2688 {
2689 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2690
2691 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2692 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2693#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2694 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2695 {
2696 RTGCPHYS GCPhysOut;
2697 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2698 if (RT_SUCCESS(rc2))
2699 GCPhysCR3 = GCPhysOut;
2700 else
2701 {
2702 /* CR3 SLAT translation failed but we try to pretend it
2703 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2704 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2705 rc2 = pgmGstUnmapCr3(pVCpu);
2706 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2707 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2708 return rc2;
2709 }
2710 }
2711#endif
2712 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2713 if (GCPhysOldCR3 != GCPhysCR3)
2714 {
2715 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2716 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2717 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2718 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2719 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2720 }
2721
2722 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2723 if ( rc == VINF_PGM_SYNC_CR3
2724 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2725 {
2726 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2727#ifdef IN_RING3
2728 rc = pgmPoolSyncCR3(pVCpu);
2729#else
2730 if (rc == VINF_PGM_SYNC_CR3)
2731 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2732 return VINF_PGM_SYNC_CR3;
2733#endif
2734 }
2735 AssertRCReturn(rc, rc);
2736 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2737 }
2738
2739 /*
2740 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2741 */
2742 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2743
2744 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2745 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2746 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2747 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2748
2749 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2750 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2751 if (rc == VINF_SUCCESS)
2752 {
2753 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2754 {
2755 /* Go back to ring 3 if a pgm pool sync is again pending. */
2756 return VINF_PGM_SYNC_CR3;
2757 }
2758
2759 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2760 {
2761 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2762 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2763 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2764 }
2765 }
2766
2767 /*
2768 * Now flush the CR3 (guest context).
2769 */
2770 if (rc == VINF_SUCCESS)
2771 PGM_INVL_VCPU_TLBS(pVCpu);
2772 return rc;
2773}
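/*
 * Editor's illustrative sketch -- not part of the original file.  It shows the force-flag
 * driven call pattern described above: when either sync flag is pending, the current control
 * registers are passed in and fGlobal reflects whether the global flag was raised.  The
 * helper name pgmSketchServiceSyncCr3 is hypothetical; the CPUM getters are assumed to be
 * the usual ones.
 */
static int pgmSketchServiceSyncCr3(PVMCPUCC pVCpu)
{
    if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        return VINF_SUCCESS;
    bool const fGlobal = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    return PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), fGlobal);
}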
2774
2775
2776/**
2777 * Maps all the PAE PDPE entries.
2778 *
2779 * @returns VBox status code.
2780 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2781 * @param paPaePdpes The new PAE PDPE values.
2782 *
2783 * @remarks This function may be invoked during the process of changing the guest
2784 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2785 * reflect PAE paging just yet.
2786 */
2787VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2788{
2789 Assert(paPaePdpes);
2790 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2791 {
2792 X86PDPE const PaePdpe = paPaePdpes[i];
2793
2794 /*
2795 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2796         * is deferred.[1] Also, different situations require different handling of invalid
2797 * PDPE entries. Here we assume the caller has already validated or doesn't require
2798 * validation of the PDPEs.
2799 *
2800 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2801 */
2802 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2803 {
2804 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2805 RTHCPTR HCPtr;
2806 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2807
2808 PGM_LOCK_VOID(pVM);
2809 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2810 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2811 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2812 PGM_UNLOCK(pVM);
2813 if (RT_SUCCESS(rc))
2814 {
2815#ifdef IN_RING3
2816 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2817 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2818#else
2819 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2820 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2821#endif
2822 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2823 continue;
2824 }
2825            AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2826 }
2827 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2828 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2829 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2830 }
2831
2832 return VINF_SUCCESS;
2833}
2834
2835
2836/**
2837 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2838 *
2839 * @returns VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2841 * @param cr3 The guest CR3 value.
2842 *
2843 * @remarks This function may be invoked during the process of changing the guest
2844 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2845 * PAE paging just yet.
2846 */
2847VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2848{
2849 /*
2850 * Read the page-directory-pointer table (PDPT) at CR3.
2851 */
2852 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2853 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2854
2855#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2856 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2857 {
2858 RTGCPHYS GCPhysOut;
2859 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2860 if (RT_SUCCESS(rc))
2861 GCPhysCR3 = GCPhysOut;
2862 else
2863 {
2864 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2865 return rc;
2866 }
2867 }
2868#endif
2869
2870 RTHCPTR HCPtrGuestCr3;
2871 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
2872 if (RT_SUCCESS(rc))
2873 {
2874 /*
2875 * Validate the page-directory-pointer table entries (PDPE).
2876 */
2877 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2878 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2879 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2880 {
2881 /*
2882 * Map the PDPT.
2883 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2884 * that PGMFlushTLB will be called soon and only a change to CR3 then
2885 * will cause the shadow page tables to be updated.
2886 */
2887#ifdef IN_RING3
2888 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2889 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2890#else
2891 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2892 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2893#endif
2894
2895 /*
2896 * Update CPUM.
2897 * We do this prior to mapping the PDPEs to keep the order consistent
2898 * with what's used in HM. In practice, it doesn't really matter.
2899 */
2900 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2901
2902 /*
2903 * Map the PDPEs.
2904 */
2905 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2906 if (RT_SUCCESS(rc))
2907 {
2908#ifdef IN_RING3
2909 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
2910 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
2911#else
2912 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
2913 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
2914#endif
2915 }
2916 }
2917 else
2918 rc = VERR_PGM_PAE_PDPE_RSVD;
2919 }
2920 return rc;
2921}
2922
2923
2924/**
2925 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2926 *
2927 * @returns VBox status code, with the following informational code for
2928 * VM scheduling.
2929 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2930 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2931 *
2932 * @param pVCpu The cross context virtual CPU structure.
2933 * @param cr0 The new cr0.
2934 * @param cr4 The new cr4.
2935 * @param efer The new extended feature enable register.
2936 * @param fForce Whether to force a mode change.
2937 */
2938VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2939{
2940 VMCPU_ASSERT_EMT(pVCpu);
2941
2942 /*
2943 * Calc the new guest mode.
2944 *
2945 * Note! We check PG before PE and without requiring PE because of the
2946 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2947 */
2948 PGMMODE enmGuestMode;
2949 if (cr0 & X86_CR0_PG)
2950 {
2951 if (!(cr4 & X86_CR4_PAE))
2952 {
2953 bool const fPse = !!(cr4 & X86_CR4_PSE);
2954 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2955 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2956 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2957 enmGuestMode = PGMMODE_32_BIT;
2958 }
2959 else if (!(efer & MSR_K6_EFER_LME))
2960 {
2961 if (!(efer & MSR_K6_EFER_NXE))
2962 enmGuestMode = PGMMODE_PAE;
2963 else
2964 enmGuestMode = PGMMODE_PAE_NX;
2965 }
2966 else
2967 {
2968 if (!(efer & MSR_K6_EFER_NXE))
2969 enmGuestMode = PGMMODE_AMD64;
2970 else
2971 enmGuestMode = PGMMODE_AMD64_NX;
2972 }
2973 }
2974 else if (!(cr0 & X86_CR0_PE))
2975 enmGuestMode = PGMMODE_REAL;
2976 else
2977 enmGuestMode = PGMMODE_PROTECTED;
2978
2979 /*
2980 * Did it change?
2981 */
2982 if ( !fForce
2983 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2984 return VINF_SUCCESS;
2985
2986 /* Flush the TLB */
2987 PGM_INVL_VCPU_TLBS(pVCpu);
2988 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2989}
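/*
 * Editor's illustrative sketch -- not part of the original file.  It restates the decision
 * above (PG=0,PE=0 -> REAL; PG=0,PE=1 -> PROTECTED; PG=1,PAE=0 -> 32_BIT; PG=1,PAE=1,LME=0
 * -> PAE or PAE_NX per NXE; PG=1,PAE=1,LME=1 -> AMD64 or AMD64_NX per NXE) and shows a
 * hypothetical caller re-evaluating the mode after a control register write using the usual
 * CPUM getters.  The helper name pgmSketchReevaluateGuestMode is hypothetical.
 */
static int pgmSketchReevaluateGuestMode(PVMCPUCC pVCpu)
{
    /* fForce=false: the mode switch is skipped when the computed PGMMODE is unchanged. */
    return PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu),
                         CPUMGetGuestEFER(pVCpu), false /*fForce*/);
}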
2990
2991
2992/**
2993 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2994 *
2995 * @returns PGM_TYPE_*.
2996 * @param pgmMode The mode value to convert.
2997 */
2998DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2999{
3000 switch (pgmMode)
3001 {
3002 case PGMMODE_REAL: return PGM_TYPE_REAL;
3003 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3004 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3005 case PGMMODE_PAE:
3006 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3007 case PGMMODE_AMD64:
3008 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3009 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3010 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3011 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3012 case PGMMODE_EPT: return PGM_TYPE_EPT;
3013 case PGMMODE_NONE: return PGM_TYPE_NONE;
3014 default:
3015 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3016 }
3017}
3018
3019
3020/**
3021 * Calculates the shadow paging mode.
3022 *
3023 * @returns The shadow paging mode.
3024 * @param pVM The cross context VM structure.
3025 * @param enmGuestMode The guest mode.
3026 * @param enmHostMode The host mode.
3027 * @param enmShadowMode The current shadow mode.
3028 */
3029static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3030{
3031 switch (enmGuestMode)
3032 {
3033 /*
3034 * When switching to real or protected mode we don't change
3035 * anything since it's likely that we'll switch back pretty soon.
3036 *
3037         * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID,
3038         * and this code is supposed to determine which shadow paging mode
3039         * and switcher to use during init.
3040 */
3041 case PGMMODE_REAL:
3042 case PGMMODE_PROTECTED:
3043 if ( enmShadowMode != PGMMODE_INVALID
3044 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
3045 break; /* (no change) */
3046
3047 switch (enmHostMode)
3048 {
3049 case SUPPAGINGMODE_32_BIT:
3050 case SUPPAGINGMODE_32_BIT_GLOBAL:
3051 enmShadowMode = PGMMODE_32_BIT;
3052 break;
3053
3054 case SUPPAGINGMODE_PAE:
3055 case SUPPAGINGMODE_PAE_NX:
3056 case SUPPAGINGMODE_PAE_GLOBAL:
3057 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3058 enmShadowMode = PGMMODE_PAE;
3059 break;
3060
3061 case SUPPAGINGMODE_AMD64:
3062 case SUPPAGINGMODE_AMD64_GLOBAL:
3063 case SUPPAGINGMODE_AMD64_NX:
3064 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3065 enmShadowMode = PGMMODE_PAE;
3066 break;
3067
3068 default:
3069 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3070 }
3071 break;
3072
3073 case PGMMODE_32_BIT:
3074 switch (enmHostMode)
3075 {
3076 case SUPPAGINGMODE_32_BIT:
3077 case SUPPAGINGMODE_32_BIT_GLOBAL:
3078 enmShadowMode = PGMMODE_32_BIT;
3079 break;
3080
3081 case SUPPAGINGMODE_PAE:
3082 case SUPPAGINGMODE_PAE_NX:
3083 case SUPPAGINGMODE_PAE_GLOBAL:
3084 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3085 enmShadowMode = PGMMODE_PAE;
3086 break;
3087
3088 case SUPPAGINGMODE_AMD64:
3089 case SUPPAGINGMODE_AMD64_GLOBAL:
3090 case SUPPAGINGMODE_AMD64_NX:
3091 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3092 enmShadowMode = PGMMODE_PAE;
3093 break;
3094
3095 default:
3096 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3097 }
3098 break;
3099
3100 case PGMMODE_PAE:
3101 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3102 switch (enmHostMode)
3103 {
3104 case SUPPAGINGMODE_32_BIT:
3105 case SUPPAGINGMODE_32_BIT_GLOBAL:
3106 enmShadowMode = PGMMODE_PAE;
3107 break;
3108
3109 case SUPPAGINGMODE_PAE:
3110 case SUPPAGINGMODE_PAE_NX:
3111 case SUPPAGINGMODE_PAE_GLOBAL:
3112 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3113 enmShadowMode = PGMMODE_PAE;
3114 break;
3115
3116 case SUPPAGINGMODE_AMD64:
3117 case SUPPAGINGMODE_AMD64_GLOBAL:
3118 case SUPPAGINGMODE_AMD64_NX:
3119 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3120 enmShadowMode = PGMMODE_PAE;
3121 break;
3122
3123 default:
3124 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3125 }
3126 break;
3127
3128 case PGMMODE_AMD64:
3129 case PGMMODE_AMD64_NX:
3130 switch (enmHostMode)
3131 {
3132 case SUPPAGINGMODE_32_BIT:
3133 case SUPPAGINGMODE_32_BIT_GLOBAL:
3134 enmShadowMode = PGMMODE_AMD64;
3135 break;
3136
3137 case SUPPAGINGMODE_PAE:
3138 case SUPPAGINGMODE_PAE_NX:
3139 case SUPPAGINGMODE_PAE_GLOBAL:
3140 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3141 enmShadowMode = PGMMODE_AMD64;
3142 break;
3143
3144 case SUPPAGINGMODE_AMD64:
3145 case SUPPAGINGMODE_AMD64_GLOBAL:
3146 case SUPPAGINGMODE_AMD64_NX:
3147 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3148 enmShadowMode = PGMMODE_AMD64;
3149 break;
3150
3151 default:
3152 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3153 }
3154 break;
3155
3156 default:
3157 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3158 }
3159
3160 /*
3161 * Override the shadow mode when NEM or nested paging is active.
3162 */
3163 if (VM_IS_NEM_ENABLED(pVM))
3164 {
3165 pVM->pgm.s.fNestedPaging = true;
3166 enmShadowMode = PGMMODE_NONE;
3167 }
3168 else
3169 {
3170 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3171 pVM->pgm.s.fNestedPaging = fNestedPaging;
3172 if (fNestedPaging)
3173 {
3174 if (HMIsVmxActive(pVM))
3175 enmShadowMode = PGMMODE_EPT;
3176 else
3177 {
3178 /* The nested SVM paging depends on the host one. */
3179 Assert(HMIsSvmActive(pVM));
3180 if ( enmGuestMode == PGMMODE_AMD64
3181 || enmGuestMode == PGMMODE_AMD64_NX)
3182 enmShadowMode = PGMMODE_NESTED_AMD64;
3183 else
3184 switch (pVM->pgm.s.enmHostMode)
3185 {
3186 case SUPPAGINGMODE_32_BIT:
3187 case SUPPAGINGMODE_32_BIT_GLOBAL:
3188 enmShadowMode = PGMMODE_NESTED_32BIT;
3189 break;
3190
3191 case SUPPAGINGMODE_PAE:
3192 case SUPPAGINGMODE_PAE_GLOBAL:
3193 case SUPPAGINGMODE_PAE_NX:
3194 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3195 enmShadowMode = PGMMODE_NESTED_PAE;
3196 break;
3197
3198 case SUPPAGINGMODE_AMD64:
3199 case SUPPAGINGMODE_AMD64_GLOBAL:
3200 case SUPPAGINGMODE_AMD64_NX:
3201 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3202 enmShadowMode = PGMMODE_NESTED_AMD64;
3203 break;
3204
3205 default:
3206 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3207 }
3208 }
3209 }
3210#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3211 else
3212 {
3213 /* Nested paging is a requirement for nested VT-x. */
3214 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3215 }
3216#endif
3217 }
3218
3219 return enmShadowMode;
3220}
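
/*
 * In short: with NEM active the shadow mode is PGMMODE_NONE, with VT-x nested paging it
 * is PGMMODE_EPT, with AMD-V nested paging it is one of the PGMMODE_NESTED_* modes picked
 * from the guest (64-bit) or host paging mode, and without nested paging PAE and AMD64
 * guests simply get a matching PAE or AMD64 shadow regardless of the host paging mode.
 */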
3221
3222
3223/**
3224 * Performs the actual mode change.
3225 * This is called by PGMChangeMode and pgmR3InitPaging().
3226 *
3227 * @returns VBox status code. May suspend or power off the VM on error, but this
3228 * will trigger using FFs and not informational status codes.
3229 *
3230 * @param pVM The cross context VM structure.
3231 * @param pVCpu The cross context virtual CPU structure.
3232 * @param enmGuestMode The new guest mode. This is assumed to be different from
3233 * the current mode.
3234 */
3235VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3236{
3237 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3238 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3239
3240 /*
3241 * Calc the shadow mode and switcher.
3242 */
3243 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3244
3245 /*
3246 * Exit old mode(s).
3247 */
3248 /* shadow */
3249 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3250 {
3251 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3252 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3253 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3254 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3255 {
3256 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3257 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3258 }
3259 }
3260 else
3261 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3262
3263 /* guest */
3264 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3265 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3266 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3267 {
3268 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3269 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3270 }
3271 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3272 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3273 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3274
3275 /*
3276 * Change the paging mode data indexes.
3277 */
3278 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3279 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3280 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3281 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3282 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3283 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3284 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3285#ifdef IN_RING3
3286 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3287#endif
3288
3289 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3290 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3291 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3292 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3293 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3294 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3295 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3296#ifdef IN_RING3
3297 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3298#endif
3299
3300 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3301 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3302 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3303 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3304 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3305 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3306 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3307 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3308 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3309 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3310#ifdef VBOX_STRICT
3311 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3312#endif
3313
3314 /*
3315 * Enter new shadow mode (if changed).
3316 */
3317 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3318 {
3319 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3320 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3321 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3322 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3323 }
3324
3325 /*
3326 * Always flag the necessary updates
3327 */
3328 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3329
3330 /*
3331 * Enter the new guest and shadow+guest modes.
3332 */
3333 /* Calc the new CR3 value. */
3334 RTGCPHYS GCPhysCR3;
3335 switch (enmGuestMode)
3336 {
3337 case PGMMODE_REAL:
3338 case PGMMODE_PROTECTED:
3339 GCPhysCR3 = NIL_RTGCPHYS;
3340 break;
3341
3342 case PGMMODE_32_BIT:
3343 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3344 break;
3345
3346 case PGMMODE_PAE_NX:
3347 case PGMMODE_PAE:
3348 if (!pVM->cpum.ro.GuestFeatures.fPae)
3349#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3350 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3351 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3352#else
3353 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3354
3355#endif
3356 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3357 break;
3358
3359#ifdef VBOX_WITH_64_BITS_GUESTS
3360 case PGMMODE_AMD64_NX:
3361 case PGMMODE_AMD64:
3362 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3363 break;
3364#endif
3365 default:
3366 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3367 }
3368
3369#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3370 /*
3371 * If a nested-guest is using EPT paging:
3372 * - Update the second-level address translation (SLAT) mode.
3373 * - Indicate that the CR3 is nested-guest physical address.
3374 */
3375 if ( CPUMIsGuestVmxEptPagingEnabled(pVCpu)
3376 && PGMMODE_WITH_PAGING(enmGuestMode))
3377 {
3378 /*
3379 * Translate CR3 to its guest-physical address.
3380 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3381 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3382 */
3383 PGMPTWALK Walk;
3384 PGMPTWALKGST GstWalk;
3385 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3386 if (RT_SUCCESS(rc))
3387 { /* likely */ }
3388 else
3389 {
3390 /*
3391 * SLAT failed but we avoid reporting this to the caller because the caller
3392 * is not supposed to fail. The only time the caller needs to indicate a
3393 * failure to software is when PAE paging is used by the nested-guest, but
3394 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3395 * In all other cases, the failure will be indicated when CR3 is translated
3396 * on the next linear-address memory access.
3397 * See Intel spec. 27.2.1 "EPT Overview".
3398 */
3399 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3400
3401 /* Trying to coax PGM to succeed for the time being... */
3402 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3403 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3404 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3405 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3406 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3407 return VINF_SUCCESS;
3408 }
3409
3410 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3411 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3412 GCPhysCR3 = Walk.GCPhys;
3413 }
3414 else
3415 {
3416 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3417 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3418 }
3419#endif
3420
3421 /*
3422 * Enter the new guest mode.
3423 */
3424 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3425 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3426 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3427
3428 /* Set the new guest CR3 (and nested-guest CR3). */
3429 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3430
3431 /* status codes. */
3432 AssertRC(rc);
3433 AssertRC(rc2);
3434 if (RT_SUCCESS(rc))
3435 {
3436 rc = rc2;
3437 if (RT_SUCCESS(rc)) /* no informational status codes. */
3438 rc = VINF_SUCCESS;
3439 }
3440
3441 /*
3442 * Notify HM.
3443 */
3444 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3445 return rc;
3446}
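
/*
 * Illustrative sketch: one way a caller could derive the new guest paging mode from
 * CR0/CR4/EFER before invoking PGMHCChangeMode().  This is a simplified assumption for
 * illustration only; the actual caller (PGMChangeMode) lives elsewhere in PGM.
 */
#if 0 /* example only, not compiled */
static int pgmExampleSwitchMode(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    PGMMODE enmGuestMode;
    if (!(cr0 & X86_CR0_PE))
        enmGuestMode = PGMMODE_REAL;
    else if (!(cr0 & X86_CR0_PG))
        enmGuestMode = PGMMODE_PROTECTED;
    else if (!(cr4 & X86_CR4_PAE))
        enmGuestMode = PGMMODE_32_BIT;
    else if (!(efer & MSR_K6_EFER_LMA))
        enmGuestMode = (efer & MSR_K6_EFER_NXE) ? PGMMODE_PAE_NX   : PGMMODE_PAE;
    else
        enmGuestMode = (efer & MSR_K6_EFER_NXE) ? PGMMODE_AMD64_NX : PGMMODE_AMD64;

    /* PGMHCChangeMode assumes the mode actually changes, so check first. */
    if (enmGuestMode == PGMGetGuestMode(pVCpu))
        return VINF_SUCCESS;
    return PGMHCChangeMode(pVM, pVCpu, enmGuestMode);
}
#endif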
3447
3448
3449/**
3450 * Called by CPUM or REM when CR0.WP changes to 1.
3451 *
3452 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3453 * @thread EMT
3454 */
3455VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3456{
3457 /*
3458 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3459 *
3460 * Use the counter to judge whether there might be pool pages with active
3461 * hacks in them. If there are, we will be running the risk of messing up
3462 * the guest by allowing it to write to read-only pages. Thus, we have to
3463 * clear the page pool ASAP if there is the slightest chance.
3464 */
3465 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3466 {
3467 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3468
3469 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3470 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3471 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3472 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3473 }
3474}
3475
3476
3477/**
3478 * Gets the current guest paging mode.
3479 *
3480 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3481 *
3482 * @returns The current paging mode.
3483 * @param pVCpu The cross context virtual CPU structure.
3484 */
3485VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3486{
3487 return pVCpu->pgm.s.enmGuestMode;
3488}
3489
3490
3491/**
3492 * Gets the current shadow paging mode.
3493 *
3494 * @returns The current paging mode.
3495 * @param pVCpu The cross context virtual CPU structure.
3496 */
3497VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3498{
3499 return pVCpu->pgm.s.enmShadowMode;
3500}
3501
3502
3503/**
3504 * Gets the current host paging mode.
3505 *
3506 * @returns The current paging mode.
3507 * @param pVM The cross context VM structure.
3508 */
3509VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3510{
3511 switch (pVM->pgm.s.enmHostMode)
3512 {
3513 case SUPPAGINGMODE_32_BIT:
3514 case SUPPAGINGMODE_32_BIT_GLOBAL:
3515 return PGMMODE_32_BIT;
3516
3517 case SUPPAGINGMODE_PAE:
3518 case SUPPAGINGMODE_PAE_GLOBAL:
3519 return PGMMODE_PAE;
3520
3521 case SUPPAGINGMODE_PAE_NX:
3522 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3523 return PGMMODE_PAE_NX;
3524
3525 case SUPPAGINGMODE_AMD64:
3526 case SUPPAGINGMODE_AMD64_GLOBAL:
3527 return PGMMODE_AMD64;
3528
3529 case SUPPAGINGMODE_AMD64_NX:
3530 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3531 return PGMMODE_AMD64_NX;
3532
3533 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3534 }
3535
3536 return PGMMODE_INVALID;
3537}
3538
3539
3540/**
3541 * Get mode name.
3542 *
3543 * @returns read-only name string.
3544 * @param   enmMode     The mode whose name is desired.
3545 */
3546VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3547{
3548 switch (enmMode)
3549 {
3550 case PGMMODE_REAL: return "Real";
3551 case PGMMODE_PROTECTED: return "Protected";
3552 case PGMMODE_32_BIT: return "32-bit";
3553 case PGMMODE_PAE: return "PAE";
3554 case PGMMODE_PAE_NX: return "PAE+NX";
3555 case PGMMODE_AMD64: return "AMD64";
3556 case PGMMODE_AMD64_NX: return "AMD64+NX";
3557 case PGMMODE_NESTED_32BIT: return "Nested-32";
3558 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3559 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3560 case PGMMODE_EPT: return "EPT";
3561 case PGMMODE_NONE: return "None";
3562 default: return "unknown mode value";
3563 }
3564}
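
/*
 * For illustration, the getters and name helpers above are typically combined when
 * logging paging state, along the lines of:
 *
 *      Log(("Guest mode: %s, shadow mode: %s\n",
 *           PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *           PGMGetModeName(PGMGetShadowMode(pVCpu))));
 */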
3565
3566
3567#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3568/**
3569 * Gets the SLAT mode name.
3570 *
3571 * @returns The read-only SLAT mode descriptive string.
3572 * @param enmSlatMode The SLAT mode value.
3573 */
3574VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3575{
3576 switch (enmSlatMode)
3577 {
3578 case PGMSLAT_DIRECT: return "Direct";
3579 case PGMSLAT_EPT: return "EPT";
3580 case PGMSLAT_32BIT: return "32-bit";
3581 case PGMSLAT_PAE: return "PAE";
3582 case PGMSLAT_AMD64: return "AMD64";
3583 default: return "Unknown";
3584 }
3585}
3586#endif
3587
3588
3589/**
3590 * Gets the physical address represented in the guest CR3 as PGM sees it.
3591 *
3592 * This is mainly for logging and debugging.
3593 *
3594 * @returns PGM's guest CR3 value.
3595 * @param pVCpu The cross context virtual CPU structure.
3596 */
3597VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3598{
3599 return pVCpu->pgm.s.GCPhysCR3;
3600}
3601
3602
3603
3604/**
3605 * Notification from CPUM that the EFER.NXE bit has changed.
3606 *
3607 * @param pVCpu The cross context virtual CPU structure of the CPU for
3608 * which EFER changed.
3609 * @param fNxe The new NXE state.
3610 */
3611VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3612{
3613/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3614 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3615
3616 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3617 if (fNxe)
3618 {
3619 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3620 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3621 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3622 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3623 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3624 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3625 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3626 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3627 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3628 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3629 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3630
3631 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3632 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3633 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3634 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3635 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3636 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3637 }
3638 else
3639 {
3640 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3641 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3642 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3643 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3644        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3645 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3646 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3647 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3648 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3649 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3650 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3651
3652 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3653 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3654 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3655 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3656 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3657 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3658 }
3659}
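
/*
 * Illustrative note: the fGst*Mbz*Mask values adjusted above are the must-be-zero masks
 * the guest page-table walkers check for reserved-bit violations, conceptually:
 *
 *      if (uPte & pVCpu->pgm.s.fGstPaeMbzPteMask)
 *          ...treat the access as a reserved-bit page fault...
 *
 * Clearing X86_PTE_PAE_NX (and friends) from the masks when NXE is set makes the NX bit
 * legal; adding it back when NXE is clear turns it into a reserved bit again.
 */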
3660
3661
3662/**
3663 * Check if any PGM pool pages are marked dirty (not monitored).
3664 *
3665 * @returns true if there are dirty pages, false if not.
3666 * @param pVM The cross context VM structure.
3667 */
3668VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3669{
3670 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3671}
3672
3673
3674/**
3675 * Check if this VCPU currently owns the PGM lock.
3676 *
3677 * @returns bool owner/not owner
3678 * @param pVM The cross context VM structure.
3679 */
3680VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3681{
3682 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3683}
3684
3685
3686/**
3687 * Enable or disable large page usage.
3688 *
3689 * @returns VBox status code.
3690 * @param pVM The cross context VM structure.
3691 * @param fUseLargePages Use/not use large pages
3692 */
3693VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3694{
3695 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3696
3697 pVM->pgm.s.fUseLargePages = fUseLargePages;
3698 return VINF_SUCCESS;
3699}
3700
3701
3702/**
3703 * Acquire the PGM lock.
3704 *
3705 * @returns VBox status code
3706 * @param pVM The cross context VM structure.
3707 * @param fVoid Set if the caller cannot handle failure returns.
3708 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3709 */
3710#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3711int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3712#else
3713int pgmLock(PVMCC pVM, bool fVoid)
3714#endif
3715{
3716#if defined(VBOX_STRICT)
3717 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3718#else
3719 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3720#endif
3721 if (RT_SUCCESS(rc))
3722 return rc;
3723 if (fVoid)
3724 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3725 else
3726 AssertRC(rc);
3727 return rc;
3728}
3729
3730
3731/**
3732 * Release the PGM lock.
3733 *
3734 * @returns VBox status code
3735 * @param pVM The cross context VM structure.
3736 */
3737void pgmUnlock(PVMCC pVM)
3738{
3739 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3740 pVM->pgm.s.cDeprecatedPageLocks = 0;
3741 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3742 if (rc == VINF_SEM_NESTED)
3743 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3744}
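
/*
 * Usage sketch: PGM code normally takes this lock through wrapper macros such as
 * PGM_LOCK_VOID() and PGM_UNLOCK() (see PGMAssertCR3 and PGMSetGuestEptPtr further
 * down in this file), i.e. the typical pattern is:
 *
 *      PGM_LOCK_VOID(pVM);
 *      ...access state guarded by pVM->pgm.s.CritSectX...
 *      PGM_UNLOCK(pVM);
 */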
3745
3746
3747#if !defined(IN_R0) || defined(LOG_ENABLED)
3748
3749/** Format handler for PGMPAGE.
3750 * @copydoc FNRTSTRFORMATTYPE */
3751static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3752 const char *pszType, void const *pvValue,
3753 int cchWidth, int cchPrecision, unsigned fFlags,
3754 void *pvUser)
3755{
3756 size_t cch;
3757 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3758 if (RT_VALID_PTR(pPage))
3759 {
3760 char szTmp[64+80];
3761
3762 cch = 0;
3763
3764 /* The single char state stuff. */
3765 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3766 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3767
3768# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3769 if (IS_PART_INCLUDED(5))
3770 {
3771 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3772 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3773 }
3774
3775 /* The type. */
3776 if (IS_PART_INCLUDED(4))
3777 {
3778 szTmp[cch++] = ':';
3779 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3780 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3781 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3782 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3783 }
3784
3785 /* The numbers. */
3786 if (IS_PART_INCLUDED(3))
3787 {
3788 szTmp[cch++] = ':';
3789 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3790 }
3791
3792 if (IS_PART_INCLUDED(2))
3793 {
3794 szTmp[cch++] = ':';
3795 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3796 }
3797
3798 if (IS_PART_INCLUDED(6))
3799 {
3800 szTmp[cch++] = ':';
3801 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3802 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3803 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3804 }
3805# undef IS_PART_INCLUDED
3806
3807 cch = pfnOutput(pvArgOutput, szTmp, cch);
3808 }
3809 else
3810 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3811 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3812 return cch;
3813}
3814
3815
3816/** Format handler for PGMRAMRANGE.
3817 * @copydoc FNRTSTRFORMATTYPE */
3818static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3819 const char *pszType, void const *pvValue,
3820 int cchWidth, int cchPrecision, unsigned fFlags,
3821 void *pvUser)
3822{
3823 size_t cch;
3824 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3825 if (RT_VALID_PTR(pRam))
3826 {
3827 char szTmp[80];
3828 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3829 cch = pfnOutput(pvArgOutput, szTmp, cch);
3830 }
3831 else
3832 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3833 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3834 return cch;
3835}
3836
3837/** Format type handlers to be registered/deregistered. */
3838static const struct
3839{
3840 char szType[24];
3841 PFNRTSTRFORMATTYPE pfnHandler;
3842} g_aPgmFormatTypes[] =
3843{
3844 { "pgmpage", pgmFormatTypeHandlerPage },
3845 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3846};
3847
3848#endif /* !IN_R0 || LOG_ENABLED */
3849
3850/**
3851 * Registers the global string format types.
3852 *
3853 * This should be called at module load time or in some other manner that ensures
3854 * that it's called exactly one time.
3855 *
3856 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3857 */
3858VMMDECL(int) PGMRegisterStringFormatTypes(void)
3859{
3860#if !defined(IN_R0) || defined(LOG_ENABLED)
3861 int rc = VINF_SUCCESS;
3862 unsigned i;
3863 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3864 {
3865 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3866# ifdef IN_RING0
3867 if (rc == VERR_ALREADY_EXISTS)
3868 {
3869 /* in case of cleanup failure in ring-0 */
3870 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3871 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3872 }
3873# endif
3874 }
3875 if (RT_FAILURE(rc))
3876 while (i-- > 0)
3877 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3878
3879 return rc;
3880#else
3881 return VINF_SUCCESS;
3882#endif
3883}
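
/*
 * Usage sketch: once registered, these format types are referenced through IPRT's
 * %R[...] format extension in log and assertion messages, e.g. (illustrative only):
 *
 *      Log(("Page at %RGp: %R[pgmpage]\n", GCPhys, pPage));
 *      Log(("RAM range: %R[pgmramrange]\n", pRam));
 */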
3884
3885
3886/**
3887 * Deregisters the global string format types.
3888 *
3889 * This should be called at module unload time or in some other manner that
3890 * ensures that it's called exactly one time.
3891 */
3892VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3893{
3894#if !defined(IN_R0) || defined(LOG_ENABLED)
3895 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3896 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3897#endif
3898}
3899
3900
3901#ifdef VBOX_STRICT
3902/**
3903 * Asserts that everything related to the guest CR3 is correctly shadowed.
3904 *
3905 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3906 * and assert the correctness of the guest CR3 mapping before asserting that the
3907 * shadow page tables are in sync with the guest page tables.
3908 *
3909 * @returns Number of conflicts.
3910 * @param pVM The cross context VM structure.
3911 * @param pVCpu The cross context virtual CPU structure.
3912 * @param cr3 The current guest CR3 register value.
3913 * @param cr4 The current guest CR4 register value.
3914 */
3915VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3916{
3917 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3918
3919 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3920 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3921 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3922
3923 PGM_LOCK_VOID(pVM);
3924 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3925 PGM_UNLOCK(pVM);
3926
3927 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3928 return cErrors;
3929}
3930#endif /* VBOX_STRICT */
3931
3932
3933/**
3934 * Updates PGM's copy of the guest's EPT pointer.
3935 *
3936 * @param pVCpu The cross context virtual CPU structure.
3937 * @param uEptPtr The EPT pointer.
3938 *
3939 * @remarks This can be called as part of VM-entry so we might be in the midst of
3940 * switching to VMX non-root mode.
3941 */
3942VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3943{
3944 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3945 PGM_LOCK_VOID(pVM);
3946 pVCpu->pgm.s.uEptPtr = uEptPtr;
3947 PGM_UNLOCK(pVM);
3948}
3949