VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@95085

Last change on this file since 95085 was 94800, checked in by vboxsync, 3 years ago

VMM/IEM,PGM: TLB work, esp. on the data one. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 141.8 KB
1/* $Id: PGMAll.cpp 94800 2022-05-03 21:49:43Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/sup.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/stam.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/hm_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
41# include <iprt/asm-amd64-x86.h>
42#endif
43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*********************************************************************************************************************************
50* Internal Functions *
51*********************************************************************************************************************************/
52DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
53DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
54DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
55#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
56static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
57 PPGMPTWALKGST pGstWalk);
58static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
59static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
60#endif
61static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
62static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
63
64
65#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
66/* Guest - EPT SLAT is identical for all guest paging modes. */
67# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
68# define PGM_GST_TYPE PGM_TYPE_EPT
69# include "PGMGstDefs.h"
70# include "PGMAllGstSlatEpt.cpp.h"
71# undef PGM_GST_TYPE
72#endif
73
74
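/*
 * Overview of the instantiation pattern used below: each block defines
 * PGM_SHW_TYPE, PGM_GST_TYPE and the PGM_*_NAME() name-mangling macros,
 * includes the PGMAllShw.h / PGMAllGst.h / PGMAllBth.h template headers so
 * that every function they define gets a mode-specific name, and then
 * #undef's the macros so the next shadow/guest combination can be generated.
 * The resulting per-mode workers are gathered into the g_aPgmGuestModeData,
 * g_aPgmShadowModeData and g_aPgmBothModeData dispatch tables further down.
 */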
75/*
76 * Shadow - 32-bit mode
77 */
78#define PGM_SHW_TYPE PGM_TYPE_32BIT
79#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
80#include "PGMAllShw.h"
81
82/* Guest - real mode */
83#define PGM_GST_TYPE PGM_TYPE_REAL
84#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
85#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
86#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
87#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
88#include "PGMGstDefs.h"
89#include "PGMAllGst.h"
90#include "PGMAllBth.h"
91#undef BTH_PGMPOOLKIND_PT_FOR_PT
92#undef BTH_PGMPOOLKIND_ROOT
93#undef PGM_BTH_NAME
94#undef PGM_GST_TYPE
95#undef PGM_GST_NAME
96
97/* Guest - protected mode */
98#define PGM_GST_TYPE PGM_TYPE_PROT
99#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
100#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
101#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
102#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
103#include "PGMGstDefs.h"
104#include "PGMAllGst.h"
105#include "PGMAllBth.h"
106#undef BTH_PGMPOOLKIND_PT_FOR_PT
107#undef BTH_PGMPOOLKIND_ROOT
108#undef PGM_BTH_NAME
109#undef PGM_GST_TYPE
110#undef PGM_GST_NAME
111
112/* Guest - 32-bit mode */
113#define PGM_GST_TYPE PGM_TYPE_32BIT
114#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
115#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
116#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
117#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
118#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
119#include "PGMGstDefs.h"
120#include "PGMAllGst.h"
121#include "PGMAllBth.h"
122#undef BTH_PGMPOOLKIND_PT_FOR_BIG
123#undef BTH_PGMPOOLKIND_PT_FOR_PT
124#undef BTH_PGMPOOLKIND_ROOT
125#undef PGM_BTH_NAME
126#undef PGM_GST_TYPE
127#undef PGM_GST_NAME
128
129#undef PGM_SHW_TYPE
130#undef PGM_SHW_NAME
131
132
133/*
134 * Shadow - PAE mode
135 */
136#define PGM_SHW_TYPE PGM_TYPE_PAE
137#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
138#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
139#include "PGMAllShw.h"
140
141/* Guest - real mode */
142#define PGM_GST_TYPE PGM_TYPE_REAL
143#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
144#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
145#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
146#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
147#include "PGMGstDefs.h"
148#include "PGMAllBth.h"
149#undef BTH_PGMPOOLKIND_PT_FOR_PT
150#undef BTH_PGMPOOLKIND_ROOT
151#undef PGM_BTH_NAME
152#undef PGM_GST_TYPE
153#undef PGM_GST_NAME
154
155/* Guest - protected mode */
156#define PGM_GST_TYPE PGM_TYPE_PROT
157#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
158#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
159#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
160#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
161#include "PGMGstDefs.h"
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - 32-bit mode */
170#define PGM_GST_TYPE PGM_TYPE_32BIT
171#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
174#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
175#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
176#include "PGMGstDefs.h"
177#include "PGMAllBth.h"
178#undef BTH_PGMPOOLKIND_PT_FOR_BIG
179#undef BTH_PGMPOOLKIND_PT_FOR_PT
180#undef BTH_PGMPOOLKIND_ROOT
181#undef PGM_BTH_NAME
182#undef PGM_GST_TYPE
183#undef PGM_GST_NAME
184
185
186/* Guest - PAE mode */
187#define PGM_GST_TYPE PGM_TYPE_PAE
188#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
189#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
190#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
191#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
192#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
193#include "PGMGstDefs.h"
194#include "PGMAllGst.h"
195#include "PGMAllBth.h"
196#undef BTH_PGMPOOLKIND_PT_FOR_BIG
197#undef BTH_PGMPOOLKIND_PT_FOR_PT
198#undef BTH_PGMPOOLKIND_ROOT
199#undef PGM_BTH_NAME
200#undef PGM_GST_TYPE
201#undef PGM_GST_NAME
202
203#undef PGM_SHW_TYPE
204#undef PGM_SHW_NAME
205
206
207/*
208 * Shadow - AMD64 mode
209 */
210#define PGM_SHW_TYPE PGM_TYPE_AMD64
211#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
212#include "PGMAllShw.h"
213
214/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
215/** @todo retire this hack. */
216#define PGM_GST_TYPE PGM_TYPE_PROT
217#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
218#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
219#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
220#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
221#include "PGMGstDefs.h"
222#include "PGMAllBth.h"
223#undef BTH_PGMPOOLKIND_PT_FOR_PT
224#undef BTH_PGMPOOLKIND_ROOT
225#undef PGM_BTH_NAME
226#undef PGM_GST_TYPE
227#undef PGM_GST_NAME
228
229#ifdef VBOX_WITH_64_BITS_GUESTS
230/* Guest - AMD64 mode */
231# define PGM_GST_TYPE PGM_TYPE_AMD64
232# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
233# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
234# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
235# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
236# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
237# include "PGMGstDefs.h"
238# include "PGMAllGst.h"
239# include "PGMAllBth.h"
240# undef BTH_PGMPOOLKIND_PT_FOR_BIG
241# undef BTH_PGMPOOLKIND_PT_FOR_PT
242# undef BTH_PGMPOOLKIND_ROOT
243# undef PGM_BTH_NAME
244# undef PGM_GST_TYPE
245# undef PGM_GST_NAME
246#endif /* VBOX_WITH_64_BITS_GUESTS */
247
248#undef PGM_SHW_TYPE
249#undef PGM_SHW_NAME
250
251
252/*
253 * Shadow - 32-bit nested paging mode.
254 */
255#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
256#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
257#include "PGMAllShw.h"
258
259/* Guest - real mode */
260#define PGM_GST_TYPE PGM_TYPE_REAL
261#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
262#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
263#include "PGMGstDefs.h"
264#include "PGMAllBth.h"
265#undef PGM_BTH_NAME
266#undef PGM_GST_TYPE
267#undef PGM_GST_NAME
268
269/* Guest - protected mode */
270#define PGM_GST_TYPE PGM_TYPE_PROT
271#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
272#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
273#include "PGMGstDefs.h"
274#include "PGMAllBth.h"
275#undef PGM_BTH_NAME
276#undef PGM_GST_TYPE
277#undef PGM_GST_NAME
278
279/* Guest - 32-bit mode */
280#define PGM_GST_TYPE PGM_TYPE_32BIT
281#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
282#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
283#include "PGMGstDefs.h"
284#include "PGMAllBth.h"
285#undef PGM_BTH_NAME
286#undef PGM_GST_TYPE
287#undef PGM_GST_NAME
288
289/* Guest - PAE mode */
290#define PGM_GST_TYPE PGM_TYPE_PAE
291#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
292#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
293#include "PGMGstDefs.h"
294#include "PGMAllBth.h"
295#undef PGM_BTH_NAME
296#undef PGM_GST_TYPE
297#undef PGM_GST_NAME
298
299#ifdef VBOX_WITH_64_BITS_GUESTS
300/* Guest - AMD64 mode */
301# define PGM_GST_TYPE PGM_TYPE_AMD64
302# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
303# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
304# include "PGMGstDefs.h"
305# include "PGMAllBth.h"
306# undef PGM_BTH_NAME
307# undef PGM_GST_TYPE
308# undef PGM_GST_NAME
309#endif /* VBOX_WITH_64_BITS_GUESTS */
310
311#undef PGM_SHW_TYPE
312#undef PGM_SHW_NAME
313
314
315/*
316 * Shadow - PAE nested paging mode.
317 */
318#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
319#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
320#include "PGMAllShw.h"
321
322/* Guest - real mode */
323#define PGM_GST_TYPE PGM_TYPE_REAL
324#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
325#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
326#include "PGMGstDefs.h"
327#include "PGMAllBth.h"
328#undef PGM_BTH_NAME
329#undef PGM_GST_TYPE
330#undef PGM_GST_NAME
331
332/* Guest - protected mode */
333#define PGM_GST_TYPE PGM_TYPE_PROT
334#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
335#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
336#include "PGMGstDefs.h"
337#include "PGMAllBth.h"
338#undef PGM_BTH_NAME
339#undef PGM_GST_TYPE
340#undef PGM_GST_NAME
341
342/* Guest - 32-bit mode */
343#define PGM_GST_TYPE PGM_TYPE_32BIT
344#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
345#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
346#include "PGMGstDefs.h"
347#include "PGMAllBth.h"
348#undef PGM_BTH_NAME
349#undef PGM_GST_TYPE
350#undef PGM_GST_NAME
351
352/* Guest - PAE mode */
353#define PGM_GST_TYPE PGM_TYPE_PAE
354#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
355#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
356#include "PGMGstDefs.h"
357#include "PGMAllBth.h"
358#undef PGM_BTH_NAME
359#undef PGM_GST_TYPE
360#undef PGM_GST_NAME
361
362#ifdef VBOX_WITH_64_BITS_GUESTS
363/* Guest - AMD64 mode */
364# define PGM_GST_TYPE PGM_TYPE_AMD64
365# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
366# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
367# include "PGMGstDefs.h"
368# include "PGMAllBth.h"
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372#endif /* VBOX_WITH_64_BITS_GUESTS */
373
374#undef PGM_SHW_TYPE
375#undef PGM_SHW_NAME
376
377
378/*
379 * Shadow - AMD64 nested paging mode.
380 */
381#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
382#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
383#include "PGMAllShw.h"
384
385/* Guest - real mode */
386#define PGM_GST_TYPE PGM_TYPE_REAL
387#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
388#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
389#include "PGMGstDefs.h"
390#include "PGMAllBth.h"
391#undef PGM_BTH_NAME
392#undef PGM_GST_TYPE
393#undef PGM_GST_NAME
394
395/* Guest - protected mode */
396#define PGM_GST_TYPE PGM_TYPE_PROT
397#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
398#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
399#include "PGMGstDefs.h"
400#include "PGMAllBth.h"
401#undef PGM_BTH_NAME
402#undef PGM_GST_TYPE
403#undef PGM_GST_NAME
404
405/* Guest - 32-bit mode */
406#define PGM_GST_TYPE PGM_TYPE_32BIT
407#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
408#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
409#include "PGMGstDefs.h"
410#include "PGMAllBth.h"
411#undef PGM_BTH_NAME
412#undef PGM_GST_TYPE
413#undef PGM_GST_NAME
414
415/* Guest - PAE mode */
416#define PGM_GST_TYPE PGM_TYPE_PAE
417#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
418#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
419#include "PGMGstDefs.h"
420#include "PGMAllBth.h"
421#undef PGM_BTH_NAME
422#undef PGM_GST_TYPE
423#undef PGM_GST_NAME
424
425#ifdef VBOX_WITH_64_BITS_GUESTS
426/* Guest - AMD64 mode */
427# define PGM_GST_TYPE PGM_TYPE_AMD64
428# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
429# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
430# include "PGMGstDefs.h"
431# include "PGMAllBth.h"
432# undef PGM_BTH_NAME
433# undef PGM_GST_TYPE
434# undef PGM_GST_NAME
435#endif /* VBOX_WITH_64_BITS_GUESTS */
436
437#undef PGM_SHW_TYPE
438#undef PGM_SHW_NAME
439
440
441/*
442 * Shadow - EPT.
443 */
444#define PGM_SHW_TYPE PGM_TYPE_EPT
445#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
446#include "PGMAllShw.h"
447
448/* Guest - real mode */
449#define PGM_GST_TYPE PGM_TYPE_REAL
450#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
451#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
452#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
453#include "PGMGstDefs.h"
454#include "PGMAllBth.h"
455#undef BTH_PGMPOOLKIND_PT_FOR_PT
456#undef PGM_BTH_NAME
457#undef PGM_GST_TYPE
458#undef PGM_GST_NAME
459
460/* Guest - protected mode */
461#define PGM_GST_TYPE PGM_TYPE_PROT
462#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
463#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
464#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
465#include "PGMGstDefs.h"
466#include "PGMAllBth.h"
467#undef BTH_PGMPOOLKIND_PT_FOR_PT
468#undef PGM_BTH_NAME
469#undef PGM_GST_TYPE
470#undef PGM_GST_NAME
471
472/* Guest - 32-bit mode */
473#define PGM_GST_TYPE PGM_TYPE_32BIT
474#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
475#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
476#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
477#include "PGMGstDefs.h"
478#include "PGMAllBth.h"
479#undef BTH_PGMPOOLKIND_PT_FOR_PT
480#undef PGM_BTH_NAME
481#undef PGM_GST_TYPE
482#undef PGM_GST_NAME
483
484/* Guest - PAE mode */
485#define PGM_GST_TYPE PGM_TYPE_PAE
486#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
487#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
488#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
489#include "PGMGstDefs.h"
490#include "PGMAllBth.h"
491#undef BTH_PGMPOOLKIND_PT_FOR_PT
492#undef PGM_BTH_NAME
493#undef PGM_GST_TYPE
494#undef PGM_GST_NAME
495
496#ifdef VBOX_WITH_64_BITS_GUESTS
497/* Guest - AMD64 mode */
498# define PGM_GST_TYPE PGM_TYPE_AMD64
499# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
500# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
501# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
502# include "PGMGstDefs.h"
503# include "PGMAllBth.h"
504# undef BTH_PGMPOOLKIND_PT_FOR_PT
505# undef PGM_BTH_NAME
506# undef PGM_GST_TYPE
507# undef PGM_GST_NAME
508#endif /* VBOX_WITH_64_BITS_GUESTS */
509
510#undef PGM_SHW_TYPE
511#undef PGM_SHW_NAME
512
513
514/*
515 * Shadow - NEM / None.
516 */
517#define PGM_SHW_TYPE PGM_TYPE_NONE
518#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
519#include "PGMAllShw.h"
520
521/* Guest - real mode */
522#define PGM_GST_TYPE PGM_TYPE_REAL
523#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
524#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
525#include "PGMGstDefs.h"
526#include "PGMAllBth.h"
527#undef PGM_BTH_NAME
528#undef PGM_GST_TYPE
529#undef PGM_GST_NAME
530
531/* Guest - protected mode */
532#define PGM_GST_TYPE PGM_TYPE_PROT
533#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
534#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
535#include "PGMGstDefs.h"
536#include "PGMAllBth.h"
537#undef PGM_BTH_NAME
538#undef PGM_GST_TYPE
539#undef PGM_GST_NAME
540
541/* Guest - 32-bit mode */
542#define PGM_GST_TYPE PGM_TYPE_32BIT
543#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
544#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
545#include "PGMGstDefs.h"
546#include "PGMAllBth.h"
547#undef PGM_BTH_NAME
548#undef PGM_GST_TYPE
549#undef PGM_GST_NAME
550
551/* Guest - PAE mode */
552#define PGM_GST_TYPE PGM_TYPE_PAE
553#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
554#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
555#include "PGMGstDefs.h"
556#include "PGMAllBth.h"
557#undef PGM_BTH_NAME
558#undef PGM_GST_TYPE
559#undef PGM_GST_NAME
560
561#ifdef VBOX_WITH_64_BITS_GUESTS
562/* Guest - AMD64 mode */
563# define PGM_GST_TYPE PGM_TYPE_AMD64
564# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
565# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
566# include "PGMGstDefs.h"
567# include "PGMAllBth.h"
568# undef PGM_BTH_NAME
569# undef PGM_GST_TYPE
570# undef PGM_GST_NAME
571#endif /* VBOX_WITH_64_BITS_GUESTS */
572
573#undef PGM_SHW_TYPE
574#undef PGM_SHW_NAME
575
576
577
578/**
579 * Guest mode data array.
580 */
581PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
582{
583 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
584 {
585 PGM_TYPE_REAL,
586 PGM_GST_NAME_REAL(GetPage),
587 PGM_GST_NAME_REAL(ModifyPage),
588 PGM_GST_NAME_REAL(Enter),
589 PGM_GST_NAME_REAL(Exit),
590#ifdef IN_RING3
591 PGM_GST_NAME_REAL(Relocate),
592#endif
593 },
594 {
595 PGM_TYPE_PROT,
596 PGM_GST_NAME_PROT(GetPage),
597 PGM_GST_NAME_PROT(ModifyPage),
598 PGM_GST_NAME_PROT(Enter),
599 PGM_GST_NAME_PROT(Exit),
600#ifdef IN_RING3
601 PGM_GST_NAME_PROT(Relocate),
602#endif
603 },
604 {
605 PGM_TYPE_32BIT,
606 PGM_GST_NAME_32BIT(GetPage),
607 PGM_GST_NAME_32BIT(ModifyPage),
608 PGM_GST_NAME_32BIT(Enter),
609 PGM_GST_NAME_32BIT(Exit),
610#ifdef IN_RING3
611 PGM_GST_NAME_32BIT(Relocate),
612#endif
613 },
614 {
615 PGM_TYPE_PAE,
616 PGM_GST_NAME_PAE(GetPage),
617 PGM_GST_NAME_PAE(ModifyPage),
618 PGM_GST_NAME_PAE(Enter),
619 PGM_GST_NAME_PAE(Exit),
620#ifdef IN_RING3
621 PGM_GST_NAME_PAE(Relocate),
622#endif
623 },
624#ifdef VBOX_WITH_64_BITS_GUESTS
625 {
626 PGM_TYPE_AMD64,
627 PGM_GST_NAME_AMD64(GetPage),
628 PGM_GST_NAME_AMD64(ModifyPage),
629 PGM_GST_NAME_AMD64(Enter),
630 PGM_GST_NAME_AMD64(Exit),
631# ifdef IN_RING3
632 PGM_GST_NAME_AMD64(Relocate),
633# endif
634 },
635#endif
636};
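/* This table appears to be indexed by the guest PGM_TYPE_* value (entry 0
 * unused); each row bundles the GetPage/ModifyPage/Enter/Exit (and, in
 * ring-3, Relocate) workers generated from the template headers above for
 * that guest paging mode. */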
637
638
639/**
640 * The shadow mode data array.
641 */
642PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
643{
644 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
645 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
646 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
647 {
648 PGM_TYPE_32BIT,
649 PGM_SHW_NAME_32BIT(GetPage),
650 PGM_SHW_NAME_32BIT(ModifyPage),
651 PGM_SHW_NAME_32BIT(Enter),
652 PGM_SHW_NAME_32BIT(Exit),
653#ifdef IN_RING3
654 PGM_SHW_NAME_32BIT(Relocate),
655#endif
656 },
657 {
658 PGM_TYPE_PAE,
659 PGM_SHW_NAME_PAE(GetPage),
660 PGM_SHW_NAME_PAE(ModifyPage),
661 PGM_SHW_NAME_PAE(Enter),
662 PGM_SHW_NAME_PAE(Exit),
663#ifdef IN_RING3
664 PGM_SHW_NAME_PAE(Relocate),
665#endif
666 },
667 {
668 PGM_TYPE_AMD64,
669 PGM_SHW_NAME_AMD64(GetPage),
670 PGM_SHW_NAME_AMD64(ModifyPage),
671 PGM_SHW_NAME_AMD64(Enter),
672 PGM_SHW_NAME_AMD64(Exit),
673#ifdef IN_RING3
674 PGM_SHW_NAME_AMD64(Relocate),
675#endif
676 },
677 {
678 PGM_TYPE_NESTED_32BIT,
679 PGM_SHW_NAME_NESTED_32BIT(GetPage),
680 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
681 PGM_SHW_NAME_NESTED_32BIT(Enter),
682 PGM_SHW_NAME_NESTED_32BIT(Exit),
683#ifdef IN_RING3
684 PGM_SHW_NAME_NESTED_32BIT(Relocate),
685#endif
686 },
687 {
688 PGM_TYPE_NESTED_PAE,
689 PGM_SHW_NAME_NESTED_PAE(GetPage),
690 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
691 PGM_SHW_NAME_NESTED_PAE(Enter),
692 PGM_SHW_NAME_NESTED_PAE(Exit),
693#ifdef IN_RING3
694 PGM_SHW_NAME_NESTED_PAE(Relocate),
695#endif
696 },
697 {
698 PGM_TYPE_NESTED_AMD64,
699 PGM_SHW_NAME_NESTED_AMD64(GetPage),
700 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
701 PGM_SHW_NAME_NESTED_AMD64(Enter),
702 PGM_SHW_NAME_NESTED_AMD64(Exit),
703#ifdef IN_RING3
704 PGM_SHW_NAME_NESTED_AMD64(Relocate),
705#endif
706 },
707 {
708 PGM_TYPE_EPT,
709 PGM_SHW_NAME_EPT(GetPage),
710 PGM_SHW_NAME_EPT(ModifyPage),
711 PGM_SHW_NAME_EPT(Enter),
712 PGM_SHW_NAME_EPT(Exit),
713#ifdef IN_RING3
714 PGM_SHW_NAME_EPT(Relocate),
715#endif
716 },
717 {
718 PGM_TYPE_NONE,
719 PGM_SHW_NAME_NONE(GetPage),
720 PGM_SHW_NAME_NONE(ModifyPage),
721 PGM_SHW_NAME_NONE(Enter),
722 PGM_SHW_NAME_NONE(Exit),
723#ifdef IN_RING3
724 PGM_SHW_NAME_NONE(Relocate),
725#endif
726 },
727};
728
729
730/**
731 * The guest+shadow mode data array.
732 */
733PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
734{
735#if !defined(IN_RING3) && !defined(VBOX_STRICT)
736# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
737# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
738 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
739
740#elif !defined(IN_RING3) && defined(VBOX_STRICT)
741# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
742# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
743 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
744
745#elif defined(IN_RING3) && !defined(VBOX_STRICT)
746# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
747# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
748 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
749
750#elif defined(IN_RING3) && defined(VBOX_STRICT)
751# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
752# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
753 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
754
755#else
756# error "Misconfig."
757#endif
758
759 /* 32-bit shadow paging mode: */
760 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
761 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
762 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
763 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
764 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
765 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
766 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
767 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
768 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
769 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
770 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
771
772 /* PAE shadow paging mode: */
773 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
774 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
776 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
777 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
782 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
783 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
784
785 /* AMD64 shadow paging mode: */
786 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
787 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
788 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
789 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
790 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
791#ifdef VBOX_WITH_64_BITS_GUESTS
792 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
793#else
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
795#endif
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
799 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
800 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
801
802 /* 32-bit nested paging mode: */
803 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
804 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
805 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
806 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
808#ifdef VBOX_WITH_64_BITS_GUESTS
809 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
810#else
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
812#endif
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
816 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
817 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
818
819 /* PAE nested paging mode: */
820 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
822 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
823 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
824 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
825#ifdef VBOX_WITH_64_BITS_GUESTS
826 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
827#else
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
829#endif
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
833 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
834 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
835
836 /* AMD64 nested paging mode: */
837 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
840 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
841 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
842#ifdef VBOX_WITH_64_BITS_GUESTS
843 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
844#else
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
846#endif
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
850 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
851 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
852
853 /* EPT nested paging mode: */
854 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
857 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
858 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
859#ifdef VBOX_WITH_64_BITS_GUESTS
860 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
861#else
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
863#endif
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
867 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
868 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
869
870 /* NONE / NEM: */
871 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
874 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
875 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
876#ifdef VBOX_WITH_64_BITS_GUESTS
877 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
878#else
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
880#endif
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
884 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
885 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
886
887
888#undef PGMMODEDATABTH_ENTRY
889#undef PGMMODEDATABTH_NULL_ENTRY
890};
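/*
 * Minimal sketch of how these tables are consumed (the wrapper name below is
 * hypothetical; the pattern mirrors PGMTrap0eHandler and PGMPrefetchPage
 * further down): each VCPU caches an index for its current shadow+guest mode
 * pair, and dispatch is a bounds/NULL check plus an indirect call.
 */
#if 0 /* illustrative only */
static int pgmExampleDispatchPrefetch(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    /* Look up the cached mode index and call the per-mode worker. */
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
}
#endif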
891
892
893#ifdef IN_RING0
894/**
895 * #PF Handler.
896 *
897 * @returns VBox status code (appropriate for trap handling and GC return).
898 * @param pVCpu The cross context virtual CPU structure.
899 * @param uErr The trap error code.
900 * @param pRegFrame Trap register frame.
901 * @param pvFault The fault address.
902 */
903VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
904{
905 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
906
907 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
908 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
909 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
910
911
912# ifdef VBOX_WITH_STATISTICS
913 /*
914 * Error code stats.
915 */
916 if (uErr & X86_TRAP_PF_US)
917 {
918 if (!(uErr & X86_TRAP_PF_P))
919 {
920 if (uErr & X86_TRAP_PF_RW)
921 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
922 else
923 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
924 }
925 else if (uErr & X86_TRAP_PF_RW)
926 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
927 else if (uErr & X86_TRAP_PF_RSVD)
928 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
929 else if (uErr & X86_TRAP_PF_ID)
930 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
931 else
932 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
933 }
934 else
935 { /* Supervisor */
936 if (!(uErr & X86_TRAP_PF_P))
937 {
938 if (uErr & X86_TRAP_PF_RW)
939 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
940 else
941 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
942 }
943 else if (uErr & X86_TRAP_PF_RW)
944 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
945 else if (uErr & X86_TRAP_PF_ID)
946 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
947 else if (uErr & X86_TRAP_PF_RSVD)
948 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
949 }
950# endif /* VBOX_WITH_STATISTICS */
951
952 /*
953 * Call the worker.
954 */
955 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
956 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
957 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
958 bool fLockTaken = false;
959 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
960 if (fLockTaken)
961 {
962 PGM_LOCK_ASSERT_OWNER(pVM);
963 PGM_UNLOCK(pVM);
964 }
965 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
966
967 /*
968 * Return code tweaks.
969 */
970 if (rc != VINF_SUCCESS)
971 {
972 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
973 rc = VINF_SUCCESS;
974
975 /* Note: hack alert for a difficult-to-reproduce problem. */
976 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
977 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
978 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
979 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
980 {
981 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
982 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
983 rc = VINF_SUCCESS;
984 }
985 }
986
987 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
988 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
989 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
990 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
991 return rc;
992}
993#endif /* IN_RING0 */
994
995
996/**
997 * Prefetch a page
998 *
999 * Typically used to sync commonly used pages before entering raw mode
1000 * after a CR3 reload.
1001 *
1002 * @returns VBox status code suitable for scheduling.
1003 * @retval VINF_SUCCESS on success.
1004 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1005 * @param pVCpu The cross context virtual CPU structure.
1006 * @param GCPtrPage Page to prefetch.
1007 */
1008VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1009{
1010 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1011
1012 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1013 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1014 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1015 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1016
1017 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1018 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1019 return rc;
1020}
1021
1022
1023/**
1024 * Emulation of the invlpg instruction (HC only actually).
1025 *
1026 * @returns Strict VBox status code, special care required.
1027 * @retval VINF_PGM_SYNC_CR3 - handled.
1028 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1029 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1030 *
1031 * @param pVCpu The cross context virtual CPU structure.
1032 * @param GCPtrPage Page to invalidate.
1033 *
1034 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1035 * safe, but there could be edge cases!
1036 *
1037 * @todo Flush page or page directory only if necessary!
1038 * @todo VBOXSTRICTRC
1039 */
1040VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1041{
1042 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1043 int rc;
1044 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1045
1046 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1047
1048 /*
1049 * Call paging mode specific worker.
1050 */
1051 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1052 PGM_LOCK_VOID(pVM);
1053
1054 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1055 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1056 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1057 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1058
1059 PGM_UNLOCK(pVM);
1060 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1061
1062 /* Ignore all irrelevant error codes. */
1063 if ( rc == VERR_PAGE_NOT_PRESENT
1064 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1065 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1066 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1067 rc = VINF_SUCCESS;
1068
1069 return rc;
1070}
1071
1072
1073/**
1074 * Executes an instruction using the interpreter.
1075 *
1076 * @returns VBox status code (appropriate for trap handling and GC return).
1077 * @param pVM The cross context VM structure.
1078 * @param pVCpu The cross context virtual CPU structure.
1079 * @param pRegFrame Register frame.
1080 * @param pvFault Fault address.
1081 */
1082VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1083{
1084 NOREF(pVM);
1085 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1086 if (rc == VERR_EM_INTERPRETER)
1087 rc = VINF_EM_RAW_EMULATE_INSTR;
1088 if (rc != VINF_SUCCESS)
1089 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1090 return rc;
1091}
1092
1093
1094/**
1095 * Gets effective page information (from the VMM page directory).
1096 *
1097 * @returns VBox status code.
1098 * @param pVCpu The cross context virtual CPU structure.
1099 * @param GCPtr Guest Context virtual address of the page.
1100 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1101 * @param pHCPhys Where to store the HC physical address of the page.
1102 * This is page aligned.
1103 * @remark You should use PGMMapGetPage() for pages in a mapping.
1104 */
1105VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1106{
1107 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1108 PGM_LOCK_VOID(pVM);
1109
1110 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1111 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1112 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1113 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1114
1115 PGM_UNLOCK(pVM);
1116 return rc;
1117}
1118
1119
1120/**
1121 * Modify page flags for a range of pages in the shadow context.
1122 *
1123 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1124 *
1125 * @returns VBox status code.
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param GCPtr Virtual address of the first page in the range.
1128 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1129 * @param fMask The AND mask - page flags X86_PTE_*.
1130 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1131 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1132 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1133 */
1134DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1135{
1136 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1137 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1138
1139 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */
1140
1141 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1142 PGM_LOCK_VOID(pVM);
1143
1144 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1145 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1146 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1147 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1148
1149 PGM_UNLOCK(pVM);
1150 return rc;
1151}
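/* The wrappers below simply choose fFlags/fMask pairs for pdmShwModifyPage:
 * per the semantics documented above, an entry is updated as
 * uNew = (uOld & fMask) | fFlags, so PGMShwMakePageReadonly (fFlags=0,
 * fMask=~X86_PTE_RW) clears only the RW bit, while PGMShwMakePageNotPresent
 * (fFlags=0, fMask=0) clears everything. */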
1152
1153
1154/**
1155 * Changes the page flags for a single page in the shadow page tables so as to
1156 * make it read-only.
1157 *
1158 * @returns VBox status code.
1159 * @param pVCpu The cross context virtual CPU structure.
1160 * @param GCPtr Virtual address of the first page in the range.
1161 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1162 */
1163VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1164{
1165 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1166}
1167
1168
1169/**
1170 * Changes the page flags for a single page in the shadow page tables so as to
1171 * make it writable.
1172 *
1173 * The caller must know with 101% certainty that the guest page tables map this
1174 * page as writable too. This function will deal with shared, zero and write
1175 * monitored pages.
1176 *
1177 * @returns VBox status code.
1178 * @param pVCpu The cross context virtual CPU structure.
1179 * @param GCPtr Virtual address of the first page in the range.
1180 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1181 */
1182VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1183{
1184 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1185 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1186 return VINF_SUCCESS;
1187}
1188
1189
1190/**
1191 * Changes the page flags for a single page in the shadow page tables so as to
1192 * make it not present.
1193 *
1194 * @returns VBox status code.
1195 * @param pVCpu The cross context virtual CPU structure.
1196 * @param GCPtr Virtual address of the first page in the range.
1197 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1198 */
1199VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1200{
1201 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1202}
1203
1204
1205/**
1206 * Changes the page flags for a single page in the shadow page tables so as to
1207 * make it supervisor and writable.
1208 *
1209 * This is for dealing with CR0.WP=0 and read-only user pages.
1210 *
1211 * @returns VBox status code.
1212 * @param pVCpu The cross context virtual CPU structure.
1213 * @param GCPtr Virtual address of the first page in the range.
1214 * @param fBigPage Whether or not this is a big page. If it is, we have to
1215 * change the shadow PDE as well. If it isn't, the caller
1216 * has checked that the shadow PDE doesn't need changing.
1217 * We ASSUME 4KB pages backing the big page here!
1218 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1219 */
1220int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1221{
1222 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1223 if (rc == VINF_SUCCESS && fBigPage)
1224 {
1225 /* this is a bit ugly... */
1226 switch (pVCpu->pgm.s.enmShadowMode)
1227 {
1228 case PGMMODE_32_BIT:
1229 {
1230 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1231 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1232 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1233 pPde->u |= X86_PDE_RW;
1234 Log(("-> PDE=%#llx (32)\n", pPde->u));
1235 break;
1236 }
1237 case PGMMODE_PAE:
1238 case PGMMODE_PAE_NX:
1239 {
1240 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1241 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1242 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1243 pPde->u |= X86_PDE_RW;
1244 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1245 break;
1246 }
1247 default:
1248 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1249 }
1250 }
1251 return rc;
1252}
1253
1254
1255/**
1256 * Gets the shadow page directory for the specified address, PAE.
1257 *
1258 * @returns Pointer to the shadow PD.
1259 * @param pVCpu The cross context virtual CPU structure.
1260 * @param GCPtr The address.
1261 * @param uGstPdpe Guest PDPT entry. Valid.
1262 * @param ppPD Receives address of page directory
1263 */
1264int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1265{
1266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1267 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1268 PPGMPOOLPAGE pShwPage;
1269 int rc;
1270 PGM_LOCK_ASSERT_OWNER(pVM);
1271
1272
1273 /* Allocate page directory if not present. */
1274 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1275 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1276 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1277 X86PGPAEUINT const uPdpe = pPdpe->u;
1278 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1279 {
1280 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1281 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1282 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1283
1284 pgmPoolCacheUsed(pPool, pShwPage);
1285
1286 /* Update the entry if necessary. */
1287 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1288 if (uPdpeNew == uPdpe)
1289 { /* likely */ }
1290 else
1291 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1292 }
1293 else
1294 {
1295 RTGCPTR64 GCPdPt;
1296 PGMPOOLKIND enmKind;
1297 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1298 {
1299 /* AMD-V nested paging or real/protected mode without paging. */
1300 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1301 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1302 }
1303 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1304 {
1305 if (uGstPdpe & X86_PDPE_P)
1306 {
1307 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1308 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1309 }
1310 else
1311 {
1312 /* PD not present; guest must reload CR3 to change it.
1313 * No need to monitor anything in this case. */
1314 /** @todo r=bird: WTF is hit?!? */
1315 /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
1316 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1317 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1318 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1319 }
1320 }
1321 else
1322 {
1323 GCPdPt = CPUMGetGuestCR3(pVCpu);
1324 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1325 }
1326
1327 /* Create a reference back to the PDPT by using the index in its shadow page. */
1328 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1329 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1330 &pShwPage);
1331 AssertRCReturn(rc, rc);
1332
1333 /* Hook it up. */
1334 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1335 }
1336 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1337
1338 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1339 return VINF_SUCCESS;
1340}
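/* For reference: with the usual PAE layout, bits 31:30 of the address select
 * one of the four PDPT entries (the X86_PDPT_SHIFT / X86_PDPT_MASK_PAE
 * arithmetic above), bits 29:21 select the PDE within that page directory,
 * and bits 20:12 select the PTE.  E.g. GCPtr=0xC0801000 lands in PDPT entry 3,
 * PD entry 4, PT entry 1. */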
1341
1342
1343/**
1344 * Gets the pointer to the shadow page directory entry for an address, PAE.
1345 *
1346 * @returns Pointer to the PDE.
1347 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1348 * @param GCPtr The address.
1349 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1350 */
1351DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1352{
1353 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1354 PGM_LOCK_ASSERT_OWNER(pVM);
1355
1356 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1357 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1358 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1359 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1360 if (!(uPdpe & X86_PDPE_P))
1361 {
1362 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1363 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1364 }
1365 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1366
1367 /* Fetch the pgm pool shadow descriptor. */
1368 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1369 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1370
1371 *ppShwPde = pShwPde;
1372 return VINF_SUCCESS;
1373}
1374
1375
1376/**
1377 * Syncs the SHADOW page directory pointer for the specified address.
1378 *
1379 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1380 *
1381 * The caller is responsible for making sure the guest has a valid PD before
1382 * calling this function.
1383 *
1384 * @returns VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure.
1386 * @param GCPtr The address.
1387 * @param uGstPml4e Guest PML4 entry (valid).
1388 * @param uGstPdpe Guest PDPT entry (valid).
1389 * @param ppPD Receives address of page directory
1390 */
1391static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1392{
1393 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1394 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1395 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1396 int rc;
1397
1398 PGM_LOCK_ASSERT_OWNER(pVM);
1399
1400 /*
1401 * PML4.
1402 */
1403 PPGMPOOLPAGE pShwPage;
1404 {
1405 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1406 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1407 X86PGPAEUINT const uPml4e = pPml4e->u;
1408
1409 /* Allocate page directory pointer table if not present. */
1410 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1411 {
1412 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1413 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1414
1415 pgmPoolCacheUsed(pPool, pShwPage);
1416
1417 /* Update the entry if needed. */
1418 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1419 | (uPml4e & PGM_PML4_FLAGS);
1420 if (uPml4e == uPml4eNew)
1421 { /* likely */ }
1422 else
1423 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1424 }
1425 else
1426 {
1427 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1428
1429 RTGCPTR64 GCPml4;
1430 PGMPOOLKIND enmKind;
1431 if (fNestedPagingOrNoGstPaging)
1432 {
1433 /* AMD-V nested paging or real/protected mode without paging */
1434 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1435 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1436 }
1437 else
1438 {
1439 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1440 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1441 }
1442
1443 /* Create a reference back to the PDPT by using the index in its shadow page. */
1444 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1445 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1446 &pShwPage);
1447 AssertRCReturn(rc, rc);
1448
1449 /* Hook it up. */
1450 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1451 | (uPml4e & PGM_PML4_FLAGS));
1452 }
1453 }
1454
1455 /*
1456 * PDPT.
1457 */
1458 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1459 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1460 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1461 X86PGPAEUINT const uPdpe = pPdpe->u;
1462
1463 /* Allocate page directory if not present. */
1464 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1465 {
1466 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1467 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1468
1469 pgmPoolCacheUsed(pPool, pShwPage);
1470
1471 /* Update the entry if needed. */
1472 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1473 | (uPdpe & PGM_PDPT_FLAGS);
1474 if (uPdpe == uPdpeNew)
1475 { /* likely */ }
1476 else
1477 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1478 }
1479 else
1480 {
1481 RTGCPTR64 GCPdPt;
1482 PGMPOOLKIND enmKind;
1483 if (fNestedPagingOrNoGstPaging)
1484 {
1485 /* AMD-V nested paging or real/protected mode without paging */
1486 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1487 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1488 }
1489 else
1490 {
1491 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1492 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1493 }
1494
1495 /* Create a reference back to the PDPT by using the index in its shadow page. */
1496 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1497 pShwPage->idx, iPdPt, false /*fLockPage*/,
1498 &pShwPage);
1499 AssertRCReturn(rc, rc);
1500
1501 /* Hook it up. */
1502 ASMAtomicWriteU64(&pPdpe->u,
1503 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1504 }
1505
1506 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1507 return VINF_SUCCESS;
1508}
1509
1510
1511/**
1512 * Gets the SHADOW page directory pointer for the specified address (long mode).
1513 *
1514 * @returns VBox status code.
1515 * @param pVCpu The cross context virtual CPU structure.
1516 * @param GCPtr The address.
1517 * @param ppPml4e Receives the address of the page map level 4 entry.
1518 * @param ppPdpt Receives the address of the page directory pointer table.
1519 * @param ppPD Receives the address of the page directory.
1520 */
1521DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1522{
1523 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1524 PGM_LOCK_ASSERT_OWNER(pVM);
1525
1526 /*
1527 * PML4
1528 */
1529 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1530 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1531 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1532 if (ppPml4e)
1533 *ppPml4e = (PX86PML4E)pPml4e;
1534 X86PGPAEUINT const uPml4e = pPml4e->u;
1535 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1536 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1537 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1538
1539 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1540 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1541 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1542
1543 /*
1544 * PDPT
1545 */
1546 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1547 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1548 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1549 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1550 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1551
1552 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1553 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1554
1555 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1556 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1557 return VINF_SUCCESS;
1558}
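
/*
 * Illustrative usage sketch (editor's example, not part of the build): how a
 * caller holding the PGM lock might use pgmShwGetLongModePDPtr() to reach the
 * shadow PDE for an address.  The local variable names are assumptions.
 *
 *      PX86PDPT  pPdpt = NULL;
 *      PX86PDPAE pPD   = NULL;
 *      int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL /(*ppPml4e not needed*)/, &pPdpt, &pPD);
 *      if (RT_SUCCESS(rc))
 *      {
 *          unsigned const     iPd  = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
 *          X86PGPAEUINT const uPde = pPD->a[iPd].u;
 *          (* ... inspect uPde ... *)
 *      }
 */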
1559
1560
1561/**
1562 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1563 * backing pages in case the PDPT or PML4 entry is missing.
1564 *
1565 * @returns VBox status code.
1566 * @param pVCpu The cross context virtual CPU structure.
1567 * @param GCPtr The address.
1568 * @param ppPdpt Receives the address of the page directory pointer table.
1569 * @param ppPD Receives the address of the page directory.
1570 */
1571static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1572{
1573 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1574 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1575 int rc;
1576
1577 Assert(pVM->pgm.s.fNestedPaging);
1578 PGM_LOCK_ASSERT_OWNER(pVM);
1579
1580 /*
1581 * PML4 level.
1582 */
1583 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1584 Assert(pPml4);
1585
1586 /* Allocate page directory pointer table if not present. */
1587 PPGMPOOLPAGE pShwPage;
1588 {
1589 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1590 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1591 EPTPML4E Pml4e;
1592 Pml4e.u = pPml4e->u;
1593 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1594 {
1595 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1596 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1597 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1598 &pShwPage);
1599 AssertRCReturn(rc, rc);
1600
1601 /* Hook up the new PDPT now. */
1602 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1603 }
1604 else
1605 {
1606 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1607 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1608
1609 pgmPoolCacheUsed(pPool, pShwPage);
1610
1611 /* Hook up the cached PDPT if needed (probably not, given 512*512 PTs to sync). */
1612 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1613 { }
1614 else
1615 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1616 }
1617 }
1618
1619 /*
1620 * PDPT level.
1621 */
1622 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1623 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1624 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1625
1626 if (ppPdpt)
1627 *ppPdpt = pPdpt;
1628
1629 /* Allocate page directory if not present. */
1630 EPTPDPTE Pdpe;
1631 Pdpe.u = pPdpe->u;
1632 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1633 {
1634 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1635 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1636 pShwPage->idx, iPdPt, false /*fLockPage*/,
1637 &pShwPage);
1638 AssertRCReturn(rc, rc);
1639
1640 /* Hook up the new PD now. */
1641 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1642 }
1643 else
1644 {
1645 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1646 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1647
1648 pgmPoolCacheUsed(pPool, pShwPage);
1649
1650 /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1651 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1652 { }
1653 else
1654 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1655 }
1656
1657 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1658 return VINF_SUCCESS;
1659}
1660
1661
1662#ifdef IN_RING0
1663/**
1664 * Synchronizes a range of nested page table entries.
1665 *
1666 * The caller must own the PGM lock.
1667 *
1668 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1669 * @param GCPhys Where to start.
1670 * @param cPages The number of pages whose entries should be synced.
1671 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1672 * host paging mode for AMD-V).
1673 */
1674int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1675{
1676 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1677
1678/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1679 int rc;
1680 switch (enmShwPagingMode)
1681 {
1682 case PGMMODE_32_BIT:
1683 {
1684 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1685 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1686 break;
1687 }
1688
1689 case PGMMODE_PAE:
1690 case PGMMODE_PAE_NX:
1691 {
1692 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1693 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1694 break;
1695 }
1696
1697 case PGMMODE_AMD64:
1698 case PGMMODE_AMD64_NX:
1699 {
1700 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1701 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1702 break;
1703 }
1704
1705 case PGMMODE_EPT:
1706 {
1707 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1708 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1709 break;
1710 }
1711
1712 default:
1713 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1714 }
1715 return rc;
1716}
1717#endif /* IN_RING0 */
1718
1719
1720/**
1721 * Gets effective Guest OS page information.
1722 *
1723 * When GCPtr is in a big page, the function will return as if it was a normal
1724 * 4KB page. If distinguishing between big and normal pages becomes necessary
1725 * at a later point, a dedicated variant of PGMGstGetPage() will be created for
1726 * that purpose.
1727 *
1728 * @returns VBox status code.
1729 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1730 * @param GCPtr Guest Context virtual address of the page.
1731 * @param pWalk Where to store the page walk information.
1732 */
1733VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1734{
1735 VMCPU_ASSERT_EMT(pVCpu);
1736 Assert(pWalk);
1737 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1738 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1739 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1740 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1741}
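
/*
 * Illustrative usage sketch (editor's example, not part of the build):
 * translating a guest-virtual address with PGMGstGetPage().  The local names
 * are assumptions.
 *
 *      PGMPTWALK Walk;
 *      int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPtr=%RGv -> GCPhys=%RGp fSucceeded=%d\n", GCPtr, Walk.GCPhys, Walk.fSucceeded));
 */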
1742
1743
1744/**
1745 * Maps the guest CR3.
1746 *
1747 * @returns VBox status code.
1748 * @param pVCpu The cross context virtual CPU structure.
1749 * @param GCPhysCr3 The guest CR3 value.
1750 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1751 */
1752DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1753{
1754 /** @todo this needs some reworking wrt. locking? */
1755 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1756 PGM_LOCK_VOID(pVM);
1757 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1758 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1759
1760 RTHCPTR HCPtrGuestCr3;
1761 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1762 PGM_UNLOCK(pVM);
1763
1764 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1765 return rc;
1766}
1767
1768
1769/**
1770 * Unmaps the guest CR3.
1771 *
1772 * @returns VBox status code.
1773 * @param pVCpu The cross context virtual CPU structure.
1774 */
1775DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1776{
1777 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1778 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1779 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
1780 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1781}
1782
1783
1784/**
1785 * Performs a guest page table walk.
1786 *
1787 * The guest should be in paged protect mode or long mode when making a call to
1788 * this function.
1789 *
1790 * @returns VBox status code.
1791 * @retval VINF_SUCCESS on success.
1792 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1793 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1794 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1795 *
1796 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1797 * @param GCPtr The guest virtual address to walk by.
1798 * @param pWalk Where to return the walk result. This is valid for some
1799 * error codes as well.
1800 * @param pGstWalk The guest mode specific page walk information.
1801 */
1802int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1803{
1804 VMCPU_ASSERT_EMT(pVCpu);
1805 switch (pVCpu->pgm.s.enmGuestMode)
1806 {
1807 case PGMMODE_32_BIT:
1808 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1809 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1810
1811 case PGMMODE_PAE:
1812 case PGMMODE_PAE_NX:
1813 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1814 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1815
1816 case PGMMODE_AMD64:
1817 case PGMMODE_AMD64_NX:
1818 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1819 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1820
1821 case PGMMODE_REAL:
1822 case PGMMODE_PROTECTED:
1823 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1824 return VERR_PGM_NOT_USED_IN_MODE;
1825
1826 case PGMMODE_EPT:
1827 case PGMMODE_NESTED_32BIT:
1828 case PGMMODE_NESTED_PAE:
1829 case PGMMODE_NESTED_AMD64:
1830 default:
1831 AssertFailed();
1832 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1833 return VERR_PGM_NOT_USED_IN_MODE;
1834 }
1835}
1836
1837
1838#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1839/**
1840 * Performs a guest second-level address translation (SLAT).
1841 *
1842 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1843 * function.
1844 *
1845 * @returns VBox status code.
1846 * @retval VINF_SUCCESS on success.
1847 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1848 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1849 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1850 *
1851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1852 * @param GCPhysNested The nested-guest physical address being translated
1853 * (input).
1854 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1855 *                              valid. This indicates the SLAT is being done as
1856 *                              part of translating a nested-guest linear address.
1857 * @param GCPtrNested The nested-guest virtual address that initiated the
1858 * SLAT. If none, pass NIL_RTGCPTR.
1859 * @param pWalk Where to return the walk result. This is valid for
1860 * some error codes as well.
1861 * @param pGstWalk The second-level paging-mode specific walk
1862 * information.
1863 */
1864static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1865 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1866{
1867 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1868 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1869 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1870 switch (pVCpu->pgm.s.enmGuestSlatMode)
1871 {
1872 case PGMSLAT_EPT:
1873 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1874 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1875
1876 default:
1877 AssertFailed();
1878 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1879 return VERR_PGM_NOT_USED_IN_MODE;
1880 }
1881}
1882
1883
1884/**
1885 * Performs a guest second-level address translation (SLAT) for a nested-guest
1886 * physical address.
1887 *
1888 * This version requires the SLAT mode to be provided by the caller because we could
1889 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1890 * register values.
1891 *
1892 * @returns VBox status code.
1893 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1894 * @param enmSlatMode The second-level paging mode to use.
1895 * @param GCPhysNested The nested-guest physical address to translate.
1896 * @param pWalk Where to store the walk result.
1897 * @param pGstWalk Where to store the second-level paging-mode specific
1898 * walk information.
1899 */
1900static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1901 PPGMPTWALKGST pGstWalk)
1902{
1903 AssertPtr(pWalk);
1904 AssertPtr(pGstWalk);
1905 switch (enmSlatMode)
1906 {
1907 case PGMSLAT_EPT:
1908 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1909 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, 0 /* GCPtrNested */,
1910 pWalk, &pGstWalk->u.Ept);
1911
1912 default:
1913 AssertFailed();
1914 return VERR_PGM_NOT_USED_IN_MODE;
1915 }
1916}
1917#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1918
1919
1920/**
1921 * Tries to continue the previous walk.
1922 *
1923 * @note Requires the caller to hold the PGM lock from the first
1924 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1925 * we cannot use the pointers.
1926 *
1927 * @returns VBox status code.
1928 * @retval VINF_SUCCESS on success.
1929 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1930 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1931 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1932 *
1933 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1934 * @param GCPtr The guest virtual address to walk by.
1935 * @param pWalk Pointer to the previous walk result and where to return
1936 * the result of this walk. This is valid for some error
1937 * codes as well.
1938 * @param pGstWalk The guest-mode specific walk information.
1939 */
1940int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1941{
1942 /*
1943 * We can only handle successful walks.
1944 * We also limit ourselves to the next page.
1945 */
1946 if ( pWalk->fSucceeded
1947 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
1948 {
1949 Assert(pWalk->uLevel == 0);
1950 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1951 {
1952 /*
1953 * AMD64
1954 */
1955 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1956 {
1957 /*
1958 * We fall back to full walk if the PDE table changes, if any
1959 * reserved bits are set, or if the effective page access changes.
1960 */
1961 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1962 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1963 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1964 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1965
1966 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1967 {
1968 if (pGstWalk->u.Amd64.pPte)
1969 {
1970 X86PTEPAE Pte;
1971 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1972 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1973 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1974 {
1975 pWalk->GCPtr = GCPtr;
1976 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1977 pGstWalk->u.Amd64.Pte.u = Pte.u;
1978 pGstWalk->u.Amd64.pPte++;
1979 return VINF_SUCCESS;
1980 }
1981 }
1982 }
1983 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1984 {
1985 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1986 if (pGstWalk->u.Amd64.pPde)
1987 {
1988 X86PDEPAE Pde;
1989 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1990 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1991 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1992 {
1993 /* Get the new PTE and check out the first entry. */
1994 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1995 &pGstWalk->u.Amd64.pPt);
1996 if (RT_SUCCESS(rc))
1997 {
1998 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1999 X86PTEPAE Pte;
2000 Pte.u = pGstWalk->u.Amd64.pPte->u;
2001 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2002 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2003 {
2004 pWalk->GCPtr = GCPtr;
2005 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2006 pGstWalk->u.Amd64.Pte.u = Pte.u;
2007 pGstWalk->u.Amd64.Pde.u = Pde.u;
2008 pGstWalk->u.Amd64.pPde++;
2009 return VINF_SUCCESS;
2010 }
2011 }
2012 }
2013 }
2014 }
2015 }
2016 else if (!pWalk->fGigantPage)
2017 {
2018 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2019 {
2020 pWalk->GCPtr = GCPtr;
2021 pWalk->GCPhys += GUEST_PAGE_SIZE;
2022 return VINF_SUCCESS;
2023 }
2024 }
2025 else
2026 {
2027 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2028 {
2029 pWalk->GCPtr = GCPtr;
2030 pWalk->GCPhys += GUEST_PAGE_SIZE;
2031 return VINF_SUCCESS;
2032 }
2033 }
2034 }
2035 }
2036 /* Cases we don't handle. Do a full walk. */
2037 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2038}
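
/*
 * Illustrative usage sketch (editor's example, not part of the build): walking
 * two consecutive pages while holding the PGM lock, as the @note above
 * requires.  The local names are assumptions.
 *
 *      PGMPTWALK    Walk;
 *      PGMPTWALKGST GstWalk;
 *      PGM_LOCK_VOID(pVM);
 *      int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
 *      if (RT_SUCCESS(rc))
 *          rc = pgmGstPtWalkNext(pVCpu, GCPtr + GUEST_PAGE_SIZE, &Walk, &GstWalk);
 *      PGM_UNLOCK(pVM);
 */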
2039
2040
2041/**
2042 * Modify page flags for a range of pages in the guest's tables.
2043 *
2044 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2045 *
2046 * @returns VBox status code.
2047 * @param pVCpu The cross context virtual CPU structure.
2048 * @param GCPtr Virtual address of the first page in the range.
2049 * @param cb Size (in bytes) of the range to apply the modification to.
2050 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2051 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2052 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2053 */
2054VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2055{
2056 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2057 VMCPU_ASSERT_EMT(pVCpu);
2058
2059 /*
2060 * Validate input.
2061 */
2062 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2063 Assert(cb);
2064
2065 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2066
2067 /*
2068 * Adjust input.
2069 */
2070 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2071 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2072 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2073
2074 /*
2075 * Call worker.
2076 */
2077 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2078 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2079 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2080 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2081
2082 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2083 return rc;
2084}
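
/*
 * Illustrative usage sketch (editor's example, not part of the build): write
 * protecting one guest page by clearing X86_PTE_RW.  It also shows why the
 * fMask warning above matters: cast to 64-bit before ~'ing the constant.
 *
 *      rc = PGMGstModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, 0 /(*fFlags*)/, ~(uint64_t)X86_PTE_RW /(*fMask*)/);
 */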
2085
2086
2087/**
2088 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2089 *
2090 * @returns @c true if the PDPEs are valid, @c false otherwise.
2091 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2092 * @param paPaePdpes The PAE PDPEs to validate.
2093 *
2094 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2095 */
2096VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2097{
2098 Assert(paPaePdpes);
2099 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2100 {
2101 X86PDPE const PaePdpe = paPaePdpes[i];
2102 if ( !(PaePdpe.u & X86_PDPE_P)
2103 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2104 { /* likely */ }
2105 else
2106 return false;
2107 }
2108 return true;
2109}
2110
2111
2112/**
2113 * Performs the lazy mapping of the 32-bit guest PD.
2114 *
2115 * @returns VBox status code.
2116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2117 * @param ppPd Where to return the pointer to the mapping. This is
2118 * always set.
2119 */
2120int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2121{
2122 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2123 PGM_LOCK_VOID(pVM);
2124
2125 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2126
2127 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2128 PPGMPAGE pPage;
2129 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2130 if (RT_SUCCESS(rc))
2131 {
2132 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2133 if (RT_SUCCESS(rc))
2134 {
2135# ifdef IN_RING3
2136 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2137 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2138# else
2139 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2140 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2141# endif
2142 PGM_UNLOCK(pVM);
2143 return VINF_SUCCESS;
2144 }
2145 AssertRC(rc);
2146 }
2147 PGM_UNLOCK(pVM);
2148
2149 *ppPd = NULL;
2150 return rc;
2151}
2152
2153
2154/**
2155 * Performs the lazy mapping of the PAE guest PDPT.
2156 *
2157 * @returns VBox status code.
2158 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2159 * @param ppPdpt Where to return the pointer to the mapping. This is
2160 * always set.
2161 */
2162int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2163{
2164 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2165 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2166 PGM_LOCK_VOID(pVM);
2167
2168 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2169 PPGMPAGE pPage;
2170 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2171 * guest-physical address here. */
2172 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2173 if (RT_SUCCESS(rc))
2174 {
2175 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2176 if (RT_SUCCESS(rc))
2177 {
2178# ifdef IN_RING3
2179 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2180 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2181# else
2182 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2183 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2184# endif
2185 PGM_UNLOCK(pVM);
2186 return VINF_SUCCESS;
2187 }
2188 AssertRC(rc);
2189 }
2190
2191 PGM_UNLOCK(pVM);
2192 *ppPdpt = NULL;
2193 return rc;
2194}
2195
2196
2197/**
2198 * Performs the lazy mapping / updating of a PAE guest PD.
2199 *
2200 * @returns VBox status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2203 * @param iPdpt Which PD entry to map (0..3).
2204 * @param ppPd Where to return the pointer to the mapping. This is
2205 * always set.
2206 */
2207int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2208{
2209 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2210 PGM_LOCK_VOID(pVM);
2211
2212 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2213 Assert(pGuestPDPT);
2214 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2215 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2216 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2217
2218 PPGMPAGE pPage;
2219 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2220 if (RT_SUCCESS(rc))
2221 {
2222 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2223 AssertRC(rc);
2224 if (RT_SUCCESS(rc))
2225 {
2226# ifdef IN_RING3
2227 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2228 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2229# else
2230 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2231 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2232# endif
2233 if (fChanged)
2234 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2235 PGM_UNLOCK(pVM);
2236 return VINF_SUCCESS;
2237 }
2238 }
2239
2240 /* Invalid page or some failure, invalidate the entry. */
2241 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2242 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2243 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2244
2245 PGM_UNLOCK(pVM);
2246 return rc;
2247}
2248
2249
2250/**
2251 * Performs the lazy mapping of the AMD64 guest PML4 table.
2252 *
2253 * @returns VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2255 * @param ppPml4 Where to return the pointer to the mapping. This will
2256 * always be set.
2257 */
2258int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2259{
2260 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2261 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2262 PGM_LOCK_VOID(pVM);
2263
2264 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2265 PPGMPAGE pPage;
2266 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2267 if (RT_SUCCESS(rc))
2268 {
2269 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2270 if (RT_SUCCESS(rc))
2271 {
2272# ifdef IN_RING3
2273 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2274 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2275# else
2276 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2277 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2278# endif
2279 PGM_UNLOCK(pVM);
2280 return VINF_SUCCESS;
2281 }
2282 }
2283
2284 PGM_UNLOCK(pVM);
2285 *ppPml4 = NULL;
2286 return rc;
2287}
2288
2289
2290#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2291/**
2292 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2293 *
2294 * @returns VBox status code.
2295 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2296 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2297 * always be set.
2298 */
2299int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2300{
2301 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2302 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2303 PGM_LOCK_VOID(pVM);
2304
2305 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2306 PPGMPAGE pPage;
2307 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2308 if (RT_SUCCESS(rc))
2309 {
2310 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2311 if (RT_SUCCESS(rc))
2312 {
2313# ifdef IN_RING3
2314 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2315 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2316# else
2317 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2318 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2319# endif
2320 PGM_UNLOCK(pVM);
2321 return VINF_SUCCESS;
2322 }
2323 }
2324
2325 PGM_UNLOCK(pVM);
2326 *ppEptPml4 = NULL;
2327 return rc;
2328}
2329#endif
2330
2331
2332/**
2333 * Gets the current CR3 register value for the shadow memory context.
2334 * @returns CR3 value.
2335 * @param pVCpu The cross context virtual CPU structure.
2336 */
2337VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2338{
2339 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2340 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2341 return pPoolPage->Core.Key;
2342}
2343
2344
2345/**
2346 * Forces lazy remapping of the guest's PAE page-directory structures.
2347 *
2348 * @param pVCpu The cross context virtual CPU structure.
2349 */
2350static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2351{
2352 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2353 {
2354 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2355 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2356 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2357 }
2358}
2359
2360
2361/**
2362 * Gets the CR3 mask corresponding to the given paging mode.
2363 *
2364 * @returns The CR3 mask.
2365 * @param enmMode The paging mode.
2366 */
2367DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
2368{
2369 /** @todo This work can be optimized either by storing the masks in
2370 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
2371 * store the result when entering guest mode since we currently use it only
2372 * for enmGuestMode. */
2373 switch (enmMode)
2374 {
2375 case PGMMODE_PAE:
2376 case PGMMODE_PAE_NX:
2377 return X86_CR3_PAE_PAGE_MASK;
2378 case PGMMODE_AMD64:
2379 case PGMMODE_AMD64_NX:
2380 return X86_CR3_AMD64_PAGE_MASK;
2381 case PGMMODE_EPT:
2382 return X86_CR3_EPT_PAGE_MASK;
2383 default:
2384 return X86_CR3_PAGE_MASK;
2385 }
2386}
2387
2388
2389/**
2390 * Gets the masked CR3 value according to the current guest paging mode.
2391 *
2392 * @returns The masked PGM CR3 value.
2393 * @param pVCpu The cross context virtual CPU structure.
2394 * @param uCr3 The raw guest CR3 value.
2395 */
2396DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2397{
2398 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
2399 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
2400 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2401 return GCPhysCR3;
2402}
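
/*
 * Worked example (editor's illustration): for a PAE guest the PDPT is 32 byte
 * aligned, so pgmGetCr3MaskForMode() returns X86_CR3_PAE_PAGE_MASK and a raw
 * CR3 of 0x12345678 masks down to a GCPhysCR3 of 0x12345660 (bits 4:0 carry
 * PWT/PCD and ignored bits) before the A20 gate is applied.
 */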
2403
2404
2405#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2406/**
2407 * Performs second-level address translation for the given CR3 and updates the
2408 * nested-guest CR3 when successful.
2409 *
2410 * @returns VBox status code.
2411 * @param pVCpu The cross context virtual CPU structure.
2412 * @param uCr3 The masked nested-guest CR3 value.
2413 * @param pGCPhysCR3 Where to store the translated CR3.
2414 *
2415 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2416 * mindful of this in code that's hyper sensitive to the order of
2417 * operations.
2418 */
2419static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2420{
2421 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2422 {
2423 PGMPTWALK Walk;
2424 PGMPTWALKGST GstWalk;
2425 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2426 if (RT_SUCCESS(rc))
2427 {
2428 /* Update nested-guest CR3. */
2429 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2430
2431 /* Pass back the translated result. */
2432 *pGCPhysCr3 = Walk.GCPhys;
2433 return VINF_SUCCESS;
2434 }
2435
2436 /* Translation failed. */
2437 *pGCPhysCr3 = NIL_RTGCPHYS;
2438 return rc;
2439 }
2440
2441 /*
2442 * If the nested-guest CR3 has not changed, then the previously
2443 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2444 */
2445 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2446 return VINF_SUCCESS;
2447}
2448#endif
2449
2450
2451/**
2452 * Performs and schedules necessary updates following a CR3 load or reload.
2453 *
2454 * This will normally involve mapping the guest PD or nPDPT.
2455 *
2456 * @returns VBox status code.
2457 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2458 * safely be ignored and overridden since the FF will be set too then.
2459 * @param pVCpu The cross context virtual CPU structure.
2460 * @param cr3 The new cr3.
2461 * @param fGlobal Indicates whether this is a global flush or not.
2462 */
2463VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2464{
2465 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2466 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2467
2468 VMCPU_ASSERT_EMT(pVCpu);
2469
2470 /*
2471 * Always flag the necessary updates; necessary for hardware acceleration
2472 */
2473 /** @todo optimize this, it shouldn't always be necessary. */
2474 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2475 if (fGlobal)
2476 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2477
2478 /*
2479 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2480 */
2481 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2482 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2484 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2485 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2486 {
2487 LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
2488 RTGCPHYS GCPhysOut;
2489 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2490 if (RT_SUCCESS(rc))
2491 GCPhysCR3 = GCPhysOut;
2492 else
2493 {
2494 /* CR3 SLAT translation failed but we try to pretend it
2495 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2496 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2497 int const rc2 = pgmGstUnmapCr3(pVCpu);
2498 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2499 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2500 return rc2;
2501 }
2502 }
2503#endif
2504
2505 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2506 int rc = VINF_SUCCESS;
2507 if (GCPhysOldCR3 != GCPhysCR3)
2508 {
2509 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2510 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2511 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2512
2513 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2514 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2515 if (RT_LIKELY(rc == VINF_SUCCESS))
2516 { }
2517 else
2518 {
2519 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2520 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2521 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2522 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2523 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2524 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2525 }
2526
2527 if (fGlobal)
2528 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2529 else
2530 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2531 }
2532 else
2533 {
2534#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2535 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2536 if (pPool->cDirtyPages)
2537 {
2538 PGM_LOCK_VOID(pVM);
2539 pgmPoolResetDirtyPages(pVM);
2540 PGM_UNLOCK(pVM);
2541 }
2542#endif
2543 if (fGlobal)
2544 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2545 else
2546 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2547
2548 /*
2549 * Flush PAE PDPTEs.
2550 */
2551 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2552 pgmGstFlushPaePdpes(pVCpu);
2553 }
2554
2555 IEMTlbInvalidateAll(pVCpu);
2556 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2557 return rc;
2558}
2559
2560
2561/**
2562 * Performs and schedules necessary updates following a CR3 load or reload when
2563 * using nested or extended paging.
2564 *
2565 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2566 * TLB and triggering a SyncCR3.
2567 *
2568 * This will normally involve mapping the guest PD or nPDPT.
2569 *
2570 * @returns VBox status code.
2571 * @retval VINF_SUCCESS.
2572 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2573 * paging modes). This can safely be ignored and overridden since the
2574 * FF will be set too then.
2575 * @param pVCpu The cross context virtual CPU structure.
2576 * @param cr3 The new CR3.
2577 */
2578VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2579{
2580 VMCPU_ASSERT_EMT(pVCpu);
2581
2582 /* We assume we're only called in nested paging mode. */
2583 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2584
2585 /*
2586 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2587 */
2588 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2589 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2591 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2592 {
2593 LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
2594 RTGCPHYS GCPhysOut;
2595 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2596 if (RT_SUCCESS(rc))
2597 GCPhysCR3 = GCPhysOut;
2598 else
2599 {
2600 /* CR3 SLAT translation failed but we try to pretend it
2601 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2602 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2603 int const rc2 = pgmGstUnmapCr3(pVCpu);
2604 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2605 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2606 return rc2;
2607 }
2608 }
2609#endif
2610
2611 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2612 int rc = VINF_SUCCESS;
2613 if (GCPhysOldCR3 != GCPhysCR3)
2614 {
2615 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2616 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2617 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2618
2619 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2620 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2621
2622 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2623 }
2624 /*
2625 * Flush PAE PDPTEs.
2626 */
2627 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2628 pgmGstFlushPaePdpes(pVCpu);
2629
2630 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2631 return rc;
2632}
2633
2634
2635/**
2636 * Synchronize the paging structures.
2637 *
2638 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2639 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags; they are set
2640 * in several places, most importantly whenever the CR3 is loaded.
2641 *
2642 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2643 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2644 * the VMM into guest context.
2645 * @param pVCpu The cross context virtual CPU structure.
2646 * @param cr0 Guest context CR0 register
2647 * @param cr3 Guest context CR3 register
2648 * @param cr4 Guest context CR4 register
2649 * @param fGlobal Including global page directories or not
2650 */
2651VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2652{
2653 int rc;
2654
2655 VMCPU_ASSERT_EMT(pVCpu);
2656
2657 /*
2658 * The pool may have pending stuff and even require a return to ring-3 to
2659 * clear the whole thing.
2660 */
2661 rc = pgmPoolSyncCR3(pVCpu);
2662 if (rc != VINF_SUCCESS)
2663 return rc;
2664
2665 /*
2666 * We might be called when we shouldn't.
2667 *
2668 * The mode switching will ensure that the PD is resynced after every mode
2669 * switch. So, if we find ourselves here when in protected or real mode
2670 * we can safely clear the FF and return immediately.
2671 */
2672 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2673 {
2674 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2675 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2676 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2677 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2678 return VINF_SUCCESS;
2679 }
2680
2681 /* If global pages are not supported, then all flushes are global. */
2682 if (!(cr4 & X86_CR4_PGE))
2683 fGlobal = true;
2684 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2685 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2686
2687 /*
2688 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2689 * This should be done before SyncCR3.
2690 */
2691 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2692 {
2693 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2694
2695 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2696 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2697#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2698 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2699 {
2700 RTGCPHYS GCPhysOut;
2701 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2702 if (RT_SUCCESS(rc2))
2703 GCPhysCR3 = GCPhysOut;
2704 else
2705 {
2706 /* CR3 SLAT translation failed but we try to pretend it
2707 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2708 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2709 rc2 = pgmGstUnmapCr3(pVCpu);
2710 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2711 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2712 return rc2;
2713 }
2714 }
2715#endif
2716 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2717 if (GCPhysOldCR3 != GCPhysCR3)
2718 {
2719 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2720 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2721 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2722 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2723 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2724 }
2725
2726 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2727 if ( rc == VINF_PGM_SYNC_CR3
2728 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2729 {
2730 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2731#ifdef IN_RING3
2732 rc = pgmPoolSyncCR3(pVCpu);
2733#else
2734 if (rc == VINF_PGM_SYNC_CR3)
2735 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2736 return VINF_PGM_SYNC_CR3;
2737#endif
2738 }
2739 AssertRCReturn(rc, rc);
2740 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2741 }
2742
2743 /*
2744 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2745 */
2746 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2747
2748 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2749 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2750 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2751 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2752
2753 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2754 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2755 if (rc == VINF_SUCCESS)
2756 {
2757 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2758 {
2759 /* Go back to ring 3 if a pgm pool sync is again pending. */
2760 return VINF_PGM_SYNC_CR3;
2761 }
2762
2763 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2764 {
2765 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2766 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2767 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2768 }
2769 }
2770
2771 /*
2772 * Now flush the CR3 (guest context).
2773 */
2774 if (rc == VINF_SUCCESS)
2775 PGM_INVL_VCPU_TLBS(pVCpu);
2776 return rc;
2777}
2778
2779
2780/**
2781 * Maps all the PAE PDPE entries.
2782 *
2783 * @returns VBox status code.
2784 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2785 * @param paPaePdpes The new PAE PDPE values.
2786 *
2787 * @remarks This function may be invoked during the process of changing the guest
2788 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2789 * reflect PAE paging just yet.
2790 */
2791VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2792{
2793 Assert(paPaePdpes);
2794 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2795 {
2796 X86PDPE const PaePdpe = paPaePdpes[i];
2797
2798 /*
2799 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2800 * is deferred.[1] Also, different situations require different handling of invalid
2801 * PDPE entries. Here we assume the caller has already validated or doesn't require
2802 * validation of the PDPEs.
2803 *
2804 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2805 */
2806 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2807 {
2808 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2809 RTHCPTR HCPtr;
2810 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2811
2812 PGM_LOCK_VOID(pVM);
2813 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2814 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2815 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2816 PGM_UNLOCK(pVM);
2817 if (RT_SUCCESS(rc))
2818 {
2819#ifdef IN_RING3
2820 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2821 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2822#else
2823 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2824 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2825#endif
2826 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2827 continue;
2828 }
2829 AssertMsgFailed(("PGMPhysMapPaePdpes: rc2=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2830 }
2831 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2832 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2833 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2834 }
2835
2836 return VINF_SUCCESS;
2837}
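
/*
 * Illustrative usage sketch (editor's example, not part of the build): a caller
 * validating the four PDPEs before mapping them, as this function assumes.
 * aPaePdpes[] is assumed to have been filled in by the caller (e.g. from CPUM
 * or the VMCS).
 *
 *      if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
 *          rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
 *      else
 *          rc = VERR_PGM_PAE_PDPE_RSVD;
 */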
2838
2839
2840/**
2841 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2842 *
2843 * @returns VBox status code.
2844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2845 * @param cr3 The guest CR3 value.
2846 *
2847 * @remarks This function may be invoked during the process of changing the guest
2848 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2849 * PAE paging just yet.
2850 */
2851VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2852{
2853 /*
2854 * Read the page-directory-pointer table (PDPT) at CR3.
2855 */
2856 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2857 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2858
2859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2860 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2861 {
2862 RTGCPHYS GCPhysOut;
2863 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2864 if (RT_SUCCESS(rc))
2865 GCPhysCR3 = GCPhysOut;
2866 else
2867 {
2868 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2869 return rc;
2870 }
2871 }
2872#endif
2873
2874 RTHCPTR HCPtrGuestCr3;
2875 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
2876 if (RT_SUCCESS(rc))
2877 {
2878 /*
2879 * Validate the page-directory-pointer table entries (PDPE).
2880 */
2881 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2882 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2883 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2884 {
2885 /*
2886 * Map the PDPT.
2887 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2888 * that PGMFlushTLB will be called soon and only a change to CR3 then
2889 * will cause the shadow page tables to be updated.
2890 */
2891#ifdef IN_RING3
2892 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2893 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2894#else
2895 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2896 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2897#endif
2898
2899 /*
2900 * Update CPUM.
2901 * We do this prior to mapping the PDPEs to keep the order consistent
2902 * with what's used in HM. In practice, it doesn't really matter.
2903 */
2904 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2905
2906 /*
2907 * Map the PDPEs.
2908 */
2909 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2910 if (RT_SUCCESS(rc))
2911 {
2912#ifdef IN_RING3
2913 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
2914 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
2915#else
2916 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
2917 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
2918#endif
2919 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
2920 }
2921 }
2922 else
2923 rc = VERR_PGM_PAE_PDPE_RSVD;
2924 }
2925 return rc;
2926}
2927
2928
2929/**
2930 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2931 *
2932 * @returns VBox status code, with the following informational code for
2933 * VM scheduling.
2934 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2935 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2936 *
2937 * @param pVCpu The cross context virtual CPU structure.
2938 * @param cr0 The new cr0.
2939 * @param cr4 The new cr4.
2940 * @param efer The new extended feature enable register.
2941 * @param fForce Whether to force a mode change.
2942 */
2943VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2944{
2945 VMCPU_ASSERT_EMT(pVCpu);
2946
2947 /*
2948 * Calc the new guest mode.
2949 *
2950 * Note! We check PG before PE and without requiring PE because of the
2951 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2952 */
2953 PGMMODE enmGuestMode;
2954 if (cr0 & X86_CR0_PG)
2955 {
2956 if (!(cr4 & X86_CR4_PAE))
2957 {
2958 bool const fPse = !!(cr4 & X86_CR4_PSE);
2959 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2960 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2961 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2962 enmGuestMode = PGMMODE_32_BIT;
2963 }
2964 else if (!(efer & MSR_K6_EFER_LME))
2965 {
2966 if (!(efer & MSR_K6_EFER_NXE))
2967 enmGuestMode = PGMMODE_PAE;
2968 else
2969 enmGuestMode = PGMMODE_PAE_NX;
2970 }
2971 else
2972 {
2973 if (!(efer & MSR_K6_EFER_NXE))
2974 enmGuestMode = PGMMODE_AMD64;
2975 else
2976 enmGuestMode = PGMMODE_AMD64_NX;
2977 }
2978 }
2979 else if (!(cr0 & X86_CR0_PE))
2980 enmGuestMode = PGMMODE_REAL;
2981 else
2982 enmGuestMode = PGMMODE_PROTECTED;
2983
2984 /*
2985 * Did it change?
2986 */
2987 if ( !fForce
2988 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2989 return VINF_SUCCESS;
2990
2991 /* Flush the TLB */
2992 PGM_INVL_VCPU_TLBS(pVCpu);
2993 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
2994}
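
/*
 * Worked examples (editor's illustration) of the guest mode calculation above:
 *
 *      CR0.PG=0, CR0.PE=0                                -> PGMMODE_REAL
 *      CR0.PG=0, CR0.PE=1                                -> PGMMODE_PROTECTED
 *      CR0.PG=1, CR4.PAE=0                               -> PGMMODE_32_BIT
 *      CR0.PG=1, CR4.PAE=1, EFER.LME=0, EFER.NXE=1       -> PGMMODE_PAE_NX
 *      CR0.PG=1, CR4.PAE=1, EFER.LME=1, EFER.NXE=0       -> PGMMODE_AMD64
 */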
2995
2996
2997/**
2998 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2999 *
3000 * @returns PGM_TYPE_*.
3001 * @param pgmMode The mode value to convert.
3002 */
3003DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3004{
3005 switch (pgmMode)
3006 {
3007 case PGMMODE_REAL: return PGM_TYPE_REAL;
3008 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3009 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3010 case PGMMODE_PAE:
3011 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3012 case PGMMODE_AMD64:
3013 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3014 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3015 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3016 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3017 case PGMMODE_EPT: return PGM_TYPE_EPT;
3018 case PGMMODE_NONE: return PGM_TYPE_NONE;
3019 default:
3020 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3021 }
3022}
3023
3024
3025/**
3026 * Calculates the shadow paging mode.
3027 *
3028 * @returns The shadow paging mode.
3029 * @param pVM The cross context VM structure.
3030 * @param enmGuestMode The guest mode.
3031 * @param enmHostMode The host mode.
3032 * @param enmShadowMode The current shadow mode.
3033 */
3034static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3035{
3036 switch (enmGuestMode)
3037 {
3038 case PGMMODE_REAL:
3039 case PGMMODE_PROTECTED:
3040 switch (enmHostMode)
3041 {
3042 case SUPPAGINGMODE_32_BIT:
3043 case SUPPAGINGMODE_32_BIT_GLOBAL:
3044 enmShadowMode = PGMMODE_32_BIT;
3045 break;
3046
3047 case SUPPAGINGMODE_PAE:
3048 case SUPPAGINGMODE_PAE_NX:
3049 case SUPPAGINGMODE_PAE_GLOBAL:
3050 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3051 enmShadowMode = PGMMODE_PAE;
3052 break;
3053
3054 case SUPPAGINGMODE_AMD64:
3055 case SUPPAGINGMODE_AMD64_GLOBAL:
3056 case SUPPAGINGMODE_AMD64_NX:
3057 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3058 enmShadowMode = PGMMODE_PAE;
3059 break;
3060
3061 default:
3062 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3063 }
3064 break;
3065
3066 case PGMMODE_32_BIT:
3067 switch (enmHostMode)
3068 {
3069 case SUPPAGINGMODE_32_BIT:
3070 case SUPPAGINGMODE_32_BIT_GLOBAL:
3071 enmShadowMode = PGMMODE_32_BIT;
3072 break;
3073
3074 case SUPPAGINGMODE_PAE:
3075 case SUPPAGINGMODE_PAE_NX:
3076 case SUPPAGINGMODE_PAE_GLOBAL:
3077 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3078 enmShadowMode = PGMMODE_PAE;
3079 break;
3080
3081 case SUPPAGINGMODE_AMD64:
3082 case SUPPAGINGMODE_AMD64_GLOBAL:
3083 case SUPPAGINGMODE_AMD64_NX:
3084 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3085 enmShadowMode = PGMMODE_PAE;
3086 break;
3087
3088 default:
3089 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3090 }
3091 break;
3092
3093 case PGMMODE_PAE:
3094 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3095 switch (enmHostMode)
3096 {
3097 case SUPPAGINGMODE_32_BIT:
3098 case SUPPAGINGMODE_32_BIT_GLOBAL:
3099 enmShadowMode = PGMMODE_PAE;
3100 break;
3101
3102 case SUPPAGINGMODE_PAE:
3103 case SUPPAGINGMODE_PAE_NX:
3104 case SUPPAGINGMODE_PAE_GLOBAL:
3105 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3106 enmShadowMode = PGMMODE_PAE;
3107 break;
3108
3109 case SUPPAGINGMODE_AMD64:
3110 case SUPPAGINGMODE_AMD64_GLOBAL:
3111 case SUPPAGINGMODE_AMD64_NX:
3112 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3113 enmShadowMode = PGMMODE_PAE;
3114 break;
3115
3116 default:
3117 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3118 }
3119 break;
3120
3121 case PGMMODE_AMD64:
3122 case PGMMODE_AMD64_NX:
3123 switch (enmHostMode)
3124 {
3125 case SUPPAGINGMODE_32_BIT:
3126 case SUPPAGINGMODE_32_BIT_GLOBAL:
3127 enmShadowMode = PGMMODE_AMD64;
3128 break;
3129
3130 case SUPPAGINGMODE_PAE:
3131 case SUPPAGINGMODE_PAE_NX:
3132 case SUPPAGINGMODE_PAE_GLOBAL:
3133 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3134 enmShadowMode = PGMMODE_AMD64;
3135 break;
3136
3137 case SUPPAGINGMODE_AMD64:
3138 case SUPPAGINGMODE_AMD64_GLOBAL:
3139 case SUPPAGINGMODE_AMD64_NX:
3140 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3141 enmShadowMode = PGMMODE_AMD64;
3142 break;
3143
3144 default:
3145 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3146 }
3147 break;
3148
3149 default:
3150 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3151 }
3152
3153 /*
3154 * Override the shadow mode when NEM, IEM or nested paging is active.
3155 */
3156 if (!VM_IS_HM_ENABLED(pVM))
3157 {
3158 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
3159 pVM->pgm.s.fNestedPaging = true;
3160 enmShadowMode = PGMMODE_NONE;
3161 }
3162 else
3163 {
3164 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3165 pVM->pgm.s.fNestedPaging = fNestedPaging;
3166 if (fNestedPaging)
3167 {
3168 if (HMIsVmxActive(pVM))
3169 enmShadowMode = PGMMODE_EPT;
3170 else
3171 {
3172 /* The nested SVM paging depends on the host one. */
3173 Assert(HMIsSvmActive(pVM));
3174 if ( enmGuestMode == PGMMODE_AMD64
3175 || enmGuestMode == PGMMODE_AMD64_NX)
3176 enmShadowMode = PGMMODE_NESTED_AMD64;
3177 else
3178 switch (pVM->pgm.s.enmHostMode)
3179 {
3180 case SUPPAGINGMODE_32_BIT:
3181 case SUPPAGINGMODE_32_BIT_GLOBAL:
3182 enmShadowMode = PGMMODE_NESTED_32BIT;
3183 break;
3184
3185 case SUPPAGINGMODE_PAE:
3186 case SUPPAGINGMODE_PAE_GLOBAL:
3187 case SUPPAGINGMODE_PAE_NX:
3188 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3189 enmShadowMode = PGMMODE_NESTED_PAE;
3190 break;
3191
3192 case SUPPAGINGMODE_AMD64:
3193 case SUPPAGINGMODE_AMD64_GLOBAL:
3194 case SUPPAGINGMODE_AMD64_NX:
3195 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3196 enmShadowMode = PGMMODE_NESTED_AMD64;
3197 break;
3198
3199 default:
3200 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3201 }
3202 }
3203 }
3204#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3205 else
3206 {
3207 /* Nested paging is a requirement for nested VT-x. */
3208 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3209 }
3210#endif
3211 }
3212
3213 return enmShadowMode;
3214}
3215
3216
3217/**
3218 * Performs the actual mode change.
3219 * This is called by PGMChangeMode and pgmR3InitPaging().
3220 *
3221 * @returns VBox status code. May suspend or power off the VM on error, but this
3222 * will trigger using FFs and not informational status codes.
3223 *
3224 * @param pVM The cross context VM structure.
3225 * @param pVCpu The cross context virtual CPU structure.
3226 * @param enmGuestMode The new guest mode. This is assumed to be different from
3227 * the current mode.
3228 * @param fForce Whether to force a shadow paging mode change.
3229 */
3230VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
3231{
3232 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3233 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3234
3235 /*
3236 * Calc the shadow mode and switcher.
3237 */
3238 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3239 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
3240
3241 /*
3242 * Exit old mode(s).
3243 */
3244 /* shadow */
3245 if (fShadowModeChanged)
3246 {
3247 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3248 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3249 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3250 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3251 {
3252 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3253 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3254 }
3255 }
3256 else
3257 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3258
3259 /* guest */
3260 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3261 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3262 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3263 {
3264 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3265 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3266 }
3267 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3268 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3269 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3270 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3271
3272 /*
3273 * Change the paging mode data indexes.
3274 */
3275 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3276 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3277 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3278 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3279 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3280 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3281 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3282#ifdef IN_RING3
3283 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3284#endif
3285
3286 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3287 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3288 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3289 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3290 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3291 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3292 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3293#ifdef IN_RING3
3294 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3295#endif
3296
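    /* Note: g_aPgmBothModeData is a flattened 2D table, indexed below as
       (shadow type - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + guest type. */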
3297 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3298 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3299 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3300 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3301 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3302 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3303 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3304 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3305 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3306 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3307#ifdef VBOX_STRICT
3308 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3309#endif
3310
3311 /*
3312 * Enter new shadow mode (if changed).
3313 */
3314 if (fShadowModeChanged)
3315 {
3316 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3317 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3318 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3319 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3320 }
3321
3322 /*
3323 * Always flag the necessary updates
3324 */
3325 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3326
3327 /*
3328 * Enter the new guest and shadow+guest modes.
3329 */
3330 /* Calc the new CR3 value. */
3331 RTGCPHYS GCPhysCR3;
3332 switch (enmGuestMode)
3333 {
3334 case PGMMODE_REAL:
3335 case PGMMODE_PROTECTED:
3336 GCPhysCR3 = NIL_RTGCPHYS;
3337 break;
3338
3339 case PGMMODE_32_BIT:
3340 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3341 break;
3342
3343 case PGMMODE_PAE_NX:
3344 case PGMMODE_PAE:
3345 if (!pVM->cpum.ro.GuestFeatures.fPae)
3346#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3347 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3348 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3349#else
3350 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3351
3352#endif
3353 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3354 break;
3355
3356#ifdef VBOX_WITH_64_BITS_GUESTS
3357 case PGMMODE_AMD64_NX:
3358 case PGMMODE_AMD64:
3359 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3360 break;
3361#endif
3362 default:
3363 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3364 }
3365
3366#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3367 /*
3368 * If a nested-guest is using EPT paging:
3369 * - Update the second-level address translation (SLAT) mode.
3370 * - Indicate that the CR3 is nested-guest physical address.
3371 */
3372 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
3373 {
3374 if (PGMMODE_WITH_PAGING(enmGuestMode))
3375 {
3376 /*
3377 * Translate CR3 to its guest-physical address.
3378 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3379 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3380 */
3381 PGMPTWALK Walk;
3382 PGMPTWALKGST GstWalk;
3383 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3384 if (RT_SUCCESS(rc))
3385 { /* likely */ }
3386 else
3387 {
3388 /*
3389 * SLAT failed but we avoid reporting this to the caller because the caller
3390 * is not supposed to fail. The only time the caller needs to indicate a
3391 * failure to software is when PAE paging is used by the nested-guest, but
3392 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3393 * In all other cases, the failure will be reported when the CR3 is
3394 * translated on the next linear-address memory access.
3395 * See Intel spec. 27.2.1 "EPT Overview".
3396 */
3397 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3398
3399 /* Trying to coax PGM to succeed for the time being... */
3400 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3401 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3402 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3403 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3404 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3405 return VINF_SUCCESS;
3406 }
3407 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3408 GCPhysCR3 = Walk.GCPhys;
3409 }
3410 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3411 }
3412 else
3413 {
3414 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3415 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3416 }
3417#endif
3418
3419 /*
3420 * Enter the new guest mode.
3421 */
3422 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3423 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3424 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3425
3426 /* Set the new guest CR3 (and nested-guest CR3). */
3427 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3428
3429 /* status codes. */
3430 AssertRC(rc);
3431 AssertRC(rc2);
3432 if (RT_SUCCESS(rc))
3433 {
3434 rc = rc2;
3435 if (RT_SUCCESS(rc)) /* no informational status codes. */
3436 rc = VINF_SUCCESS;
3437 }
3438
3439 /*
3440 * Notify HM.
3441 */
3442 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3443 return rc;
3444}
3445
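/*
 * Illustrative sketch (not part of the original source): one way a caller could
 * derive the new guest paging mode from the current CR0/CR4/EFER values and hand
 * it to PGMHCChangeMode().  The helper name pgmSketchUpdateGuestMode and the
 * exact decision chain are assumptions for illustration only; the real decision
 * logic lives elsewhere in PGM/CPUM.
 */
#if 0 /* example only, not compiled */
static int pgmSketchUpdateGuestMode(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t const uCr0  = CPUMGetGuestCR0(pVCpu);
    uint64_t const uCr4  = CPUMGetGuestCR4(pVCpu);
    uint64_t const uEfer = CPUMGetGuestEFER(pVCpu);

    /* Pick the paging mode implied by the control registers. */
    PGMMODE enmNewMode;
    if (!(uCr0 & X86_CR0_PE))
        enmNewMode = PGMMODE_REAL;
    else if (!(uCr0 & X86_CR0_PG))
        enmNewMode = PGMMODE_PROTECTED;
    else if (uEfer & MSR_K6_EFER_LMA)
        enmNewMode = (uEfer & MSR_K6_EFER_NXE) ? PGMMODE_AMD64_NX : PGMMODE_AMD64;
    else if (uCr4 & X86_CR4_PAE)
        enmNewMode = (uEfer & MSR_K6_EFER_NXE) ? PGMMODE_PAE_NX : PGMMODE_PAE;
    else
        enmNewMode = PGMMODE_32_BIT;

    /* Only perform the (expensive) switch when the mode actually changes. */
    if (enmNewMode == PGMGetGuestMode(pVCpu))
        return VINF_SUCCESS;
    return PGMHCChangeMode(pVM, pVCpu, enmNewMode, false /*fForce*/);
}
#endif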
3446
3447/**
3448 * Called by CPUM or REM when CR0.WP changes to 1.
3449 *
3450 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3451 * @thread EMT
3452 */
3453VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3454{
3455 /*
3456 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3457 *
3458 * Use the counter to judge whether there might be pool pages with active
3459 * hacks in them. If there are, we will be running the risk of messing up
3460 * the guest by allowing it to write to read-only pages. Thus, we have to
3461 * clear the page pool ASAP if there is the slightest chance.
3462 */
3463 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3464 {
3465 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3466
3467 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3468 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3469 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3470 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3471 }
3472}
3473
3474
3475/**
3476 * Gets the current guest paging mode.
3477 *
3478 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3479 *
3480 * @returns The current paging mode.
3481 * @param pVCpu The cross context virtual CPU structure.
3482 */
3483VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3484{
3485 return pVCpu->pgm.s.enmGuestMode;
3486}
3487
3488
3489/**
3490 * Gets the current shadow paging mode.
3491 *
3492 * @returns The current paging mode.
3493 * @param pVCpu The cross context virtual CPU structure.
3494 */
3495VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3496{
3497 return pVCpu->pgm.s.enmShadowMode;
3498}
3499
3500
3501/**
3502 * Gets the current host paging mode.
3503 *
3504 * @returns The current paging mode.
3505 * @param pVM The cross context VM structure.
3506 */
3507VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3508{
3509 switch (pVM->pgm.s.enmHostMode)
3510 {
3511 case SUPPAGINGMODE_32_BIT:
3512 case SUPPAGINGMODE_32_BIT_GLOBAL:
3513 return PGMMODE_32_BIT;
3514
3515 case SUPPAGINGMODE_PAE:
3516 case SUPPAGINGMODE_PAE_GLOBAL:
3517 return PGMMODE_PAE;
3518
3519 case SUPPAGINGMODE_PAE_NX:
3520 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3521 return PGMMODE_PAE_NX;
3522
3523 case SUPPAGINGMODE_AMD64:
3524 case SUPPAGINGMODE_AMD64_GLOBAL:
3525 return PGMMODE_AMD64;
3526
3527 case SUPPAGINGMODE_AMD64_NX:
3528 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3529 return PGMMODE_AMD64_NX;
3530
3531 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3532 }
3533
3534 return PGMMODE_INVALID;
3535}
3536
3537
3538/**
3539 * Get mode name.
3540 *
3541 * @returns read-only name string.
3542 * @param enmMode The mode whose name is desired.
3543 */
3544VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3545{
3546 switch (enmMode)
3547 {
3548 case PGMMODE_REAL: return "Real";
3549 case PGMMODE_PROTECTED: return "Protected";
3550 case PGMMODE_32_BIT: return "32-bit";
3551 case PGMMODE_PAE: return "PAE";
3552 case PGMMODE_PAE_NX: return "PAE+NX";
3553 case PGMMODE_AMD64: return "AMD64";
3554 case PGMMODE_AMD64_NX: return "AMD64+NX";
3555 case PGMMODE_NESTED_32BIT: return "Nested-32";
3556 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3557 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3558 case PGMMODE_EPT: return "EPT";
3559 case PGMMODE_NONE: return "None";
3560 default: return "unknown mode value";
3561 }
3562}
3563
3564
3565#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3566/**
3567 * Gets the SLAT mode name.
3568 *
3569 * @returns The read-only SLAT mode descriptive string.
3570 * @param enmSlatMode The SLAT mode value.
3571 */
3572VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3573{
3574 switch (enmSlatMode)
3575 {
3576 case PGMSLAT_DIRECT: return "Direct";
3577 case PGMSLAT_EPT: return "EPT";
3578 case PGMSLAT_32BIT: return "32-bit";
3579 case PGMSLAT_PAE: return "PAE";
3580 case PGMSLAT_AMD64: return "AMD64";
3581 default: return "Unknown";
3582 }
3583}
3584#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
3585
3586
3587/**
3588 * Gets the physical address represented in the guest CR3 as PGM sees it.
3589 *
3590 * This is mainly for logging and debugging.
3591 *
3592 * @returns PGM's guest CR3 value.
3593 * @param pVCpu The cross context virtual CPU structure.
3594 */
3595VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3596{
3597 return pVCpu->pgm.s.GCPhysCR3;
3598}
3599
3600
3601
3602/**
3603 * Notification from CPUM that the EFER.NXE bit has changed.
3604 *
3605 * @param pVCpu The cross context virtual CPU structure of the CPU for
3606 * which EFER changed.
3607 * @param fNxe The new NXE state.
3608 */
3609VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3610{
3611/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3612 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3613
3614 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3615 if (fNxe)
3616 {
3617 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3618 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3619 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3620 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3621 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3622 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3623 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3624 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3625 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3626 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3627 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3628
3629 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3630 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3631 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3632 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3633 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3634 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3635 }
3636 else
3637 {
3638 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3639 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3640 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3641 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3642 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3643 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3644 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3645 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3646 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3647 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3648 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3649
3650 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3651 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3652 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3653 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3654 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3655 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3656 }
3657}
3658
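/*
 * Illustrative sketch (not part of the original source): roughly how the
 * "must be zero" (MBZ) masks updated above are typically consumed when walking
 * guest page tables -- an entry with any MBZ bit set is treated as a
 * reserved-bit violation.  The function name below is an assumption for
 * illustration only.
 */
#if 0 /* example only, not compiled */
static bool pgmSketchIsPaePteValid(PVMCPUCC pVCpu, X86PGPAEUINT uPte)
{
    /* With NXE clear, X86_PTE_PAE_NX is part of the MBZ mask, so a guest
       setting the NX bit would be caught here as a reserved-bit violation. */
    return !(uPte & pVCpu->pgm.s.fGstPaeMbzPteMask);
}
#endif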
3659
3660/**
3661 * Check if any PGM pool pages are marked dirty (not write monitored).
3662 *
3663 * @returns true if dirty pages are present, false otherwise.
3664 * @param pVM The cross context VM structure.
3665 */
3666VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3667{
3668 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3669}
3670
3671
3672/**
3673 * Check if this VCPU currently owns the PGM lock.
3674 *
3675 * @returns bool owner/not owner
3676 * @param pVM The cross context VM structure.
3677 */
3678VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3679{
3680 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3681}
3682
3683
3684/**
3685 * Enable or disable large page usage.
3686 *
3687 * @returns VBox status code.
3688 * @param pVM The cross context VM structure.
3689 * @param fUseLargePages Whether to use large pages.
3690 */
3691VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3692{
3693 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3694
3695 pVM->pgm.s.fUseLargePages = fUseLargePages;
3696 return VINF_SUCCESS;
3697}
3698
3699
3700/**
3701 * Acquire the PGM lock.
3702 *
3703 * @returns VBox status code
3704 * @param pVM The cross context VM structure.
3705 * @param fVoid Set if the caller cannot handle failure returns.
3706 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3707 */
3708#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3709int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3710#else
3711int pgmLock(PVMCC pVM, bool fVoid)
3712#endif
3713{
3714#if defined(VBOX_STRICT)
3715 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3716#else
3717 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3718#endif
3719 if (RT_SUCCESS(rc))
3720 return rc;
3721 if (fVoid)
3722 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3723 else
3724 AssertRC(rc);
3725 return rc;
3726}
3727
3728
3729/**
3730 * Release the PGM lock.
3731 *
3733 * @param pVM The cross context VM structure.
3734 */
3735void pgmUnlock(PVMCC pVM)
3736{
3737 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3738 pVM->pgm.s.cDeprecatedPageLocks = 0;
3739 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3740 if (rc == VINF_SEM_NESTED)
3741 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3742}
3743
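/*
 * Illustrative sketch (not part of the original source): the usual pattern for
 * taking and releasing the PGM lock through the PGM_LOCK_VOID()/PGM_UNLOCK()
 * wrappers, as done e.g. in PGMAssertCR3 and PGMSetGuestEptPtr further below.
 * The helper name is an assumption for illustration only.
 */
#if 0 /* example only, not compiled */
static void pgmSketchDoSomethingLocked(PVMCC pVM)
{
    PGM_LOCK_VOID(pVM);                 /* enter the PGM critical section */
    /* ... access PGM state that requires the lock ... */
    PGM_UNLOCK(pVM);                    /* leave the critical section */
}
#endif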
3744
3745#if !defined(IN_R0) || defined(LOG_ENABLED)
3746
3747/** Format handler for PGMPAGE.
3748 * @copydoc FNRTSTRFORMATTYPE */
3749static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3750 const char *pszType, void const *pvValue,
3751 int cchWidth, int cchPrecision, unsigned fFlags,
3752 void *pvUser)
3753{
3754 size_t cch;
3755 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3756 if (RT_VALID_PTR(pPage))
3757 {
3758 char szTmp[64+80];
3759
3760 cch = 0;
3761
3762 /* The single char state stuff. */
3763 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3764 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3765
3766# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3767 if (IS_PART_INCLUDED(5))
3768 {
3769 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3770 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3771 }
3772
3773 /* The type. */
3774 if (IS_PART_INCLUDED(4))
3775 {
3776 szTmp[cch++] = ':';
3777 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3778 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3779 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3780 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3781 }
3782
3783 /* The numbers. */
3784 if (IS_PART_INCLUDED(3))
3785 {
3786 szTmp[cch++] = ':';
3787 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3788 }
3789
3790 if (IS_PART_INCLUDED(2))
3791 {
3792 szTmp[cch++] = ':';
3793 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3794 }
3795
3796 if (IS_PART_INCLUDED(6))
3797 {
3798 szTmp[cch++] = ':';
3799 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3800 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3801 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3802 }
3803# undef IS_PART_INCLUDED
3804
3805 cch = pfnOutput(pvArgOutput, szTmp, cch);
3806 }
3807 else
3808 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3809 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3810 return cch;
3811}
3812
3813
3814/** Format handler for PGMRAMRANGE.
3815 * @copydoc FNRTSTRFORMATTYPE */
3816static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3817 const char *pszType, void const *pvValue,
3818 int cchWidth, int cchPrecision, unsigned fFlags,
3819 void *pvUser)
3820{
3821 size_t cch;
3822 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3823 if (RT_VALID_PTR(pRam))
3824 {
3825 char szTmp[80];
3826 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3827 cch = pfnOutput(pvArgOutput, szTmp, cch);
3828 }
3829 else
3830 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3831 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3832 return cch;
3833}
3834
3835 /** Format type handlers to be registered/deregistered. */
3836static const struct
3837{
3838 char szType[24];
3839 PFNRTSTRFORMATTYPE pfnHandler;
3840} g_aPgmFormatTypes[] =
3841{
3842 { "pgmpage", pgmFormatTypeHandlerPage },
3843 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3844};
3845
3846#endif /* !IN_R0 || LOG_ENABLED */
3847
3848/**
3849 * Registers the global string format types.
3850 *
3851 * This should be called at module load time or in some other manner that
3852 * ensures it is called exactly once.
3853 *
3854 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3855 */
3856VMMDECL(int) PGMRegisterStringFormatTypes(void)
3857{
3858#if !defined(IN_R0) || defined(LOG_ENABLED)
3859 int rc = VINF_SUCCESS;
3860 unsigned i;
3861 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3862 {
3863 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3864# ifdef IN_RING0
3865 if (rc == VERR_ALREADY_EXISTS)
3866 {
3867 /* in case of cleanup failure in ring-0 */
3868 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3869 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3870 }
3871# endif
3872 }
3873 if (RT_FAILURE(rc))
3874 while (i-- > 0)
3875 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3876
3877 return rc;
3878#else
3879 return VINF_SUCCESS;
3880#endif
3881}
3882
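/*
 * Illustrative sketch (not part of the original source): once registered, the
 * custom format types above are used through IPRT's %R[...] format extension,
 * e.g. in logging statements.  Treat the exact format-spec syntax as an
 * assumption based on the IPRT string formatter conventions; the helper name
 * is for illustration only.
 */
#if 0 /* example only, not compiled */
static void pgmSketchLogPage(PCPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("page=%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}
#endif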
3883
3884/**
3885 * Deregisters the global string format types.
3886 *
3887 * This should be called at module unload time or in some other manner that
3888 * ensures it is called exactly once.
3889 */
3890VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3891{
3892#if !defined(IN_R0) || defined(LOG_ENABLED)
3893 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3894 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3895#endif
3896}
3897
3898
3899#ifdef VBOX_STRICT
3900/**
3901 * Asserts that everything related to the guest CR3 is correctly shadowed.
3902 *
3903 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3904 * and assert the correctness of the guest CR3 mapping before asserting that the
3905 * shadow page tables are in sync with the guest page tables.
3906 *
3907 * @returns Number of conflicts.
3908 * @param pVM The cross context VM structure.
3909 * @param pVCpu The cross context virtual CPU structure.
3910 * @param cr3 The current guest CR3 register value.
3911 * @param cr4 The current guest CR4 register value.
3912 */
3913VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3914{
3915 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3916
3917 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3918 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3919 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3920
3921 PGM_LOCK_VOID(pVM);
3922 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3923 PGM_UNLOCK(pVM);
3924
3925 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3926 return cErrors;
3927}
3928#endif /* VBOX_STRICT */
3929
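/*
 * Illustrative sketch (not part of the original source): how a strict-build
 * consistency check of the shadow paging structures could be triggered by
 * feeding PGMAssertCR3() the current guest CR3/CR4 values from CPUM.  The
 * helper name is an assumption for illustration only.
 */
#if 0 /* example only, not compiled */
# ifdef VBOX_STRICT
static void pgmSketchStrictCheckCr3(PVMCC pVM, PVMCPUCC pVCpu)
{
    unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
    AssertMsg(cErrors == 0, ("PGMAssertCR3 found %u error(s)\n", cErrors));
}
# endif
#endif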
3930
3931/**
3932 * Updates PGM's copy of the guest's EPT pointer.
3933 *
3934 * @param pVCpu The cross context virtual CPU structure.
3935 * @param uEptPtr The EPT pointer.
3936 *
3937 * @remarks This can be called as part of VM-entry so we might be in the midst of
3938 * switching to VMX non-root mode.
3939 */
3940VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3941{
3942 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3943 PGM_LOCK_VOID(pVM);
3944 pVCpu->pgm.s.uEptPtr = uEptPtr;
3945 PGM_UNLOCK(pVM);
3946}
3947