VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 97197

Last change on this file: r97197, checked in by vboxsync, 2 years ago

VMM/PGM,IEM,EM: Changed FNPGMRZPHYSPFHANDLER, PGMTrap0eHandler and PGMR0Trap0eHandlerNPMisconfig to take PCPUMCTX instead of PCPUMCTXCORE parameters; dropped PCPUMCTXCORE parameters from IEMExecOneBypassEx, PGMInterpretInstruction and EMInterpretInstruction together with some associated cleanups.
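
For reference, the corresponding new signature appears verbatim further down in this listing; the ring-0 page-fault entry point now receives the register context directly as a PCPUMCTX rather than a PCPUMCTXCORE:

    VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPTR pvFault)

(The declaration is quoted from the PGMTrap0eHandler definition in the file below; nothing beyond what the listing itself shows is assumed.)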

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 149.0 KB
/* $Id: PGMAll.cpp 97197 2022-10-18 11:09:55Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
* Header Files                                                                                                                   *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*********************************************************************************************************************************
* Internal Functions                                                                                                             *
*********************************************************************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
                          PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
                              PPGMPTWALKGST pGstWalk);
static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
                                   PPGMPTWALKGST pGstWalkAll);
#endif
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);


/*
 * Second level translation - EPT.
 */
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
# include "PGMSlatDefs.h"
# include "PGMAllGstSlatEpt.cpp.h"
# undef PGM_SLAT_TYPE
#endif


/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
/** @todo retire this hack. */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_BIG
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - 32-bit nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - EPT.
 */
#define PGM_SHW_TYPE PGM_TYPE_EPT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - NEM / None.
 */
#define PGM_SHW_TYPE PGM_TYPE_NONE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME



/**
 * Guest mode data array.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_REAL,
        PGM_GST_NAME_REAL(GetPage),
        PGM_GST_NAME_REAL(ModifyPage),
        PGM_GST_NAME_REAL(Enter),
        PGM_GST_NAME_REAL(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_REAL(Relocate),
#endif
    },
    {
        PGM_TYPE_PROT,
        PGM_GST_NAME_PROT(GetPage),
        PGM_GST_NAME_PROT(ModifyPage),
        PGM_GST_NAME_PROT(Enter),
        PGM_GST_NAME_PROT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PROT(Relocate),
#endif
    },
    {
        PGM_TYPE_32BIT,
        PGM_GST_NAME_32BIT(GetPage),
        PGM_GST_NAME_32BIT(ModifyPage),
        PGM_GST_NAME_32BIT(Enter),
        PGM_GST_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_GST_NAME_PAE(GetPage),
        PGM_GST_NAME_PAE(ModifyPage),
        PGM_GST_NAME_PAE(Enter),
        PGM_GST_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PAE(Relocate),
#endif
    },
#ifdef VBOX_WITH_64_BITS_GUESTS
    {
        PGM_TYPE_AMD64,
        PGM_GST_NAME_AMD64(GetPage),
        PGM_GST_NAME_AMD64(ModifyPage),
        PGM_GST_NAME_AMD64(Enter),
        PGM_GST_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_GST_NAME_AMD64(Relocate),
# endif
    },
#endif
};


/**
 * The shadow mode data array.
 */
PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
{
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
    {
        PGM_TYPE_32BIT,
        PGM_SHW_NAME_32BIT(GetPage),
        PGM_SHW_NAME_32BIT(ModifyPage),
        PGM_SHW_NAME_32BIT(Enter),
        PGM_SHW_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_SHW_NAME_PAE(GetPage),
        PGM_SHW_NAME_PAE(ModifyPage),
        PGM_SHW_NAME_PAE(Enter),
        PGM_SHW_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_AMD64,
        PGM_SHW_NAME_AMD64(GetPage),
        PGM_SHW_NAME_AMD64(ModifyPage),
        PGM_SHW_NAME_AMD64(Enter),
        PGM_SHW_NAME_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_32BIT,
        PGM_SHW_NAME_NESTED_32BIT(GetPage),
        PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
        PGM_SHW_NAME_NESTED_32BIT(Enter),
        PGM_SHW_NAME_NESTED_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_PAE,
        PGM_SHW_NAME_NESTED_PAE(GetPage),
        PGM_SHW_NAME_NESTED_PAE(ModifyPage),
        PGM_SHW_NAME_NESTED_PAE(Enter),
        PGM_SHW_NAME_NESTED_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_AMD64,
        PGM_SHW_NAME_NESTED_AMD64(GetPage),
        PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
        PGM_SHW_NAME_NESTED_AMD64(Enter),
        PGM_SHW_NAME_NESTED_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_EPT,
        PGM_SHW_NAME_EPT(GetPage),
        PGM_SHW_NAME_EPT(ModifyPage),
        PGM_SHW_NAME_EPT(Enter),
        PGM_SHW_NAME_EPT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_EPT(Relocate),
#endif
    },
    {
        PGM_TYPE_NONE,
        PGM_SHW_NAME_NONE(GetPage),
        PGM_SHW_NAME_NONE(ModifyPage),
        PGM_SHW_NAME_NONE(Enter),
        PGM_SHW_NAME_NONE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NONE(Relocate),
#endif
    },
};


/**
 * The guest+shadow mode data array.
 */
PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
{
#if !defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }

#elif !defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }

#elif defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }

#elif defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }

#else
# error "Misconfig."
#endif

    /* 32-bit shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */

    /* 32-bit nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */

    /* EPT nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */

    /* NONE / NEM: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */


#undef PGMMODEDATABTH_ENTRY
#undef PGMMODEDATABTH_NULL_ENTRY
};


/** Mask array used by pgmGetCr3MaskForMode.
 * X86_CR3_AMD64_PAGE_MASK is used for modes that don't have a CR3 or EPTP. */
static uint64_t const g_auCr3MaskForMode[PGMMODE_MAX] =
{
    /* [PGMMODE_INVALID] = */ X86_CR3_AMD64_PAGE_MASK,
    /* [PGMMODE_REAL] = */ X86_CR3_AMD64_PAGE_MASK,
    /* [PGMMODE_PROTECTED] = */ X86_CR3_AMD64_PAGE_MASK,
    /* [PGMMODE_32_BIT] = */ X86_CR3_PAGE_MASK,
    /* [PGMMODE_PAE] = */ X86_CR3_PAE_PAGE_MASK,
    /* [PGMMODE_PAE_NX] = */ X86_CR3_PAE_PAGE_MASK,
    /* [PGMMODE_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
    /* [PGMMODE_AMD64_NX] = */ X86_CR3_AMD64_PAGE_MASK,
    /* [PGMMODE_NESTED_32BIT] = */ X86_CR3_PAGE_MASK,
    /* [PGMMODE_NESTED_PAE] = */ X86_CR3_PAE_PAGE_MASK,
    /* [PGMMODE_NESTED_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
    /* [PGMMODE_EPT] = */ X86_CR3_EPT_PAGE_MASK,
    /* [PGMMODE_NONE] = */ X86_CR3_AMD64_PAGE_MASK,
};


/**
 * Gets the physical address mask for CR3 in the given paging mode.
 *
 * The mask is for eliminating flags and other stuff in CR3/EPTP when
 * extracting the physical address. It is not for validating whether there are
 * reserved bits set. PGM ASSUMES that whoever loaded the CR3 value and passed
 * it to PGM checked for reserved bits, including reserved physical address
 * bits.
 *
 * @returns The CR3 mask.
 * @param enmMode The paging mode.
 * @param enmSlatMode The second-level address translation mode.
 */
DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
{
    if (enmSlatMode == PGMSLAT_DIRECT)
    {
        Assert(enmMode != PGMMODE_EPT);
        return g_auCr3MaskForMode[(unsigned)enmMode < (unsigned)PGMMODE_MAX ? enmMode : 0];
    }
    Assert(enmSlatMode == PGMSLAT_EPT);
    return X86_CR3_EPT_PAGE_MASK;
}


/**
 * Gets the masked CR3 value according to the current guest paging mode.
 *
 * See disclaimer in pgmGetCr3MaskForMode.
 *
 * @returns The masked PGM CR3 value.
 * @param pVCpu The cross context virtual CPU structure.
 * @param uCr3 The raw guest CR3 value.
 */
DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
{
    uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
    RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
    return GCPhysCR3;
}


#ifdef IN_RING0
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param pVCpu The cross context virtual CPU structure.
 * @param uErr The trap error code.
 * @param pCtx Pointer to the register context for the CPU.
 * @param pvFault The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPTR pvFault)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );


# ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
    }
# endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
    bool fLockTaken = false;
    int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pCtx, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        PGM_UNLOCK(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

        /* Note: hack alert for difficult to reproduce problem. */
        if (   rc == VERR_PAGE_NOT_PRESENT                /* SMP only ; disassembly might fail. */
            || rc == VERR_PAGE_TABLE_NOT_PRESENT          /* seen with UNI & SMP */
            || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT  /* seen with SMP */
            || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)    /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pCtx->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
                     pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
    return rc;
}
#endif /* IN_RING0 */


/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval VINF_SUCCESS on success.
 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtrPage Page to invalidate.
 */
VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval VINF_PGM_SYNC_CR3 - handled.
 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtrPage Page to invalidate.
 *
 * @remark ASSUMES the page table entry or page directory is valid. Fairly
 *         safe, but there could be edge cases!
 *
 * @todo Flush page or page directory only if necessary!
 * @todo VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

    IEMTlbInvalidatePage(pVCpu, GCPtrPage);

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
    PGM_LOCK_VOID(pVM);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);

    PGM_UNLOCK(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);

    /* Ignore all irrelevant error codes. */
    if (   rc == VERR_PAGE_NOT_PRESENT
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param pVCpu The cross context virtual CPU structure.
 * @param pvFault Fault address.
 */
VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCPUCC pVCpu, RTGCPTR pvFault)
{
    RT_NOREF(pvFault);
    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Guest Context virtual address of the page.
 * @param pfFlags Where to store the flags. These are X86_PTE_*.
 * @param pHCPhys Where to store the HC physical address of the page.
 *                This is page aligned.
 * @remark You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param fMask The AND mask - page flags X86_PTE_*.
 *              Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 * @remark You must use PGMMapModifyPage() for pages in a mapping.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The caller must know with 101% certainty that the guest page tables map this
 * as writable too. This function will deal with shared, zero and write monitored
 * pages.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
        return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
    return VINF_SUCCESS;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it supervisor and writable.
 *
 * This is for dealing with CR0.WP=0 and readonly user pages.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fBigPage Whether or not this is a big page. If it is, we have to
 *                 change the shadow PDE as well. If it isn't, the caller
 *                 has checked that the shadow PDE doesn't need changing.
 *                 We ASSUME 4KB pages backing the big page here!
 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
 */
int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
{
    int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
    if (rc == VINF_SUCCESS && fBigPage)
    {
        /* this is a bit ugly... */
        switch (pVCpu->pgm.s.enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->u |= X86_PDE_RW;
                Log(("-> PDE=%#llx (32)\n", pPde->u));
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->u |= X86_PDE_RW;
                Log(("-> PDE=%#llx (PAE)\n", pPde->u));
                break;
            }
            default:
                AssertFailedReturn(VERR_INTERNAL_ERROR_4);
        }
    }
    return rc;
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr The address.
 * @param uGstPdpe Guest PDPT entry. Valid.
 * @param ppPD Receives address of page directory
 */
int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pShwPage;
    int rc;
    PGM_LOCK_ASSERT_OWNER(pVM);


    /* Allocate page directory if not present. */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
    X86PGPAEUINT const uPdpe = pPdpe->u;
    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
    {
        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);

        pgmPoolCacheUsed(pPool, pShwPage);

        /* Update the entry if necessary. */
        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
        if (uPdpeNew == uPdpe)
        { /* likely */ }
        else
            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
    }
    else
    {
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;
        if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
        {
            /* AMD-V nested paging or real/protected mode without paging. */
            GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
        }
        else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
        {
            if (uGstPdpe & X86_PDPE_P)
            {
                GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
            }
            else
            {
                /* PD not present; guest must reload CR3 to change it.
                 * No need to monitor anything in this case. */
                /** @todo r=bird: WTF is hit?!? */
                /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
                GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_PHYS;
                Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
            }
        }
        else
        {
            GCPdPt = CPUMGetGuestCR3(pVCpu);
            enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);

        /* Hook it up. */
        ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
    }
    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param GCPtr The address.
 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
 */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
    if (!(uPdpe & X86_PDPE_P))
    {
        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    }
    AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);

    *ppShwPde = pShwPde;
    return VINF_SUCCESS;
}
1449
1450
1451/**
1452 * Syncs the SHADOW page directory pointer for the specified address.
1453 *
1454 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1455 *
1456 * The caller is responsible for making sure the guest has a valid PD before
1457 * calling this function.
1458 *
1459 * @returns VBox status code.
1460 * @param pVCpu The cross context virtual CPU structure.
1461 * @param GCPtr The address.
1462 * @param uGstPml4e Guest PML4 entry (valid).
1463 * @param uGstPdpe Guest PDPT entry (valid).
1464 * @param ppPD Receives address of page directory
1465 */
1466static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1467{
1468 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1469 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1470 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1471 int rc;
1472
1473 PGM_LOCK_ASSERT_OWNER(pVM);
1474
1475 /*
1476 * PML4.
1477 */
1478 PPGMPOOLPAGE pShwPage;
1479 {
1480 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1481 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1482 X86PGPAEUINT const uPml4e = pPml4e->u;
1483
1484 /* Allocate page directory pointer table if not present. */
1485 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1486 {
1487 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1488 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1489
1490 pgmPoolCacheUsed(pPool, pShwPage);
1491
1492 /* Update the entry if needed. */
1493 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1494 | (uPml4e & PGM_PML4_FLAGS);
1495 if (uPml4e == uPml4eNew)
1496 { /* likely */ }
1497 else
1498 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1499 }
1500 else
1501 {
1502 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1503
1504 RTGCPTR64 GCPml4;
1505 PGMPOOLKIND enmKind;
1506 if (fNestedPagingOrNoGstPaging)
1507 {
1508 /* AMD-V nested paging or real/protected mode without paging */
1509 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1510 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1511 }
1512 else
1513 {
1514 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1515 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1516 }
1517
1518 /* Create a reference back to the PDPT by using the index in its shadow page. */
1519 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1520 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1521 &pShwPage);
1522 AssertRCReturn(rc, rc);
1523
1524 /* Hook it up. */
1525 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1526 | (uPml4e & PGM_PML4_FLAGS));
1527 }
1528 }
1529
1530 /*
1531 * PDPT.
1532 */
1533 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1534 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1535 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1536 X86PGPAEUINT const uPdpe = pPdpe->u;
1537
1538 /* Allocate page directory if not present. */
1539 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1540 {
1541 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1542 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1543
1544 pgmPoolCacheUsed(pPool, pShwPage);
1545
1546 /* Update the entry if needed. */
1547 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1548 | (uPdpe & PGM_PDPT_FLAGS);
1549 if (uPdpe == uPdpeNew)
1550 { /* likely */ }
1551 else
1552 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1553 }
1554 else
1555 {
1556 RTGCPTR64 GCPdPt;
1557 PGMPOOLKIND enmKind;
1558 if (fNestedPagingOrNoGstPaging)
1559 {
1560 /* AMD-V nested paging or real/protected mode without paging */
1561 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1562 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1563 }
1564 else
1565 {
1566 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1567 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1568 }
1569
1570 /* Create a reference back to the PDPT by using the index in its shadow page. */
1571 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1572 pShwPage->idx, iPdPt, false /*fLockPage*/,
1573 &pShwPage);
1574 AssertRCReturn(rc, rc);
1575
1576 /* Hook it up. */
1577 ASMAtomicWriteU64(&pPdpe->u,
1578 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1579 }
1580
1581 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1582 return VINF_SUCCESS;
1583}
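/* Quick illustration of the long mode index split used above (9 bits per
   level): iPml4 = bits 47:39, iPdPt = bits 38:30 and the PD index = bits
   29:21.  E.g. GCPtr = 0xFFFFF000 yields iPml4 = 0, iPdPt = 3 and PD index
   511, i.e. the last 2 MiB slot below 4 GiB. */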
1584
1585
1586/**
1587 * Gets the SHADOW page directory pointer for the specified address (long mode).
1588 *
1589 * @returns VBox status code.
1590 * @param pVCpu The cross context virtual CPU structure.
1591 * @param GCPtr The address.
1592 * @param ppPml4e Receives the address of the page map level 4 entry.
1593 * @param ppPdpt Receives the address of the page directory pointer table.
1594 * @param ppPD Receives the address of the page directory.
1595 */
1596DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1597{
1598 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1599 PGM_LOCK_ASSERT_OWNER(pVM);
1600
1601 /*
1602 * PML4
1603 */
1604 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1605 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1606 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1607 if (ppPml4e)
1608 *ppPml4e = (PX86PML4E)pPml4e;
1609 X86PGPAEUINT const uPml4e = pPml4e->u;
1610 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1611    if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for NULL page frame number! */
1612 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1613
1614 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1615 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1616 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1617
1618 /*
1619 * PDPT
1620 */
1621 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1622 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1623 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1624    if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for NULL page frame number! */
1625 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1626
1627 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1628 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1629
1630 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1631 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1632 return VINF_SUCCESS;
1633}
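/* Unlike pgmShwSyncLongModePDPtr above, this getter never allocates shadow
   pages: a missing PML4E or PDPTE simply yields
   VERR_PAGE_MAP_LEVEL4_NOT_PRESENT or VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT so
   the caller can decide whether a sync is needed. */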
1634
1635
1636/**
1637 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1638 * backing pages in case the PDPT or PML4 entry is missing.
1639 *
1640 * @returns VBox status code.
1641 * @param pVCpu The cross context virtual CPU structure.
1642 * @param GCPtr The address.
1643 * @param ppPdpt Receives the address of the PDPT.
1644 * @param ppPD Receives the address of the page directory.
1645 */
1646static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1647{
1648 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1649 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1650 int rc;
1651
1652 Assert(pVM->pgm.s.fNestedPaging);
1653 PGM_LOCK_ASSERT_OWNER(pVM);
1654
1655 /*
1656 * PML4 level.
1657 */
1658 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1659 Assert(pPml4);
1660
1661 /* Allocate page directory pointer table if not present. */
1662 PPGMPOOLPAGE pShwPage;
1663 {
1664 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1665 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1666 EPTPML4E Pml4e;
1667 Pml4e.u = pPml4e->u;
1668 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1669 {
1670 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1671 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1672 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1673 &pShwPage);
1674 AssertRCReturn(rc, rc);
1675
1676 /* Hook up the new PDPT now. */
1677 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1678 }
1679 else
1680 {
1681 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1682 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1683
1684 pgmPoolCacheUsed(pPool, pShwPage);
1685
1686 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1687 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1688 { }
1689 else
1690 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1691 }
1692 }
1693
1694 /*
1695 * PDPT level.
1696 */
1697 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1698 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1699 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1700
1701 if (ppPdpt)
1702 *ppPdpt = pPdpt;
1703
1704 /* Allocate page directory if not present. */
1705 EPTPDPTE Pdpe;
1706 Pdpe.u = pPdpe->u;
1707 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1708 {
1709 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1710 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1711 pShwPage->idx, iPdPt, false /*fLockPage*/,
1712 &pShwPage);
1713 AssertRCReturn(rc, rc);
1714
1715 /* Hook up the new PD now. */
1716 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1717 }
1718 else
1719 {
1720 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1721 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1722
1723 pgmPoolCacheUsed(pPool, pShwPage);
1724
1725        /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need to sync). */
1726 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1727 { }
1728 else
1729 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1730 }
1731
1732 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1733 return VINF_SUCCESS;
1734}
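/* On the presence test above: EPT entries have no dedicated present bit; an
   entry is considered present when at least one of the read/write/execute
   bits is set, hence the check against EPT_E_READ together with the physical
   address mask.  The shift values mirror long mode (EPT_PML4_SHIFT = 39,
   EPT_PDPT_SHIFT = 30), giving 512 entries per level. */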
1735
1736
1737#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1738/**
1739 * Syncs the SHADOW nested-guest page directory pointer for the specified address.
1740 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1741 *
1742 * @returns VBox status code.
1743 * @param pVCpu The cross context virtual CPU structure.
1744 * @param GCPhysNested The nested-guest physical address.
1745 * @param ppPdpt Where to store the PDPT. Optional, can be NULL.
1746 * @param ppPD Where to store the PD. Optional, can be NULL.
1747 * @param pGstWalkAll The guest walk info.
1748 */
1749static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
1750 PPGMPTWALKGST pGstWalkAll)
1751{
1752 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1753 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1754 int rc;
1755
1756 PPGMPOOLPAGE pShwPage;
1757 Assert(pVM->pgm.s.fNestedPaging);
1758 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
1759 PGM_LOCK_ASSERT_OWNER(pVM);
1760
1761 /*
1762 * PML4 level.
1763 */
1764 {
1765 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1766 Assert(pPml4);
1767
1768 /* Allocate page directory pointer table if not present. */
1769 {
1770 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
1771 const unsigned iPml4e = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1772 PEPTPML4E pPml4e = &pPml4->a[iPml4e];
1773
1774 if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1775 {
1776 RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
1777 rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
1778 PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
1779 &pShwPage);
1780 AssertRCReturn(rc, rc);
1781
1782 /* Hook up the new PDPT now. */
1783 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1784 }
1785 else
1786 {
1787 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1788 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1789
1790 pgmPoolCacheUsed(pPool, pShwPage);
1791
1792 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1793 if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
1794 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1795 }
1796 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1797 Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
1798 }
1799 }
1800
1801 /*
1802 * PDPT level.
1803 */
1804 {
1805 AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
1806
1807 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1808 if (ppPdpt)
1809 *ppPdpt = pPdpt;
1810
1811 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
1812 const unsigned iPdPte = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1813 PEPTPDPTE pPdpte = &pPdpt->a[iPdPte];
1814
1815 if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1816 {
1817 RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
1818 rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1819 pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
1820 AssertRCReturn(rc, rc);
1821
1822 /* Hook up the new PD now. */
1823 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1824 }
1825 else
1826 {
1827 pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
1828 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1829
1830 pgmPoolCacheUsed(pPool, pShwPage);
1831
1832            /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need to sync). */
1833 if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
1834 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1835 }
1836 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1837 Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
1838
1839 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1840 }
1841
1842 return VINF_SUCCESS;
1843}
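/* The shadow entries written here combine the pool page's host physical
   address (pShwPage->Core.Key) with the guest's own EPT permission bits,
   filtered through fGstEptShadowedPml4eMask / fGstEptShadowedPdpteMask, so a
   nested-guest mapping never carries permissions the guest hypervisor did not
   grant in its EPT tables. */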
1844#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1845
1846
1847#ifdef IN_RING0
1848/**
1849 * Synchronizes a range of nested page table entries.
1850 *
1851 * The caller must own the PGM lock.
1852 *
1853 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1854 * @param GCPhys Where to start.
1855 * @param cPages            The number of pages whose entries should be synced.
1856 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1857 * host paging mode for AMD-V).
1858 */
1859int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1860{
1861 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1862
1863/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1864 int rc;
1865 switch (enmShwPagingMode)
1866 {
1867 case PGMMODE_32_BIT:
1868 {
1869 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1870 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1871 break;
1872 }
1873
1874 case PGMMODE_PAE:
1875 case PGMMODE_PAE_NX:
1876 {
1877 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1878 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1879 break;
1880 }
1881
1882 case PGMMODE_AMD64:
1883 case PGMMODE_AMD64_NX:
1884 {
1885 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1886 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1887 break;
1888 }
1889
1890 case PGMMODE_EPT:
1891 {
1892 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1893 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1894 break;
1895 }
1896
1897 default:
1898 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1899 }
1900 return rc;
1901}
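/* The PdeDummy above merely satisfies the SyncPage template signature: the
   *_PROT flavours are used because there is no guest paging to consult on
   this nested-paging path, so a present/user/writable/accessed dummy value is
   passed for every shadow mode. */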
1902#endif /* IN_RING0 */
1903
1904
1905/**
1906 * Gets effective Guest OS page information.
1907 *
1908 * When GCPtr is in a big page, the function will return as if it was a normal
1909 * 4KB page. If distinguishing between big and normal pages becomes necessary
1910 * at a later point, a dedicated variant of PGMGstGetPage() will be created for
1911 * that purpose.
1912 *
1913 * @returns VBox status code.
1914 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1915 * @param GCPtr Guest Context virtual address of the page.
1916 * @param pWalk Where to store the page walk information.
1917 */
1918VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1919{
1920 VMCPU_ASSERT_EMT(pVCpu);
1921 Assert(pWalk);
1922 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1923 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1924 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1925 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1926}
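/* Minimal usage sketch (illustrative only; see the PGMPTWALK definition in
   VBox/vmm/pgm.h for the exact field layout):
       PGMPTWALK Walk;
       int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
       if (RT_SUCCESS(rc))
           Log(("GCPtr=%RGv -> GCPhys=%RGp fEffective=%#RX64\n",
                GCPtr, Walk.GCPhys, Walk.fEffective));
   The walk carries the translated physical address together with the
   effective access bits accumulated over all paging levels. */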
1927
1928
1929/**
1930 * Maps the guest CR3.
1931 *
1932 * @returns VBox status code.
1933 * @param pVCpu The cross context virtual CPU structure.
1934 * @param GCPhysCr3 The guest CR3 value.
1935 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1936 */
1937DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1938{
1939 /** @todo this needs some reworking wrt. locking? */
1940 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1941 PGM_LOCK_VOID(pVM);
1942 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1943 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1944
1945 RTHCPTR HCPtrGuestCr3;
1946 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1947 PGM_UNLOCK(pVM);
1948
1949 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1950 return rc;
1951}
1952
1953
1954/**
1955 * Unmaps the guest CR3.
1956 *
1957 * @returns VBox status code.
1958 * @param pVCpu The cross context virtual CPU structure.
1959 */
1960DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1961{
1962 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1963 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1964 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
1965 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1966}
1967
1968
1969/**
1970 * Performs a guest page table walk.
1971 *
1972 * The guest should be in paged protect mode or long mode when making a call to
1973 * this function.
1974 *
1975 * @returns VBox status code.
1976 * @retval VINF_SUCCESS on success.
1977 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1978 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1979 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1980 *
1981 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1982 * @param GCPtr The guest virtual address to walk by.
1983 * @param pWalk Where to return the walk result. This is valid for some
1984 * error codes as well.
1985 * @param pGstWalk The guest mode specific page walk information.
1986 */
1987int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1988{
1989 VMCPU_ASSERT_EMT(pVCpu);
1990 switch (pVCpu->pgm.s.enmGuestMode)
1991 {
1992 case PGMMODE_32_BIT:
1993 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1994 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1995
1996 case PGMMODE_PAE:
1997 case PGMMODE_PAE_NX:
1998 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1999 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
2000
2001 case PGMMODE_AMD64:
2002 case PGMMODE_AMD64_NX:
2003 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
2004 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
2005
2006 case PGMMODE_REAL:
2007 case PGMMODE_PROTECTED:
2008 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2009 return VERR_PGM_NOT_USED_IN_MODE;
2010
2011 case PGMMODE_EPT:
2012 case PGMMODE_NESTED_32BIT:
2013 case PGMMODE_NESTED_PAE:
2014 case PGMMODE_NESTED_AMD64:
2015 default:
2016 AssertFailed();
2017 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2018 return VERR_PGM_NOT_USED_IN_MODE;
2019 }
2020}
2021
2022#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2023
2024/**
2025 * Performs a guest second-level address translation (SLAT).
2026 *
2027 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
2028 * function.
2029 *
2030 * @returns VBox status code.
2031 * @retval VINF_SUCCESS on success.
2032 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2033 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2034 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2035 *
2036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2037 * @param GCPhysNested The nested-guest physical address being translated
2038 * (input).
2039 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
2040 * valid. This indicates the SLAT is caused when
2041 * translating a nested-guest linear address.
2042 * @param GCPtrNested The nested-guest virtual address that initiated the
2043 * SLAT. If none, pass NIL_RTGCPTR.
2044 * @param pWalk Where to return the walk result. This is valid for
2045 * some error codes as well.
2046 * @param pGstWalk The second-level paging-mode specific walk
2047 * information.
2048 */
2049static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
2050 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2051{
2052 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
2053 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
2054 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
2055 switch (pVCpu->pgm.s.enmGuestSlatMode)
2056 {
2057 case PGMSLAT_EPT:
2058 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2059 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
2060
2061 default:
2062 AssertFailed();
2063 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2064 return VERR_PGM_NOT_USED_IN_MODE;
2065 }
2066}
2067
2068
2069/**
2070 * Performs a guest second-level address translation (SLAT) for a nested-guest
2071 * physical address.
2072 *
2073 * This version requires the SLAT mode to be provided by the caller because we could
2074 * be in the process of switching paging modes (MOV CRX) and cannot presume control
2075 * register values.
2076 *
2077 * @returns VBox status code.
2078 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2079 * @param enmSlatMode The second-level paging mode to use.
2080 * @param GCPhysNested The nested-guest physical address to translate.
2081 * @param pWalk Where to store the walk result.
2082 * @param pGstWalk Where to store the second-level paging-mode specific
2083 * walk information.
2084 */
2085static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
2086 PPGMPTWALKGST pGstWalk)
2087{
2088 AssertPtr(pWalk);
2089 AssertPtr(pGstWalk);
2090 switch (enmSlatMode)
2091 {
2092 case PGMSLAT_EPT:
2093 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2094            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */,
2095 pWalk, &pGstWalk->u.Ept);
2096
2097 default:
2098 AssertFailed();
2099 return VERR_PGM_NOT_USED_IN_MODE;
2100 }
2101}
2102
2103#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
2104
2105/**
2106 * Tries to continue the previous walk.
2107 *
2108 * @note Requires the caller to hold the PGM lock from the first
2109 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
2110 * we cannot use the pointers.
2111 *
2112 * @returns VBox status code.
2113 * @retval VINF_SUCCESS on success.
2114 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2115 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2116 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2117 *
2118 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2119 * @param GCPtr The guest virtual address to walk by.
2120 * @param pWalk Pointer to the previous walk result and where to return
2121 * the result of this walk. This is valid for some error
2122 * codes as well.
2123 * @param pGstWalk The guest-mode specific walk information.
2124 */
2125int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2126{
2127 /*
2128     * We can only handle successful walks.
2129 * We also limit ourselves to the next page.
2130 */
2131 if ( pWalk->fSucceeded
2132 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
2133 {
2134 Assert(pWalk->uLevel == 0);
2135 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2136 {
2137 /*
2138 * AMD64
2139 */
2140 if (!pWalk->fGigantPage && !pWalk->fBigPage)
2141 {
2142 /*
2143 * We fall back to full walk if the PDE table changes, if any
2144 * reserved bits are set, or if the effective page access changes.
2145 */
2146 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2147 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2148 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2149 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2150
2151 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
2152 {
2153 if (pGstWalk->u.Amd64.pPte)
2154 {
2155 X86PTEPAE Pte;
2156 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
2157 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2158 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2159 {
2160 pWalk->GCPtr = GCPtr;
2161 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2162 pGstWalk->u.Amd64.Pte.u = Pte.u;
2163 pGstWalk->u.Amd64.pPte++;
2164 return VINF_SUCCESS;
2165 }
2166 }
2167 }
2168 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
2169 {
2170 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2171 if (pGstWalk->u.Amd64.pPde)
2172 {
2173 X86PDEPAE Pde;
2174 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
2175 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
2176 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2177 {
2178 /* Get the new PTE and check out the first entry. */
2179 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2180 &pGstWalk->u.Amd64.pPt);
2181 if (RT_SUCCESS(rc))
2182 {
2183 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
2184 X86PTEPAE Pte;
2185 Pte.u = pGstWalk->u.Amd64.pPte->u;
2186 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2187 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2188 {
2189 pWalk->GCPtr = GCPtr;
2190 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2191 pGstWalk->u.Amd64.Pte.u = Pte.u;
2192 pGstWalk->u.Amd64.Pde.u = Pde.u;
2193 pGstWalk->u.Amd64.pPde++;
2194 return VINF_SUCCESS;
2195 }
2196 }
2197 }
2198 }
2199 }
2200 }
2201 else if (!pWalk->fGigantPage)
2202 {
2203 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2204 {
2205 pWalk->GCPtr = GCPtr;
2206 pWalk->GCPhys += GUEST_PAGE_SIZE;
2207 return VINF_SUCCESS;
2208 }
2209 }
2210 else
2211 {
2212 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2213 {
2214 pWalk->GCPtr = GCPtr;
2215 pWalk->GCPhys += GUEST_PAGE_SIZE;
2216 return VINF_SUCCESS;
2217 }
2218 }
2219 }
2220 }
2221    /* Cases we don't handle; do a full walk. */
2222 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2223}
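/* Summary of the fast path above: it only fires when the new address is
   exactly one 4 KiB page past the previous successful walk.  For normal pages
   it advances pPte within the same page table, or loads the next page table
   from the following PDE when a 2 MiB boundary is crossed; for big/gigantic
   pages it simply adds 4 KiB to GCPhys while staying inside the same
   2 MiB / 1 GiB region.  Everything else falls back to a full
   pgmGstPtWalk(). */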
2224
2225
2226/**
2227 * Modify page flags for a range of pages in the guest's tables.
2228 *
2229 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2230 *
2231 * @returns VBox status code.
2232 * @param pVCpu The cross context virtual CPU structure.
2233 * @param GCPtr Virtual address of the first page in the range.
2234 * @param cb Size (in bytes) of the range to apply the modification to.
2235 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2236 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2237 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2238 */
2239VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2240{
2241 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2242 VMCPU_ASSERT_EMT(pVCpu);
2243
2244 /*
2245 * Validate input.
2246 */
2247 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2248 Assert(cb);
2249
2250 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2251
2252 /*
2253 * Adjust input.
2254 */
2255 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2256 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2257 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2258
2259 /*
2260 * Call worker.
2261 */
2262 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2263 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2264 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2265 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2266
2267 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2268 return rc;
2269}
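/* Illustrative calls for the fFlags/fMask contract described above, where the
   new flags end up as (old & fMask) | fFlags:
       - make a range read-only:  PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
       - make it writable again:  PGMGstModifyPage(pVCpu, GCPtr, cb, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
   Note the 64-bit cast when inverting the mask, as the doc comment warns. */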
2270
2271
2272/**
2273 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2274 *
2275 * @returns @c true if the PDPE is valid, @c false otherwise.
2276 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2277 * @param paPaePdpes The PAE PDPEs to validate.
2278 *
2279 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2280 */
2281VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2282{
2283 Assert(paPaePdpes);
2284 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2285 {
2286 X86PDPE const PaePdpe = paPaePdpes[i];
2287 if ( !(PaePdpe.u & X86_PDPE_P)
2288 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2289 { /* likely */ }
2290 else
2291 return false;
2292 }
2293 return true;
2294}
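/* Only the must-be-zero (reserved) bits are rejected here: a PDPE that is not
   present at all passes the check, matching the 'likely' branch above.  Full
   present/permission handling is left to the actual page table walk. */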
2295
2296
2297/**
2298 * Performs the lazy mapping of the 32-bit guest PD.
2299 *
2300 * @returns VBox status code.
2301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2302 * @param ppPd Where to return the pointer to the mapping. This is
2303 * always set.
2304 */
2305int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2306{
2307 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2308 PGM_LOCK_VOID(pVM);
2309
2310 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2311
2312 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2313 PPGMPAGE pPage;
2314 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2315 if (RT_SUCCESS(rc))
2316 {
2317 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2318 if (RT_SUCCESS(rc))
2319 {
2320# ifdef IN_RING3
2321 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2322 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2323# else
2324 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2325 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2326# endif
2327 PGM_UNLOCK(pVM);
2328 return VINF_SUCCESS;
2329 }
2330 AssertRC(rc);
2331 }
2332 PGM_UNLOCK(pVM);
2333
2334 *ppPd = NULL;
2335 return rc;
2336}
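/* The R3/R0 pointer pair above is deliberately split: a mapping established
   in one context is not usable in the other, so the pointer for the other
   context is reset to NIL and re-established lazily when that context needs
   it.  The same pattern repeats below for the PAE PDPT/PDs and the AMD64
   PML4. */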
2337
2338
2339/**
2340 * Performs the lazy mapping of the PAE guest PDPT.
2341 *
2342 * @returns VBox status code.
2343 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2344 * @param ppPdpt Where to return the pointer to the mapping. This is
2345 * always set.
2346 */
2347int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2348{
2349 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2350 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2351 PGM_LOCK_VOID(pVM);
2352
2353 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2354 PPGMPAGE pPage;
2355 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2356 if (RT_SUCCESS(rc))
2357 {
2358 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2359 if (RT_SUCCESS(rc))
2360 {
2361# ifdef IN_RING3
2362 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2363 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2364# else
2365 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2366 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2367# endif
2368 PGM_UNLOCK(pVM);
2369 return VINF_SUCCESS;
2370 }
2371 AssertRC(rc);
2372 }
2373
2374 PGM_UNLOCK(pVM);
2375 *ppPdpt = NULL;
2376 return rc;
2377}
2378
2379
2380/**
2381 * Performs the lazy mapping / updating of a PAE guest PD.
2382 *
2383 * @returns VBox status code.
2385 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2386 * @param iPdpt Which PD entry to map (0..3).
2387 * @param ppPd Where to return the pointer to the mapping. This is
2388 * always set.
2389 */
2390int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2391{
2392 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2393 PGM_LOCK_VOID(pVM);
2394
2395 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2396 Assert(pGuestPDPT);
2397 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2398 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2399 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2400
2401 PPGMPAGE pPage;
2402 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2403 if (RT_SUCCESS(rc))
2404 {
2405 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2406 AssertRC(rc);
2407 if (RT_SUCCESS(rc))
2408 {
2409# ifdef IN_RING3
2410 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2411 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2412# else
2413 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2414 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2415# endif
2416 if (fChanged)
2417 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2418 PGM_UNLOCK(pVM);
2419 return VINF_SUCCESS;
2420 }
2421 }
2422
2423 /* Invalid page or some failure, invalidate the entry. */
2424 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2425 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2426 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2427
2428 PGM_UNLOCK(pVM);
2429 return rc;
2430}
2431
2432
2433/**
2434 * Performs the lazy mapping of the AMD64 guest PML4 table.
2435 *
2436 * @returns VBox status code.
2437 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2438 * @param ppPml4 Where to return the pointer to the mapping. This will
2439 * always be set.
2440 */
2441int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2442{
2443 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2444 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2445 PGM_LOCK_VOID(pVM);
2446
2447 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2448 PPGMPAGE pPage;
2449 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2450 if (RT_SUCCESS(rc))
2451 {
2452 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2453 if (RT_SUCCESS(rc))
2454 {
2455# ifdef IN_RING3
2456 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2457 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2458# else
2459 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2460 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2461# endif
2462 PGM_UNLOCK(pVM);
2463 return VINF_SUCCESS;
2464 }
2465 }
2466
2467 PGM_UNLOCK(pVM);
2468 *ppPml4 = NULL;
2469 return rc;
2470}
2471
2472
2473#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2474/**
2475 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2476 *
2477 * @returns VBox status code.
2478 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2479 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2480 * always be set.
2481 */
2482int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2483{
2484 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2485 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2486 PGM_LOCK_VOID(pVM);
2487
2488 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2489 PPGMPAGE pPage;
2490 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2491 if (RT_SUCCESS(rc))
2492 {
2493 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2494 if (RT_SUCCESS(rc))
2495 {
2496# ifdef IN_RING3
2497 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2498 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2499# else
2500 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2501 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2502# endif
2503 PGM_UNLOCK(pVM);
2504 return VINF_SUCCESS;
2505 }
2506 }
2507
2508 PGM_UNLOCK(pVM);
2509 *ppEptPml4 = NULL;
2510 return rc;
2511}
2512#endif
2513
2514
2515/**
2516 * Gets the current CR3 register value for the shadow memory context.
2517 * @returns CR3 value.
2518 * @param pVCpu The cross context virtual CPU structure.
2519 */
2520VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2521{
2522 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2523 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2524 return pPoolPage->Core.Key;
2525}
2526
2527
2528/**
2529 * Forces lazy remapping of the guest's PAE page-directory structures.
2530 *
2531 * @param pVCpu The cross context virtual CPU structure.
2532 */
2533static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2534{
2535 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2536 {
2537 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2538 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2539 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2540 }
2541}
2542
2543
2544#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2545/**
2546 * Performs second-level address translation for the given CR3 and updates the
2547 * nested-guest CR3 when successful.
2548 *
2549 * @returns VBox status code.
2550 * @param pVCpu The cross context virtual CPU structure.
2551 * @param uCr3 The masked nested-guest CR3 value.
2552 * @param pGCPhysCr3 Where to store the translated CR3.
2553 *
2554 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2555 * mindful of this in code that's hyper sensitive to the order of
2556 * operations.
2557 */
2558static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2559{
2560 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2561 {
2562 PGMPTWALK Walk;
2563 PGMPTWALKGST GstWalk;
2564 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2565 if (RT_SUCCESS(rc))
2566 {
2567 /* Update nested-guest CR3. */
2568 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2569
2570 /* Pass back the translated result. */
2571 *pGCPhysCr3 = Walk.GCPhys;
2572 return VINF_SUCCESS;
2573 }
2574
2575 /* Translation failed. */
2576 *pGCPhysCr3 = NIL_RTGCPHYS;
2577 return rc;
2578 }
2579
2580 /*
2581 * If the nested-guest CR3 has not changed, then the previously
2582 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2583 */
2584 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2585 return VINF_SUCCESS;
2586}
2587#endif
2588
2589
2590/**
2591 * Performs and schedules necessary updates following a CR3 load or reload.
2592 *
2593 * This will normally involve mapping the guest PD or nPDPT.
2594 *
2595 * @returns VBox status code.
2596 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2597 * safely be ignored and overridden since the FF will be set too then.
2598 * @param pVCpu The cross context virtual CPU structure.
2599 * @param cr3 The new cr3.
2600 * @param fGlobal Indicates whether this is a global flush or not.
2601 */
2602VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2603{
2604 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2605 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2606
2607 VMCPU_ASSERT_EMT(pVCpu);
2608
2609 /*
2610 * Always flag the necessary updates; this is required for hardware acceleration.
2611 */
2612 /** @todo optimize this, it shouldn't always be necessary. */
2613 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2614 if (fGlobal)
2615 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2616
2617 /*
2618 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2619 */
2620 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2621 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2622#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2623 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2624 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2625 {
2626 RTGCPHYS GCPhysOut;
2627 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2628 if (RT_SUCCESS(rc))
2629 GCPhysCR3 = GCPhysOut;
2630 else
2631 {
2632 /* CR3 SLAT translation failed but we try to pretend it
2633 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2634 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2635 int const rc2 = pgmGstUnmapCr3(pVCpu);
2636 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2637 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2638 return rc2;
2639 }
2640 }
2641#endif
2642
2643 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2644 int rc = VINF_SUCCESS;
2645 if (GCPhysOldCR3 != GCPhysCR3)
2646 {
2647 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2648 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2649 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2650
2651 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2652 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2653 if (RT_LIKELY(rc == VINF_SUCCESS))
2654 { }
2655 else
2656 {
2657 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2658 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2659 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2660 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2661 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2662 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2663 }
2664
2665 if (fGlobal)
2666 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2667 else
2668 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2669 }
2670 else
2671 {
2672#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2673 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2674 if (pPool->cDirtyPages)
2675 {
2676 PGM_LOCK_VOID(pVM);
2677 pgmPoolResetDirtyPages(pVM);
2678 PGM_UNLOCK(pVM);
2679 }
2680#endif
2681 if (fGlobal)
2682 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2683 else
2684 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2685
2686 /*
2687 * Flush PAE PDPTEs.
2688 */
2689 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2690 pgmGstFlushPaePdpes(pVCpu);
2691 }
2692
2693 IEMTlbInvalidateAll(pVCpu);
2694 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2695 return rc;
2696}
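/* If pfnMapCR3 cannot complete (VINF_PGM_SYNC_CR3), the code above restores
   the previous GCPhysCR3 and sets PGM_SYNC_MAP_CR3 so the mapping is retried
   from PGMSyncCR3() once the pending pool/CR3 sync has been serviced; see the
   "aborted MapCR3" handling in PGMSyncCR3() below. */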
2697
2698
2699/**
2700 * Performs and schedules necessary updates following a CR3 load or reload when
2701 * using nested or extended paging.
2702 *
2703 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2704 * TLB and triggering a SyncCR3.
2705 *
2706 * This will normally involve mapping the guest PD or nPDPT.
2707 *
2708 * @returns VBox status code.
2709 * @retval VINF_SUCCESS.
2710 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2711 * paging modes). This can safely be ignored and overridden since the
2712 * FF will be set too then.
2713 * @param pVCpu The cross context virtual CPU structure.
2714 * @param cr3 The new CR3.
2715 */
2716VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2717{
2718 VMCPU_ASSERT_EMT(pVCpu);
2719
2720 /* We assume we're only called in nested paging mode. */
2721 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2722
2723 /*
2724 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2725 */
2726 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2727 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2728#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2729 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2730 {
2731 RTGCPHYS GCPhysOut;
2732 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2733 if (RT_SUCCESS(rc))
2734 GCPhysCR3 = GCPhysOut;
2735 else
2736 {
2737 /* CR3 SLAT translation failed but we try to pretend it
2738 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2739 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2740 int const rc2 = pgmGstUnmapCr3(pVCpu);
2741 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2742 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2743 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2744 return rc2;
2745 }
2746 }
2747#endif
2748
2749 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2750 int rc = VINF_SUCCESS;
2751 if (GCPhysOldCR3 != GCPhysCR3)
2752 {
2753 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2754 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2755 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2756
2757 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2758 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2759
2760 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2761 }
2762 /*
2763 * Flush PAE PDPTEs.
2764 */
2765 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2766 pgmGstFlushPaePdpes(pVCpu);
2767
2768 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2769 return rc;
2770}
2771
2772
2773/**
2774 * Synchronize the paging structures.
2775 *
2776 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2777 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
2778 * in several places, most importantly whenever the CR3 is loaded.
2779 *
2780 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2781 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2782 * the VMM into guest context.
2783 * @param pVCpu The cross context virtual CPU structure.
2784 * @param cr0 Guest context CR0 register
2785 * @param cr3 Guest context CR3 register
2786 * @param cr4 Guest context CR4 register
2787 * @param fGlobal Including global page directories or not
2788 */
2789VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2790{
2791 int rc;
2792
2793 VMCPU_ASSERT_EMT(pVCpu);
2794
2795 /*
2796 * The pool may have pending stuff and even require a return to ring-3 to
2797 * clear the whole thing.
2798 */
2799 rc = pgmPoolSyncCR3(pVCpu);
2800 if (rc != VINF_SUCCESS)
2801 return rc;
2802
2803 /*
2804 * We might be called when we shouldn't.
2805 *
2806 * The mode switching will ensure that the PD is resynced after every mode
2807 * switch. So, if we find ourselves here when in protected or real mode
2808 * we can safely clear the FF and return immediately.
2809 */
2810 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2811 {
2812 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2813 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2814 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2815 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2816 return VINF_SUCCESS;
2817 }
2818
2819 /* If global pages are not supported, then all flushes are global. */
2820    /* If global pages are not enabled (no CR4.PGE), then all flushes are global. */
2821 fGlobal = true;
2822 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2823 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2824
2825 /*
2826 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2827 * This should be done before SyncCR3.
2828 */
2829 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2830 {
2831 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2832
2833 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2834 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2835#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2836 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2837 {
2838 RTGCPHYS GCPhysOut;
2839 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2840 if (RT_SUCCESS(rc2))
2841 GCPhysCR3 = GCPhysOut;
2842 else
2843 {
2844 /* CR3 SLAT translation failed but we try to pretend it
2845 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2846 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2847 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2848 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2849 return rc2;
2850 }
2851 }
2852#endif
2853 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2854 if (GCPhysOldCR3 != GCPhysCR3)
2855 {
2856 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2857 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2858 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2859 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2860 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2861 }
2862
2863 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2864 if ( rc == VINF_PGM_SYNC_CR3
2865 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2866 {
2867 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2868#ifdef IN_RING3
2869 rc = pgmPoolSyncCR3(pVCpu);
2870#else
2871 if (rc == VINF_PGM_SYNC_CR3)
2872 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2873 return VINF_PGM_SYNC_CR3;
2874#endif
2875 }
2876 AssertRCReturn(rc, rc);
2877 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2878 }
2879
2880 /*
2881 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2882 */
2883 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2884
2885 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2886 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2887 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2888 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2889
2890 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2891 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2892 if (rc == VINF_SUCCESS)
2893 {
2894 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2895 {
2896 /* Go back to ring 3 if a pgm pool sync is again pending. */
2897 return VINF_PGM_SYNC_CR3;
2898 }
2899
2900 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2901 {
2902 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2904 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2905 }
2906 }
2907
2908 /*
2909 * Now flush the CR3 (guest context).
2910 */
2911 if (rc == VINF_SUCCESS)
2912 PGM_INVL_VCPU_TLBS(pVCpu);
2913 return rc;
2914}
2915
2916
2917/**
2918 * Maps all the PAE PDPE entries.
2919 *
2920 * @returns VBox status code.
2921 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2922 * @param paPaePdpes The new PAE PDPE values.
2923 *
2924 * @remarks This function may be invoked during the process of changing the guest
2925 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2926 * reflect PAE paging just yet.
2927 */
2928VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2929{
2930 Assert(paPaePdpes);
2931 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2932 {
2933 X86PDPE const PaePdpe = paPaePdpes[i];
2934
2935 /*
2936 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2937         * is deferred.[1] Also, different situations require different handling of invalid
2938 * PDPE entries. Here we assume the caller has already validated or doesn't require
2939 * validation of the PDPEs.
2940 *
2941 * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
2942 * validated by the VMX transition.
2943 *
2944 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2945 */
2946 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2947 {
2948 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2949 RTHCPTR HCPtr;
2950
2951 RTGCPHYS GCPhys;
2952#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2953 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2954 {
2955 PGMPTWALK Walk;
2956 PGMPTWALKGST GstWalk;
2957 RTGCPHYS const GCPhysNested = PaePdpe.u & X86_PDPE_PG_MASK;
2958 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysNested, &Walk, &GstWalk);
2959 if (RT_SUCCESS(rc))
2960 GCPhys = Walk.GCPhys;
2961 else
2962 {
2963 /*
2964 * Second-level address translation of the PAE PDPE has failed but we must -NOT-
2965 * abort and return a failure now. This is because we're called from a Mov CRx
2966 * instruction (or similar operation). Let's just pretend success but flag that
2967 * we need to map this PDPE lazily later.
2968 *
2969 * See Intel spec. 25.3 "Changes to instruction behavior in VMX non-root operation".
2970 * See Intel spec. 28.3.1 "EPT Overview".
2971 */
2972 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2973 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2974 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2975 continue;
2976 }
2977 }
2978 else
2979#endif
2980 {
2981 GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2982 }
2983
2984 PGM_LOCK_VOID(pVM);
2985 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2986 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2987 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2988 PGM_UNLOCK(pVM);
2989 if (RT_SUCCESS(rc))
2990 {
2991#ifdef IN_RING3
2992 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2993 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2994#else
2995 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2996 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2997#endif
2998 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2999 continue;
3000 }
3001            AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
3002 }
3003 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
3004 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
3005 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
3006 }
3007 return VINF_SUCCESS;
3008}
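/* Layout reminder for the loop above: PAE has exactly four PDPEs
   (X86_PG_PAE_PDPE_ENTRIES), each covering 1 GiB of guest address space.  For
   every entry the host mapping is cached in apGstPaePDsR3/R0[i] with the
   backing guest physical address in aGCPhysGstPaePDs[i]; entries that cannot
   be mapped right now are parked as NIL so pgmGstLazyMapPaePD() can retry
   later. */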
3009
3010
3011/**
3012 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
3013 *
3014 * @returns VBox status code.
3015 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3016 * @param cr3 The guest CR3 value.
3017 *
3018 * @remarks This function may be invoked during the process of changing the guest
3019 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
3020 * PAE paging just yet.
3021 */
3022VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
3023{
3024 /*
3025 * Read the page-directory-pointer table (PDPT) at CR3.
3026 */
3027 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
3028 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
3029
3030#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3031 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3032 {
3033 RTGCPHYS GCPhysOut;
3034 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
3035 if (RT_SUCCESS(rc))
3036 GCPhysCR3 = GCPhysOut;
3037 else
3038 {
3039 Log(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
3040 return rc;
3041 }
3042 }
3043#endif
3044
3045 RTHCPTR HCPtrGuestCr3;
3046 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
3047 if (RT_SUCCESS(rc))
3048 {
3049 /*
3050 * Validate the page-directory-pointer table entries (PDPE).
3051 */
3052 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
3053 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
3054 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
3055 {
3056 /*
3057 * Map the PDPT.
3058 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
3059 * that PGMFlushTLB will be called soon and only a change to CR3 then
3060 * will cause the shadow page tables to be updated.
3061 */
3062#ifdef IN_RING3
3063 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
3064 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
3065#else
3066 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
3067 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
3068#endif
3069
3070 /*
3071 * Update CPUM and map the 4 PAE PDPEs.
3072 */
3073 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
3074 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
3075 if (RT_SUCCESS(rc))
3076 {
3077#ifdef IN_RING3
3078 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
3079 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
3080#else
3081 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
3082 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
3083#endif
3084 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
3085 }
3086 }
3087 else
3088 rc = VERR_PGM_PAE_PDPE_RSVD;
3089 }
3090 return rc;
3091}
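/* For PAE the PDPT is read straight from CR3: X86_CR3_PAE_PAGE_MASK keeps the
   PDPT base bits (31:5), i.e. only 32-byte alignment is required, which is
   exactly the size of the four 8-byte PDPEs copied out above before they are
   validated and handed to CPUM and PGMGstMapPaePdpes(). */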
3092
3093
3094/**
3095 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
3096 *
3097 * @returns VBox status code, with the following informational code for
3098 * VM scheduling.
3099 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
3100 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
3101 *
3102 * @param pVCpu The cross context virtual CPU structure.
3103 * @param cr0 The new cr0.
3104 * @param cr4 The new cr4.
3105 * @param efer The new extended feature enable register.
3106 * @param fForce Whether to force a mode change.
3107 */
3108VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
3109{
3110 VMCPU_ASSERT_EMT(pVCpu);
3111
3112 /*
3113 * Calc the new guest mode.
3114 *
3115 * Note! We check PG before PE and without requiring PE because of the
3116 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
3117 */
3118 PGMMODE enmGuestMode;
3119 if (cr0 & X86_CR0_PG)
3120 {
3121 if (!(cr4 & X86_CR4_PAE))
3122 {
3123 bool const fPse = !!(cr4 & X86_CR4_PSE);
3124 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
3125 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
3126 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
3127 enmGuestMode = PGMMODE_32_BIT;
3128 }
3129 else if (!(efer & MSR_K6_EFER_LME))
3130 {
3131 if (!(efer & MSR_K6_EFER_NXE))
3132 enmGuestMode = PGMMODE_PAE;
3133 else
3134 enmGuestMode = PGMMODE_PAE_NX;
3135 }
3136 else
3137 {
3138 if (!(efer & MSR_K6_EFER_NXE))
3139 enmGuestMode = PGMMODE_AMD64;
3140 else
3141 enmGuestMode = PGMMODE_AMD64_NX;
3142 }
3143 }
3144 else if (!(cr0 & X86_CR0_PE))
3145 enmGuestMode = PGMMODE_REAL;
3146 else
3147 enmGuestMode = PGMMODE_PROTECTED;
3148
3149 /*
3150 * Did it change?
3151 */
3152 if ( !fForce
3153 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
3154 return VINF_SUCCESS;
3155
3156 /* Flush the TLB */
3157 PGM_INVL_VCPU_TLBS(pVCpu);
3158 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
3159}
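
/*
 * Illustrative sketch (not compiled): how a control-register write path might
 * hand the new CR0/CR4/EFER values to PGMChangeMode. The calling context and
 * the surrounding error handling are assumptions for illustration; the
 * PGMChangeMode and CPUM getter calls are the real APIs.
 *
 *      uint64_t const uCr0  = CPUMGetGuestCR0(pVCpu);
 *      uint64_t const uCr4  = CPUMGetGuestCR4(pVCpu);
 *      uint64_t const uEfer = CPUMGetGuestEFER(pVCpu);
 *      int rc = PGMChangeMode(pVCpu, uCr0, uCr4, uEfer, false); // fForce=false
 *      AssertRC(rc);
 */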
3160
3161
3162/**
3163 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3164 *
3165 * @returns PGM_TYPE_*.
3166 * @param pgmMode The mode value to convert.
3167 */
3168DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3169{
3170 switch (pgmMode)
3171 {
3172 case PGMMODE_REAL: return PGM_TYPE_REAL;
3173 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3174 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3175 case PGMMODE_PAE:
3176 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3177 case PGMMODE_AMD64:
3178 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3179 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3180 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3181 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3182 case PGMMODE_EPT: return PGM_TYPE_EPT;
3183 case PGMMODE_NONE: return PGM_TYPE_NONE;
3184 default:
3185 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3186 }
3187}
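
/*
 * Worked example (comment only): PGMMODE_PAE and PGMMODE_PAE_NX both collapse
 * to PGM_TYPE_PAE, so a PAE guest with or without NX shares one entry in the
 * mode data tables:
 *
 *      uintptr_t const idxGst = pgmModeToType(PGMMODE_PAE_NX);  // == PGM_TYPE_PAE
 *      // g_aPgmGuestModeData[idxGst] then supplies pfnGetPage, pfnModifyPage, ...
 */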
3188
3189
3190/**
3191 * Calculates the shadow paging mode.
3192 *
3193 * @returns The shadow paging mode.
3194 * @param pVM The cross context VM structure.
3195 * @param enmGuestMode The guest mode.
3196 * @param enmHostMode The host mode.
3197 * @param enmShadowMode The current shadow mode.
3198 */
3199static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3200{
3201 switch (enmGuestMode)
3202 {
3203 case PGMMODE_REAL:
3204 case PGMMODE_PROTECTED:
3205 switch (enmHostMode)
3206 {
3207 case SUPPAGINGMODE_32_BIT:
3208 case SUPPAGINGMODE_32_BIT_GLOBAL:
3209 enmShadowMode = PGMMODE_32_BIT;
3210 break;
3211
3212 case SUPPAGINGMODE_PAE:
3213 case SUPPAGINGMODE_PAE_NX:
3214 case SUPPAGINGMODE_PAE_GLOBAL:
3215 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3216 enmShadowMode = PGMMODE_PAE;
3217 break;
3218
3219 case SUPPAGINGMODE_AMD64:
3220 case SUPPAGINGMODE_AMD64_GLOBAL:
3221 case SUPPAGINGMODE_AMD64_NX:
3222 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3223 enmShadowMode = PGMMODE_PAE;
3224 break;
3225
3226 default:
3227 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3228 }
3229 break;
3230
3231 case PGMMODE_32_BIT:
3232 switch (enmHostMode)
3233 {
3234 case SUPPAGINGMODE_32_BIT:
3235 case SUPPAGINGMODE_32_BIT_GLOBAL:
3236 enmShadowMode = PGMMODE_32_BIT;
3237 break;
3238
3239 case SUPPAGINGMODE_PAE:
3240 case SUPPAGINGMODE_PAE_NX:
3241 case SUPPAGINGMODE_PAE_GLOBAL:
3242 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3243 enmShadowMode = PGMMODE_PAE;
3244 break;
3245
3246 case SUPPAGINGMODE_AMD64:
3247 case SUPPAGINGMODE_AMD64_GLOBAL:
3248 case SUPPAGINGMODE_AMD64_NX:
3249 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3250 enmShadowMode = PGMMODE_PAE;
3251 break;
3252
3253 default:
3254 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3255 }
3256 break;
3257
3258 case PGMMODE_PAE:
3259 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3260 switch (enmHostMode)
3261 {
3262 case SUPPAGINGMODE_32_BIT:
3263 case SUPPAGINGMODE_32_BIT_GLOBAL:
3264 enmShadowMode = PGMMODE_PAE;
3265 break;
3266
3267 case SUPPAGINGMODE_PAE:
3268 case SUPPAGINGMODE_PAE_NX:
3269 case SUPPAGINGMODE_PAE_GLOBAL:
3270 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3271 enmShadowMode = PGMMODE_PAE;
3272 break;
3273
3274 case SUPPAGINGMODE_AMD64:
3275 case SUPPAGINGMODE_AMD64_GLOBAL:
3276 case SUPPAGINGMODE_AMD64_NX:
3277 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3278 enmShadowMode = PGMMODE_PAE;
3279 break;
3280
3281 default:
3282 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3283 }
3284 break;
3285
3286 case PGMMODE_AMD64:
3287 case PGMMODE_AMD64_NX:
3288 switch (enmHostMode)
3289 {
3290 case SUPPAGINGMODE_32_BIT:
3291 case SUPPAGINGMODE_32_BIT_GLOBAL:
3292 enmShadowMode = PGMMODE_AMD64;
3293 break;
3294
3295 case SUPPAGINGMODE_PAE:
3296 case SUPPAGINGMODE_PAE_NX:
3297 case SUPPAGINGMODE_PAE_GLOBAL:
3298 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3299 enmShadowMode = PGMMODE_AMD64;
3300 break;
3301
3302 case SUPPAGINGMODE_AMD64:
3303 case SUPPAGINGMODE_AMD64_GLOBAL:
3304 case SUPPAGINGMODE_AMD64_NX:
3305 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3306 enmShadowMode = PGMMODE_AMD64;
3307 break;
3308
3309 default:
3310 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3311 }
3312 break;
3313
3314 default:
3315 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3316 }
3317
3318 /*
3319 * Override the shadow mode when NEM, IEM or nested paging is active.
3320 */
3321 if (!VM_IS_HM_ENABLED(pVM))
3322 {
3323 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
3324 pVM->pgm.s.fNestedPaging = true;
3325 enmShadowMode = PGMMODE_NONE;
3326 }
3327 else
3328 {
3329 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3330 pVM->pgm.s.fNestedPaging = fNestedPaging;
3331 if (fNestedPaging)
3332 {
3333 if (HMIsVmxActive(pVM))
3334 enmShadowMode = PGMMODE_EPT;
3335 else
3336 {
3337 /* The nested SVM paging depends on the host one. */
3338 Assert(HMIsSvmActive(pVM));
3339 if ( enmGuestMode == PGMMODE_AMD64
3340 || enmGuestMode == PGMMODE_AMD64_NX)
3341 enmShadowMode = PGMMODE_NESTED_AMD64;
3342 else
3343 switch (pVM->pgm.s.enmHostMode)
3344 {
3345 case SUPPAGINGMODE_32_BIT:
3346 case SUPPAGINGMODE_32_BIT_GLOBAL:
3347 enmShadowMode = PGMMODE_NESTED_32BIT;
3348 break;
3349
3350 case SUPPAGINGMODE_PAE:
3351 case SUPPAGINGMODE_PAE_GLOBAL:
3352 case SUPPAGINGMODE_PAE_NX:
3353 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3354 enmShadowMode = PGMMODE_NESTED_PAE;
3355 break;
3356
3357 case SUPPAGINGMODE_AMD64:
3358 case SUPPAGINGMODE_AMD64_GLOBAL:
3359 case SUPPAGINGMODE_AMD64_NX:
3360 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3361 enmShadowMode = PGMMODE_NESTED_AMD64;
3362 break;
3363
3364 default:
3365 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3366 }
3367 }
3368 }
3369#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3370 else
3371 {
3372 /* Nested paging is a requirement for nested VT-x. */
3373 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3374 }
3375#endif
3376 }
3377
3378 return enmShadowMode;
3379}
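
/*
 * Worked example (comment only), assuming HM is enabled and nested paging is
 * not active, so the override block above leaves the table result untouched:
 *
 *      pgmCalcShadowMode(pVM, PGMMODE_32_BIT, SUPPAGINGMODE_AMD64_GLOBAL_NX, enmCur) == PGMMODE_PAE
 *      pgmCalcShadowMode(pVM, PGMMODE_AMD64,  SUPPAGINGMODE_AMD64_GLOBAL_NX, enmCur) == PGMMODE_AMD64
 */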
3380
3381
3382/**
3383 * Performs the actual mode change.
3384 * This is called by PGMChangeMode and pgmR3InitPaging().
3385 *
3386 * @returns VBox status code. May suspend or power off the VM on error, but this
3387 * will be signalled using FFs rather than informational status codes.
3388 *
3389 * @param pVM The cross context VM structure.
3390 * @param pVCpu The cross context virtual CPU structure.
3391 * @param enmGuestMode The new guest mode. This is assumed to be different from
3392 * the current mode.
3393 * @param fForce Whether to force a shadow paging mode change.
3394 */
3395VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
3396{
3397 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3398 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3399
3400 /*
3401 * Calc the shadow mode and switcher.
3402 */
3403 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3404 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
3405
3406 /*
3407 * Exit old mode(s).
3408 */
3409 /* shadow */
3410 if (fShadowModeChanged)
3411 {
3412 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3413 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3414 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3415 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3416 {
3417 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3418 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3419 }
3420 }
3421 else
3422 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3423
3424 /* guest */
3425 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3426 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3427 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3428 {
3429 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3430 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3431 }
3432 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3433 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3434 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3435 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3436
3437 /*
3438 * Change the paging mode data indexes.
3439 */
3440 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3441 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3442 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3443 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3444 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3445 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3446 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3447#ifdef IN_RING3
3448 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3449#endif
3450
3451 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3452 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3453 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3454 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3455 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3456 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3457 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3458#ifdef IN_RING3
3459 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3460#endif
3461
3462 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3463 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3464 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3465 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3466 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3467 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3468 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3469 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3470 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3471 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3472#ifdef VBOX_STRICT
3473 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3474#endif
3475
3476 /*
3477 * Determine SLAT mode -before- entering the new shadow mode!
3478 */
3479 pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
3480
3481 /*
3482 * Enter new shadow mode (if changed).
3483 */
3484 if (fShadowModeChanged)
3485 {
3486 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3487 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu);
3488 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3489 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3490 }
3491
3492 /*
3493 * Always flag the necessary updates
3494 */
3495 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3496
3497 /*
3498 * Enter the new guest and shadow+guest modes.
3499 */
3500 /* Calc the new CR3 value. */
3501 RTGCPHYS GCPhysCR3;
3502 switch (enmGuestMode)
3503 {
3504 case PGMMODE_REAL:
3505 case PGMMODE_PROTECTED:
3506 GCPhysCR3 = NIL_RTGCPHYS;
3507 break;
3508
3509 case PGMMODE_32_BIT:
3510 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3511 break;
3512
3513 case PGMMODE_PAE_NX:
3514 case PGMMODE_PAE:
3515 if (!pVM->cpum.ro.GuestFeatures.fPae)
3516#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3517 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3518 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3519#else
3520 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3521
3522#endif
3523 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3524 break;
3525
3526#ifdef VBOX_WITH_64_BITS_GUESTS
3527 case PGMMODE_AMD64_NX:
3528 case PGMMODE_AMD64:
3529 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3530 break;
3531#endif
3532 default:
3533 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3534 }
3535
3536#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3537 /*
3538 * If a nested-guest is using EPT paging:
3539 * - Update the second-level address translation (SLAT) mode.
3540 * - Indicate that the CR3 is nested-guest physical address.
3541 */
3542 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3543 {
3544 if (PGMMODE_WITH_PAGING(enmGuestMode))
3545 {
3546 /*
3547 * Translate CR3 to its guest-physical address.
3548 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3549 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3550 */
3551 PGMPTWALK Walk;
3552 PGMPTWALKGST GstWalk;
3553 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3554 if (RT_SUCCESS(rc))
3555 { /* likely */ }
3556 else
3557 {
3558 /*
3559 * SLAT failed but we avoid reporting this to the caller because the caller
3560 * is not supposed to fail. The only time the caller needs to indicate a
3561 * failure to software is when PAE paging is used by the nested-guest, but
3562 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3563 * In all other cases, the failure will be indicated when the CR3 is
3564 * translated on the next linear-address memory access.
3565 * See Intel spec. 27.2.1 "EPT Overview".
3566 */
3567 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3568
3569 /* Trying to coax PGM to succeed for the time being... */
3570 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3571 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3572 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3573 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3574 return VINF_SUCCESS;
3575 }
3576 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3577 GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
3578 }
3579 }
3580 else
3581 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3582#endif
3583
3584 /*
3585 * Enter the new guest mode.
3586 */
3587 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3588 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3589 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3590
3591 /* Set the new guest CR3 (and nested-guest CR3). */
3592 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3593
3594 /* status codes. */
3595 AssertRC(rc);
3596 AssertRC(rc2);
3597 if (RT_SUCCESS(rc))
3598 {
3599 rc = rc2;
3600 if (RT_SUCCESS(rc)) /* no informational status codes. */
3601 rc = VINF_SUCCESS;
3602 }
3603
3604 /*
3605 * Notify HM.
3606 */
3607 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3608 return rc;
3609}
3610
3611
3612/**
3613 * Called by CPUM or REM when CR0.WP changes to 1.
3614 *
3615 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3616 * @thread EMT
3617 */
3618VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3619{
3620 /*
3621 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3622 *
3623 * Use the counter to judge whether there might be pool pages with active
3624 * hacks in them. If there are, we will be running the risk of messing up
3625 * the guest by allowing it to write to read-only pages. Thus, we have to
3626 * clear the page pool ASAP if there is the slightest chance.
3627 */
3628 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3629 {
3630 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3631
3632 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3633 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3634 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3635 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3636 }
3637}
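
/*
 * Illustrative sketch (not compiled): the CR0 write path is expected to call
 * PGMCr0WpEnabled on a WP 0 -> 1 transition. The surrounding handler and the
 * uOldCr0/uNewCr0 variables are assumptions; only the WP test and the PGM call
 * reflect this code base.
 *
 *      if (    (uNewCr0 & X86_CR0_WP)
 *          && !(uOldCr0 & X86_CR0_WP))
 *          PGMCr0WpEnabled(pVCpu);
 */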
3638
3639
3640/**
3641 * Gets the current guest paging mode.
3642 *
3643 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3644 *
3645 * @returns The current paging mode.
3646 * @param pVCpu The cross context virtual CPU structure.
3647 */
3648VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3649{
3650 return pVCpu->pgm.s.enmGuestMode;
3651}
3652
3653
3654/**
3655 * Gets the current shadow paging mode.
3656 *
3657 * @returns The current paging mode.
3658 * @param pVCpu The cross context virtual CPU structure.
3659 */
3660VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3661{
3662 return pVCpu->pgm.s.enmShadowMode;
3663}
3664
3665
3666/**
3667 * Gets the current host paging mode.
3668 *
3669 * @returns The current paging mode.
3670 * @param pVM The cross context VM structure.
3671 */
3672VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3673{
3674 switch (pVM->pgm.s.enmHostMode)
3675 {
3676 case SUPPAGINGMODE_32_BIT:
3677 case SUPPAGINGMODE_32_BIT_GLOBAL:
3678 return PGMMODE_32_BIT;
3679
3680 case SUPPAGINGMODE_PAE:
3681 case SUPPAGINGMODE_PAE_GLOBAL:
3682 return PGMMODE_PAE;
3683
3684 case SUPPAGINGMODE_PAE_NX:
3685 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3686 return PGMMODE_PAE_NX;
3687
3688 case SUPPAGINGMODE_AMD64:
3689 case SUPPAGINGMODE_AMD64_GLOBAL:
3690 return PGMMODE_AMD64;
3691
3692 case SUPPAGINGMODE_AMD64_NX:
3693 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3694 return PGMMODE_AMD64_NX;
3695
3696 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3697 }
3698
3699 return PGMMODE_INVALID;
3700}
3701
3702
3703/**
3704 * Gets the mode name.
3705 *
3706 * @returns read-only name string.
3707 * @param enmMode The mode whose name is desired.
3708 */
3709VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3710{
3711 switch (enmMode)
3712 {
3713 case PGMMODE_REAL: return "Real";
3714 case PGMMODE_PROTECTED: return "Protected";
3715 case PGMMODE_32_BIT: return "32-bit";
3716 case PGMMODE_PAE: return "PAE";
3717 case PGMMODE_PAE_NX: return "PAE+NX";
3718 case PGMMODE_AMD64: return "AMD64";
3719 case PGMMODE_AMD64_NX: return "AMD64+NX";
3720 case PGMMODE_NESTED_32BIT: return "Nested-32";
3721 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3722 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3723 case PGMMODE_EPT: return "EPT";
3724 case PGMMODE_NONE: return "None";
3725 default: return "unknown mode value";
3726 }
3727}
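
/*
 * Illustrative sketch (not compiled): the mode getters combine naturally with
 * PGMGetModeName for logging, mirroring how PGMHCChangeMode logs transitions
 * above. The log prefix is an assumption.
 *
 *      Log(("paging: guest=%s shadow=%s host=%s\n",
 *           PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *           PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *           PGMGetModeName(PGMGetHostMode(pVCpu->CTX_SUFF(pVM)))));
 */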
3728
3729
3730#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3731/**
3732 * Gets the SLAT mode name.
3733 *
3734 * @returns The read-only SLAT mode descriptive string.
3735 * @param enmSlatMode The SLAT mode value.
3736 */
3737VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3738{
3739 switch (enmSlatMode)
3740 {
3741 case PGMSLAT_DIRECT: return "Direct";
3742 case PGMSLAT_EPT: return "EPT";
3743 case PGMSLAT_32BIT: return "32-bit";
3744 case PGMSLAT_PAE: return "PAE";
3745 case PGMSLAT_AMD64: return "AMD64";
3746 default: return "Unknown";
3747 }
3748}
3749#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
3750
3751
3752/**
3753 * Gets the physical address represented in the guest CR3 as PGM sees it.
3754 *
3755 * This is mainly for logging and debugging.
3756 *
3757 * @returns PGM's guest CR3 value.
3758 * @param pVCpu The cross context virtual CPU structure.
3759 */
3760VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3761{
3762 return pVCpu->pgm.s.GCPhysCR3;
3763}
3764
3765
3766
3767/**
3768 * Notification from CPUM that the EFER.NXE bit has changed.
3769 *
3770 * @param pVCpu The cross context virtual CPU structure of the CPU for
3771 * which EFER changed.
3772 * @param fNxe The new NXE state.
3773 */
3774VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3775{
3776/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3777 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3778
3779 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3780 if (fNxe)
3781 {
3782 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3783 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3784 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3785 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3786 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3787 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3788 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3789 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3790 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3791 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3792 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3793
3794 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3795 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3796 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3797 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3798 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3799 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3800 }
3801 else
3802 {
3803 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3804 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3805 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3806 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3807 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3808 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3809 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3810 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3811 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3812 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3813 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3814
3815 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3816 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3817 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3818 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3819 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3820 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3821 }
3822}
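
/*
 * Illustrative sketch (not compiled): CPUM is expected to forward an EFER.NXE
 * toggle along these lines; the handler context and the uOldEfer/uNewEfer
 * variables are assumptions.
 *
 *      if ((uOldEfer ^ uNewEfer) & MSR_K6_EFER_NXE)
 *          PGMNotifyNxeChanged(pVCpu, RT_BOOL(uNewEfer & MSR_K6_EFER_NXE));
 */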
3823
3824
3825/**
3826 * Check if any PGM pool pages are marked dirty (not write-monitored).
3827 *
3828 * @returns true if there are dirty pages, false otherwise.
3829 * @param pVM The cross context VM structure.
3830 */
3831VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3832{
3833 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3834}
3835
3836
3837/**
3838 * Check if this VCPU currently owns the PGM lock.
3839 *
3840 * @returns bool owner/not owner
3841 * @param pVM The cross context VM structure.
3842 */
3843VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3844{
3845 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3846}
3847
3848
3849/**
3850 * Enable or disable large page usage.
3851 *
3852 * @returns VBox status code.
3853 * @param pVM The cross context VM structure.
3854 * @param fUseLargePages Whether to use large pages.
3855 */
3856VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3857{
3858 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3859
3860 pVM->pgm.s.fUseLargePages = fUseLargePages;
3861 return VINF_SUCCESS;
3862}
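
/*
 * Illustrative sketch (not compiled): ring-3 configuration code could simply
 * forward a flag to this setter; where the flag comes from is an assumption.
 *
 *      bool const fUseLargePages = true;   // value would come from the VM configuration
 *      int rc = PGMSetLargePageUsage(pVM, fUseLargePages);
 *      AssertRCReturn(rc, rc);
 */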
3863
3864
3865/**
3866 * Acquire the PGM lock.
3867 *
3868 * @returns VBox status code
3869 * @param pVM The cross context VM structure.
3870 * @param fVoid Set if the caller cannot handle failure returns.
3871 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3872 */
3873#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3874int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3875#else
3876int pgmLock(PVMCC pVM, bool fVoid)
3877#endif
3878{
3879#if defined(VBOX_STRICT)
3880 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3881#else
3882 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3883#endif
3884 if (RT_SUCCESS(rc))
3885 return rc;
3886 if (fVoid)
3887 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3888 else
3889 AssertRC(rc);
3890 return rc;
3891}
3892
3893
3894/**
3895 * Release the PGM lock.
3896 *
3897 * @returns VBox status code
3898 * @param pVM The cross context VM structure.
3899 */
3900void pgmUnlock(PVMCC pVM)
3901{
3902 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3903 pVM->pgm.s.cDeprecatedPageLocks = 0;
3904 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3905 if (rc == VINF_SEM_NESTED)
3906 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3907}
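
/*
 * Illustrative sketch (not compiled): the usual pattern around shared PGM
 * state, as also seen in PGMAssertCR3 and PGMSetGuestEptPtr below, is to take
 * the lock, touch the data, and release it. The dirty-page read here is just a
 * hypothetical example of such state.
 *
 *      PGM_LOCK_VOID(pVM);
 *      bool const fDirty = pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
 *      PGM_UNLOCK(pVM);
 */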
3908
3909
3910#if !defined(IN_R0) || defined(LOG_ENABLED)
3911
3912/** Format handler for PGMPAGE.
3913 * @copydoc FNRTSTRFORMATTYPE */
3914static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3915 const char *pszType, void const *pvValue,
3916 int cchWidth, int cchPrecision, unsigned fFlags,
3917 void *pvUser)
3918{
3919 size_t cch;
3920 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3921 if (RT_VALID_PTR(pPage))
3922 {
3923 char szTmp[64+80];
3924
3925 cch = 0;
3926
3927 /* The single char state stuff. */
3928 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3929 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3930
3931# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3932 if (IS_PART_INCLUDED(5))
3933 {
3934 static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
3935 szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
3936 | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
3937 }
3938
3939 /* The type. */
3940 if (IS_PART_INCLUDED(4))
3941 {
3942 szTmp[cch++] = ':';
3943 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3944 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3945 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3946 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3947 }
3948
3949 /* The numbers. */
3950 if (IS_PART_INCLUDED(3))
3951 {
3952 szTmp[cch++] = ':';
3953 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3954 }
3955
3956 if (IS_PART_INCLUDED(2))
3957 {
3958 szTmp[cch++] = ':';
3959 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3960 }
3961
3962 if (IS_PART_INCLUDED(6))
3963 {
3964 szTmp[cch++] = ':';
3965 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3966 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3967 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3968 }
3969# undef IS_PART_INCLUDED
3970
3971 cch = pfnOutput(pvArgOutput, szTmp, cch);
3972 }
3973 else
3974 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3975 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3976 return cch;
3977}
3978
3979
3980/** Format handler for PGMRAMRANGE.
3981 * @copydoc FNRTSTRFORMATTYPE */
3982static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3983 const char *pszType, void const *pvValue,
3984 int cchWidth, int cchPrecision, unsigned fFlags,
3985 void *pvUser)
3986{
3987 size_t cch;
3988 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3989 if (RT_VALID_PTR(pRam))
3990 {
3991 char szTmp[80];
3992 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3993 cch = pfnOutput(pvArgOutput, szTmp, cch);
3994 }
3995 else
3996 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3997 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3998 return cch;
3999}
4000
4001/** Format type handlers to be registered/deregistered. */
4002static const struct
4003{
4004 char szType[24];
4005 PFNRTSTRFORMATTYPE pfnHandler;
4006} g_aPgmFormatTypes[] =
4007{
4008 { "pgmpage", pgmFormatTypeHandlerPage },
4009 { "pgmramrange", pgmFormatTypeHandlerRamRange }
4010};
4011
4012#endif /* !IN_R0 || LOG_ENABLED */
4013
4014/**
4015 * Registers the global string format types.
4016 *
4017 * This should be called at module load time or in some other manner that ensures
4018 * that it's called exactly once.
4019 *
4020 * @returns IPRT status code on RTStrFormatTypeRegister failure.
4021 */
4022VMMDECL(int) PGMRegisterStringFormatTypes(void)
4023{
4024#if !defined(IN_R0) || defined(LOG_ENABLED)
4025 int rc = VINF_SUCCESS;
4026 unsigned i;
4027 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4028 {
4029 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4030# ifdef IN_RING0
4031 if (rc == VERR_ALREADY_EXISTS)
4032 {
4033 /* in case of cleanup failure in ring-0 */
4034 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4035 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4036 }
4037# endif
4038 }
4039 if (RT_FAILURE(rc))
4040 while (i-- > 0)
4041 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4042
4043 return rc;
4044#else
4045 return VINF_SUCCESS;
4046#endif
4047}
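
/*
 * Illustrative sketch (not compiled): once registered, the custom types are
 * consumed through the standard IPRT %R[...] format extension; whether the
 * handlers above are compiled in depends on IN_R0/LOG_ENABLED. The message
 * text is an assumption.
 *
 *      Log(("page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 */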
4048
4049
4050/**
4051 * Deregisters the global string format types.
4052 *
4053 * This should be called at module unload time or in some other manner that
4054 * ensures that it's called exactly once.
4055 */
4056VMMDECL(void) PGMDeregisterStringFormatTypes(void)
4057{
4058#if !defined(IN_R0) || defined(LOG_ENABLED)
4059 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4060 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4061#endif
4062}
4063
4064
4065#ifdef VBOX_STRICT
4066/**
4067 * Asserts that everything related to the guest CR3 is correctly shadowed.
4068 *
4069 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
4070 * and assert the correctness of the guest CR3 mapping before asserting that the
4071 * shadow page tables are in sync with the guest page tables.
4072 *
4073 * @returns Number of conflicts.
4074 * @param pVM The cross context VM structure.
4075 * @param pVCpu The cross context virtual CPU structure.
4076 * @param cr3 The current guest CR3 register value.
4077 * @param cr4 The current guest CR4 register value.
4078 */
4079VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
4080{
4081 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4082
4083 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
4084 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
4085 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
4086
4087 PGM_LOCK_VOID(pVM);
4088 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
4089 PGM_UNLOCK(pVM);
4090
4091 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4092 return cErrors;
4093}
4094#endif /* VBOX_STRICT */
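
/*
 * Illustrative sketch (not compiled): strict builds can cross-check the shadow
 * paging state after a sync. The call site is an assumption; the arguments are
 * taken from the CPUM CR3/CR4 getters.
 *
 *  #ifdef VBOX_STRICT
 *      unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *      Assert(!cErrors);
 *  #endif
 */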
4095
4096
4097/**
4098 * Updates PGM's copy of the guest's EPT pointer.
4099 *
4100 * @param pVCpu The cross context virtual CPU structure.
4101 * @param uEptPtr The EPT pointer.
4102 *
4103 * @remarks This can be called as part of VM-entry so we might be in the midst of
4104 * switching to VMX non-root mode.
4105 */
4106VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
4107{
4108 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4109 PGM_LOCK_VOID(pVM);
4110 pVCpu->pgm.s.uEptPtr = uEptPtr;
4111 pVCpu->pgm.s.pGstEptPml4R3 = 0;
4112 pVCpu->pgm.s.pGstEptPml4R0 = 0;
4113 PGM_UNLOCK(pVM);
4114}
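
/*
 * Illustrative sketch (not compiled): the VMX code is expected to push the
 * nested-guest EPTP down to PGM when it changes, e.g. as part of VM-entry;
 * the surrounding condition and variable names are assumptions.
 *
 *      if (fEptpChanged)
 *          PGMSetGuestEptPtr(pVCpu, uNewEptPtr);
 */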
4115