VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 97039

Last change on this file since 97039 was 97039, checked in by vboxsync, 2 years ago

VMM/PGM: Nested VMX: bugref:10092 Comment nit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 149.1 KB
1/* $Id: PGMAll.cpp 97039 2022-10-07 08:18:18Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/selm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/sup.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/stam.h>
42#include <VBox/vmm/trpm.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/hm_vmx.h>
46#include "PGMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "PGMInline.h"
49#include <iprt/assert.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#include <iprt/string.h>
54#include <VBox/log.h>
55#include <VBox/param.h>
56#include <VBox/err.h>
57
58
59/*********************************************************************************************************************************
60* Internal Functions *
61*********************************************************************************************************************************/
62DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
63DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
64DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
65#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
66static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
67 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
68static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
69 PPGMPTWALKGST pGstWalk);
70static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
71static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
72 PPGMPTWALKGST pGstWalkAll);
73#endif
74static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
75static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
76
77
78/*
79 * Second level translation - EPT.
80 */
81#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
82# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
83# include "PGMSlatDefs.h"
84# include "PGMAllGstSlatEpt.cpp.h"
85# undef PGM_SLAT_TYPE
86#endif
87
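/*
 * Note on the template instantiation pattern used below:
 *
 * The shadow, guest and combined ("both") paging workers are generated by
 * repeatedly including the template headers PGMAllShw.h, PGMAllGst.h and
 * PGMAllBth.h with different PGM_SHW_TYPE / PGM_GST_TYPE settings.  The
 * PGM_SHW_NAME, PGM_GST_NAME and PGM_BTH_NAME macros mangle the generated
 * function names, so each (shadow, guest) mode combination gets its own set
 * of workers.  These are later collected in the g_aPgmGuestModeData,
 * g_aPgmShadowModeData and g_aPgmBothModeData arrays further down.
 */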
88
89/*
90 * Shadow - 32-bit mode
91 */
92#define PGM_SHW_TYPE PGM_TYPE_32BIT
93#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
94#include "PGMAllShw.h"
95
96/* Guest - real mode */
97#define PGM_GST_TYPE PGM_TYPE_REAL
98#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
99#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
100#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
101#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
102#include "PGMGstDefs.h"
103#include "PGMAllGst.h"
104#include "PGMAllBth.h"
105#undef BTH_PGMPOOLKIND_PT_FOR_PT
106#undef BTH_PGMPOOLKIND_ROOT
107#undef PGM_BTH_NAME
108#undef PGM_GST_TYPE
109#undef PGM_GST_NAME
110
111/* Guest - protected mode */
112#define PGM_GST_TYPE PGM_TYPE_PROT
113#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
114#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
115#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
116#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
117#include "PGMGstDefs.h"
118#include "PGMAllGst.h"
119#include "PGMAllBth.h"
120#undef BTH_PGMPOOLKIND_PT_FOR_PT
121#undef BTH_PGMPOOLKIND_ROOT
122#undef PGM_BTH_NAME
123#undef PGM_GST_TYPE
124#undef PGM_GST_NAME
125
126/* Guest - 32-bit mode */
127#define PGM_GST_TYPE PGM_TYPE_32BIT
128#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
129#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
130#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
131#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
132#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
133#include "PGMGstDefs.h"
134#include "PGMAllGst.h"
135#include "PGMAllBth.h"
136#undef BTH_PGMPOOLKIND_PT_FOR_BIG
137#undef BTH_PGMPOOLKIND_PT_FOR_PT
138#undef BTH_PGMPOOLKIND_ROOT
139#undef PGM_BTH_NAME
140#undef PGM_GST_TYPE
141#undef PGM_GST_NAME
142
143#undef PGM_SHW_TYPE
144#undef PGM_SHW_NAME
145
146
147/*
148 * Shadow - PAE mode
149 */
150#define PGM_SHW_TYPE PGM_TYPE_PAE
151#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
152#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
153#include "PGMAllShw.h"
154
155/* Guest - real mode */
156#define PGM_GST_TYPE PGM_TYPE_REAL
157#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
158#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
159#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
160#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
161#include "PGMGstDefs.h"
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - protected mode */
170#define PGM_GST_TYPE PGM_TYPE_PROT
171#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
174#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
175#include "PGMGstDefs.h"
176#include "PGMAllBth.h"
177#undef BTH_PGMPOOLKIND_PT_FOR_PT
178#undef BTH_PGMPOOLKIND_ROOT
179#undef PGM_BTH_NAME
180#undef PGM_GST_TYPE
181#undef PGM_GST_NAME
182
183/* Guest - 32-bit mode */
184#define PGM_GST_TYPE PGM_TYPE_32BIT
185#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
186#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
187#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
188#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
189#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
190#include "PGMGstDefs.h"
191#include "PGMAllBth.h"
192#undef BTH_PGMPOOLKIND_PT_FOR_BIG
193#undef BTH_PGMPOOLKIND_PT_FOR_PT
194#undef BTH_PGMPOOLKIND_ROOT
195#undef PGM_BTH_NAME
196#undef PGM_GST_TYPE
197#undef PGM_GST_NAME
198
199
200/* Guest - PAE mode */
201#define PGM_GST_TYPE PGM_TYPE_PAE
202#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
203#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
204#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
205#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
206#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
207#include "PGMGstDefs.h"
208#include "PGMAllGst.h"
209#include "PGMAllBth.h"
210#undef BTH_PGMPOOLKIND_PT_FOR_BIG
211#undef BTH_PGMPOOLKIND_PT_FOR_PT
212#undef BTH_PGMPOOLKIND_ROOT
213#undef PGM_BTH_NAME
214#undef PGM_GST_TYPE
215#undef PGM_GST_NAME
216
217#undef PGM_SHW_TYPE
218#undef PGM_SHW_NAME
219
220
221/*
222 * Shadow - AMD64 mode
223 */
224#define PGM_SHW_TYPE PGM_TYPE_AMD64
225#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
226#include "PGMAllShw.h"
227
228/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
229/** @todo retire this hack. */
230#define PGM_GST_TYPE PGM_TYPE_PROT
231#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
232#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
233#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
234#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
235#include "PGMGstDefs.h"
236#include "PGMAllBth.h"
237#undef BTH_PGMPOOLKIND_PT_FOR_PT
238#undef BTH_PGMPOOLKIND_ROOT
239#undef PGM_BTH_NAME
240#undef PGM_GST_TYPE
241#undef PGM_GST_NAME
242
243#ifdef VBOX_WITH_64_BITS_GUESTS
244/* Guest - AMD64 mode */
245# define PGM_GST_TYPE PGM_TYPE_AMD64
246# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
247# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
248# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
249# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
250# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
251# include "PGMGstDefs.h"
252# include "PGMAllGst.h"
253# include "PGMAllBth.h"
254# undef BTH_PGMPOOLKIND_PT_FOR_BIG
255# undef BTH_PGMPOOLKIND_PT_FOR_PT
256# undef BTH_PGMPOOLKIND_ROOT
257# undef PGM_BTH_NAME
258# undef PGM_GST_TYPE
259# undef PGM_GST_NAME
260#endif /* VBOX_WITH_64_BITS_GUESTS */
261
262#undef PGM_SHW_TYPE
263#undef PGM_SHW_NAME
264
265
266/*
267 * Shadow - 32-bit nested paging mode.
268 */
269#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
270#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
271#include "PGMAllShw.h"
272
273/* Guest - real mode */
274#define PGM_GST_TYPE PGM_TYPE_REAL
275#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
276#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
277#include "PGMGstDefs.h"
278#include "PGMAllBth.h"
279#undef PGM_BTH_NAME
280#undef PGM_GST_TYPE
281#undef PGM_GST_NAME
282
283/* Guest - protected mode */
284#define PGM_GST_TYPE PGM_TYPE_PROT
285#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
286#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
287#include "PGMGstDefs.h"
288#include "PGMAllBth.h"
289#undef PGM_BTH_NAME
290#undef PGM_GST_TYPE
291#undef PGM_GST_NAME
292
293/* Guest - 32-bit mode */
294#define PGM_GST_TYPE PGM_TYPE_32BIT
295#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
296#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
297#include "PGMGstDefs.h"
298#include "PGMAllBth.h"
299#undef PGM_BTH_NAME
300#undef PGM_GST_TYPE
301#undef PGM_GST_NAME
302
303/* Guest - PAE mode */
304#define PGM_GST_TYPE PGM_TYPE_PAE
305#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
306#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
307#include "PGMGstDefs.h"
308#include "PGMAllBth.h"
309#undef PGM_BTH_NAME
310#undef PGM_GST_TYPE
311#undef PGM_GST_NAME
312
313#ifdef VBOX_WITH_64_BITS_GUESTS
314/* Guest - AMD64 mode */
315# define PGM_GST_TYPE PGM_TYPE_AMD64
316# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
317# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
318# include "PGMGstDefs.h"
319# include "PGMAllBth.h"
320# undef PGM_BTH_NAME
321# undef PGM_GST_TYPE
322# undef PGM_GST_NAME
323#endif /* VBOX_WITH_64_BITS_GUESTS */
324
325#undef PGM_SHW_TYPE
326#undef PGM_SHW_NAME
327
328
329/*
330 * Shadow - PAE nested paging mode.
331 */
332#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
333#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
334#include "PGMAllShw.h"
335
336/* Guest - real mode */
337#define PGM_GST_TYPE PGM_TYPE_REAL
338#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
339#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
340#include "PGMGstDefs.h"
341#include "PGMAllBth.h"
342#undef PGM_BTH_NAME
343#undef PGM_GST_TYPE
344#undef PGM_GST_NAME
345
346/* Guest - protected mode */
347#define PGM_GST_TYPE PGM_TYPE_PROT
348#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
349#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
350#include "PGMGstDefs.h"
351#include "PGMAllBth.h"
352#undef PGM_BTH_NAME
353#undef PGM_GST_TYPE
354#undef PGM_GST_NAME
355
356/* Guest - 32-bit mode */
357#define PGM_GST_TYPE PGM_TYPE_32BIT
358#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
359#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
360#include "PGMGstDefs.h"
361#include "PGMAllBth.h"
362#undef PGM_BTH_NAME
363#undef PGM_GST_TYPE
364#undef PGM_GST_NAME
365
366/* Guest - PAE mode */
367#define PGM_GST_TYPE PGM_TYPE_PAE
368#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
369#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
370#include "PGMGstDefs.h"
371#include "PGMAllBth.h"
372#undef PGM_BTH_NAME
373#undef PGM_GST_TYPE
374#undef PGM_GST_NAME
375
376#ifdef VBOX_WITH_64_BITS_GUESTS
377/* Guest - AMD64 mode */
378# define PGM_GST_TYPE PGM_TYPE_AMD64
379# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
380# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
381# include "PGMGstDefs.h"
382# include "PGMAllBth.h"
383# undef PGM_BTH_NAME
384# undef PGM_GST_TYPE
385# undef PGM_GST_NAME
386#endif /* VBOX_WITH_64_BITS_GUESTS */
387
388#undef PGM_SHW_TYPE
389#undef PGM_SHW_NAME
390
391
392/*
393 * Shadow - AMD64 nested paging mode.
394 */
395#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
396#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
397#include "PGMAllShw.h"
398
399/* Guest - real mode */
400#define PGM_GST_TYPE PGM_TYPE_REAL
401#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
402#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
403#include "PGMGstDefs.h"
404#include "PGMAllBth.h"
405#undef PGM_BTH_NAME
406#undef PGM_GST_TYPE
407#undef PGM_GST_NAME
408
409/* Guest - protected mode */
410#define PGM_GST_TYPE PGM_TYPE_PROT
411#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
412#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
413#include "PGMGstDefs.h"
414#include "PGMAllBth.h"
415#undef PGM_BTH_NAME
416#undef PGM_GST_TYPE
417#undef PGM_GST_NAME
418
419/* Guest - 32-bit mode */
420#define PGM_GST_TYPE PGM_TYPE_32BIT
421#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
422#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
423#include "PGMGstDefs.h"
424#include "PGMAllBth.h"
425#undef PGM_BTH_NAME
426#undef PGM_GST_TYPE
427#undef PGM_GST_NAME
428
429/* Guest - PAE mode */
430#define PGM_GST_TYPE PGM_TYPE_PAE
431#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
432#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
433#include "PGMGstDefs.h"
434#include "PGMAllBth.h"
435#undef PGM_BTH_NAME
436#undef PGM_GST_TYPE
437#undef PGM_GST_NAME
438
439#ifdef VBOX_WITH_64_BITS_GUESTS
440/* Guest - AMD64 mode */
441# define PGM_GST_TYPE PGM_TYPE_AMD64
442# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
443# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
444# include "PGMGstDefs.h"
445# include "PGMAllBth.h"
446# undef PGM_BTH_NAME
447# undef PGM_GST_TYPE
448# undef PGM_GST_NAME
449#endif /* VBOX_WITH_64_BITS_GUESTS */
450
451#undef PGM_SHW_TYPE
452#undef PGM_SHW_NAME
453
454
455/*
456 * Shadow - EPT.
457 */
458#define PGM_SHW_TYPE PGM_TYPE_EPT
459#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
460#include "PGMAllShw.h"
461
462/* Guest - real mode */
463#define PGM_GST_TYPE PGM_TYPE_REAL
464#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
465#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
466#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
467#include "PGMGstDefs.h"
468#include "PGMAllBth.h"
469#undef BTH_PGMPOOLKIND_PT_FOR_PT
470#undef PGM_BTH_NAME
471#undef PGM_GST_TYPE
472#undef PGM_GST_NAME
473
474/* Guest - protected mode */
475#define PGM_GST_TYPE PGM_TYPE_PROT
476#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
477#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
478#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
479#include "PGMGstDefs.h"
480#include "PGMAllBth.h"
481#undef BTH_PGMPOOLKIND_PT_FOR_PT
482#undef PGM_BTH_NAME
483#undef PGM_GST_TYPE
484#undef PGM_GST_NAME
485
486/* Guest - 32-bit mode */
487#define PGM_GST_TYPE PGM_TYPE_32BIT
488#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
489#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
490#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
491#include "PGMGstDefs.h"
492#include "PGMAllBth.h"
493#undef BTH_PGMPOOLKIND_PT_FOR_PT
494#undef PGM_BTH_NAME
495#undef PGM_GST_TYPE
496#undef PGM_GST_NAME
497
498/* Guest - PAE mode */
499#define PGM_GST_TYPE PGM_TYPE_PAE
500#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
501#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
502#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
503#include "PGMGstDefs.h"
504#include "PGMAllBth.h"
505#undef BTH_PGMPOOLKIND_PT_FOR_PT
506#undef PGM_BTH_NAME
507#undef PGM_GST_TYPE
508#undef PGM_GST_NAME
509
510#ifdef VBOX_WITH_64_BITS_GUESTS
511/* Guest - AMD64 mode */
512# define PGM_GST_TYPE PGM_TYPE_AMD64
513# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
514# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
515# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
516# include "PGMGstDefs.h"
517# include "PGMAllBth.h"
518# undef BTH_PGMPOOLKIND_PT_FOR_PT
519# undef PGM_BTH_NAME
520# undef PGM_GST_TYPE
521# undef PGM_GST_NAME
522#endif /* VBOX_WITH_64_BITS_GUESTS */
523
524#undef PGM_SHW_TYPE
525#undef PGM_SHW_NAME
526
527
528/*
529 * Shadow - NEM / None.
530 */
531#define PGM_SHW_TYPE PGM_TYPE_NONE
532#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
533#include "PGMAllShw.h"
534
535/* Guest - real mode */
536#define PGM_GST_TYPE PGM_TYPE_REAL
537#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
538#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
539#include "PGMGstDefs.h"
540#include "PGMAllBth.h"
541#undef PGM_BTH_NAME
542#undef PGM_GST_TYPE
543#undef PGM_GST_NAME
544
545/* Guest - protected mode */
546#define PGM_GST_TYPE PGM_TYPE_PROT
547#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
548#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
549#include "PGMGstDefs.h"
550#include "PGMAllBth.h"
551#undef PGM_BTH_NAME
552#undef PGM_GST_TYPE
553#undef PGM_GST_NAME
554
555/* Guest - 32-bit mode */
556#define PGM_GST_TYPE PGM_TYPE_32BIT
557#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
558#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
559#include "PGMGstDefs.h"
560#include "PGMAllBth.h"
561#undef PGM_BTH_NAME
562#undef PGM_GST_TYPE
563#undef PGM_GST_NAME
564
565/* Guest - PAE mode */
566#define PGM_GST_TYPE PGM_TYPE_PAE
567#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
568#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
569#include "PGMGstDefs.h"
570#include "PGMAllBth.h"
571#undef PGM_BTH_NAME
572#undef PGM_GST_TYPE
573#undef PGM_GST_NAME
574
575#ifdef VBOX_WITH_64_BITS_GUESTS
576/* Guest - AMD64 mode */
577# define PGM_GST_TYPE PGM_TYPE_AMD64
578# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
579# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
580# include "PGMGstDefs.h"
581# include "PGMAllBth.h"
582# undef PGM_BTH_NAME
583# undef PGM_GST_TYPE
584# undef PGM_GST_NAME
585#endif /* VBOX_WITH_64_BITS_GUESTS */
586
587#undef PGM_SHW_TYPE
588#undef PGM_SHW_NAME
589
590
591
592/**
593 * Guest mode data array.
594 */
595PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
596{
597 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
598 {
599 PGM_TYPE_REAL,
600 PGM_GST_NAME_REAL(GetPage),
601 PGM_GST_NAME_REAL(ModifyPage),
602 PGM_GST_NAME_REAL(Enter),
603 PGM_GST_NAME_REAL(Exit),
604#ifdef IN_RING3
605 PGM_GST_NAME_REAL(Relocate),
606#endif
607 },
608 {
609 PGM_TYPE_PROT,
610 PGM_GST_NAME_PROT(GetPage),
611 PGM_GST_NAME_PROT(ModifyPage),
612 PGM_GST_NAME_PROT(Enter),
613 PGM_GST_NAME_PROT(Exit),
614#ifdef IN_RING3
615 PGM_GST_NAME_PROT(Relocate),
616#endif
617 },
618 {
619 PGM_TYPE_32BIT,
620 PGM_GST_NAME_32BIT(GetPage),
621 PGM_GST_NAME_32BIT(ModifyPage),
622 PGM_GST_NAME_32BIT(Enter),
623 PGM_GST_NAME_32BIT(Exit),
624#ifdef IN_RING3
625 PGM_GST_NAME_32BIT(Relocate),
626#endif
627 },
628 {
629 PGM_TYPE_PAE,
630 PGM_GST_NAME_PAE(GetPage),
631 PGM_GST_NAME_PAE(ModifyPage),
632 PGM_GST_NAME_PAE(Enter),
633 PGM_GST_NAME_PAE(Exit),
634#ifdef IN_RING3
635 PGM_GST_NAME_PAE(Relocate),
636#endif
637 },
638#ifdef VBOX_WITH_64_BITS_GUESTS
639 {
640 PGM_TYPE_AMD64,
641 PGM_GST_NAME_AMD64(GetPage),
642 PGM_GST_NAME_AMD64(ModifyPage),
643 PGM_GST_NAME_AMD64(Enter),
644 PGM_GST_NAME_AMD64(Exit),
645# ifdef IN_RING3
646 PGM_GST_NAME_AMD64(Relocate),
647# endif
648 },
649#endif
650};
651
652
653/**
654 * The shadow mode data array.
655 */
656PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
657{
658 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
659 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
660 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
661 {
662 PGM_TYPE_32BIT,
663 PGM_SHW_NAME_32BIT(GetPage),
664 PGM_SHW_NAME_32BIT(ModifyPage),
665 PGM_SHW_NAME_32BIT(Enter),
666 PGM_SHW_NAME_32BIT(Exit),
667#ifdef IN_RING3
668 PGM_SHW_NAME_32BIT(Relocate),
669#endif
670 },
671 {
672 PGM_TYPE_PAE,
673 PGM_SHW_NAME_PAE(GetPage),
674 PGM_SHW_NAME_PAE(ModifyPage),
675 PGM_SHW_NAME_PAE(Enter),
676 PGM_SHW_NAME_PAE(Exit),
677#ifdef IN_RING3
678 PGM_SHW_NAME_PAE(Relocate),
679#endif
680 },
681 {
682 PGM_TYPE_AMD64,
683 PGM_SHW_NAME_AMD64(GetPage),
684 PGM_SHW_NAME_AMD64(ModifyPage),
685 PGM_SHW_NAME_AMD64(Enter),
686 PGM_SHW_NAME_AMD64(Exit),
687#ifdef IN_RING3
688 PGM_SHW_NAME_AMD64(Relocate),
689#endif
690 },
691 {
692 PGM_TYPE_NESTED_32BIT,
693 PGM_SHW_NAME_NESTED_32BIT(GetPage),
694 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
695 PGM_SHW_NAME_NESTED_32BIT(Enter),
696 PGM_SHW_NAME_NESTED_32BIT(Exit),
697#ifdef IN_RING3
698 PGM_SHW_NAME_NESTED_32BIT(Relocate),
699#endif
700 },
701 {
702 PGM_TYPE_NESTED_PAE,
703 PGM_SHW_NAME_NESTED_PAE(GetPage),
704 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
705 PGM_SHW_NAME_NESTED_PAE(Enter),
706 PGM_SHW_NAME_NESTED_PAE(Exit),
707#ifdef IN_RING3
708 PGM_SHW_NAME_NESTED_PAE(Relocate),
709#endif
710 },
711 {
712 PGM_TYPE_NESTED_AMD64,
713 PGM_SHW_NAME_NESTED_AMD64(GetPage),
714 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
715 PGM_SHW_NAME_NESTED_AMD64(Enter),
716 PGM_SHW_NAME_NESTED_AMD64(Exit),
717#ifdef IN_RING3
718 PGM_SHW_NAME_NESTED_AMD64(Relocate),
719#endif
720 },
721 {
722 PGM_TYPE_EPT,
723 PGM_SHW_NAME_EPT(GetPage),
724 PGM_SHW_NAME_EPT(ModifyPage),
725 PGM_SHW_NAME_EPT(Enter),
726 PGM_SHW_NAME_EPT(Exit),
727#ifdef IN_RING3
728 PGM_SHW_NAME_EPT(Relocate),
729#endif
730 },
731 {
732 PGM_TYPE_NONE,
733 PGM_SHW_NAME_NONE(GetPage),
734 PGM_SHW_NAME_NONE(ModifyPage),
735 PGM_SHW_NAME_NONE(Enter),
736 PGM_SHW_NAME_NONE(Exit),
737#ifdef IN_RING3
738 PGM_SHW_NAME_NONE(Relocate),
739#endif
740 },
741};
742
743
744/**
745 * The guest+shadow mode data array.
746 */
747PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
748{
749#if !defined(IN_RING3) && !defined(VBOX_STRICT)
750# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
751# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
752 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
753
754#elif !defined(IN_RING3) && defined(VBOX_STRICT)
755# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
756# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
757 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
758
759#elif defined(IN_RING3) && !defined(VBOX_STRICT)
760# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
761# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
762 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
763
764#elif defined(IN_RING3) && defined(VBOX_STRICT)
765# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
766# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
767 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
768
769#else
770# error "Misconfig."
771#endif
772
773 /* 32-bit shadow paging mode: */
774 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
776 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
777 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
782 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
783 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
784 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
785
786 /* PAE shadow paging mode: */
787 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
788 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
789 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
790 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
791 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
792 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
793 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
795 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
798
799 /* AMD64 shadow paging mode: */
800 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
801 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
802 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
803 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
804 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
805#ifdef VBOX_WITH_64_BITS_GUESTS
806 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
807#else
808 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
809#endif
810 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
815
816 /* 32-bit nested paging mode: */
817 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
818 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
819 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
822#ifdef VBOX_WITH_64_BITS_GUESTS
823 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
824#else
825 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
826#endif
827 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
832
833 /* PAE nested paging mode: */
834 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
835 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
836 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
839#ifdef VBOX_WITH_64_BITS_GUESTS
840 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
841#else
842 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
843#endif
844 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
849
850 /* AMD64 nested paging mode: */
851 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
852 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
853 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
856#ifdef VBOX_WITH_64_BITS_GUESTS
857 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
858#else
859 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
860#endif
861 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
866
867 /* EPT nested paging mode: */
868 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
869 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
870 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
873#ifdef VBOX_WITH_64_BITS_GUESTS
874 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
875#else
876 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
877#endif
878 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
883
884 /* NONE / NEM: */
885 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
886 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
887 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
888 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
889 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
890#ifdef VBOX_WITH_64_BITS_GUESTS
891 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
892#else
893 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
894#endif
895 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
896 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
897 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
898 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
899 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
900
901
902#undef PGMMODEDATABTH_ENTRY
903#undef PGMMODEDATABTH_NULL_ENTRY
904};
905
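/*
 * The mode data arrays above are indexed via the per-VCPU mode indices
 * (pVCpu->pgm.s.idxShadowModeData, pVCpu->pgm.s.idxBothModeData and their
 * guest counterpart) and dispatched through function pointers; the typical
 * call pattern (see PGMTrap0eHandler and PGMInvalidatePage below) is:
 *
 *     uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
 *     AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
 *     AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
 *     rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
 */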
906
907/** Mask array used by pgmGetCr3MaskForMode.
908 * X86_CR3_AMD64_PAGE_MASK is used for modes that don't have a CR3 or EPTP. */
909static uint64_t const g_auCr3MaskForMode[PGMMODE_MAX] =
910{
911 /* [PGMMODE_INVALID] = */ X86_CR3_AMD64_PAGE_MASK,
912 /* [PGMMODE_REAL] = */ X86_CR3_AMD64_PAGE_MASK,
913 /* [PGMMODE_PROTECTED] = */ X86_CR3_AMD64_PAGE_MASK,
914 /* [PGMMODE_32_BIT] = */ X86_CR3_PAGE_MASK,
915 /* [PGMMODE_PAE] = */ X86_CR3_PAE_PAGE_MASK,
916 /* [PGMMODE_PAE_NX] = */ X86_CR3_PAE_PAGE_MASK,
917 /* [PGMMODE_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
918 /* [PGMMODE_AMD64_NX] = */ X86_CR3_AMD64_PAGE_MASK,
919 /* [PGMMODE_NESTED_32BIT] = */ X86_CR3_PAGE_MASK,
920 /* [PGMMODE_NESTED_PAE] = */ X86_CR3_PAE_PAGE_MASK,
921 /* [PGMMODE_NESTED_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
922 /* [PGMMODE_EPT] = */ X86_CR3_EPT_PAGE_MASK,
923 /* [PGMMODE_NONE] = */ X86_CR3_AMD64_PAGE_MASK,
924};
925
926
927/**
928 * Gets the physical address mask for CR3 in the given paging mode.
929 *
930 * The mask is for eliminating flags and other stuff in CR3/EPTP when
931 * extracting the physical address. It is not for validating whether there are
932 * reserved bits set. PGM ASSUMES that whoever loaded the CR3 value and passed
933 * it to PGM checked for reserved bits, including reserved physical address
934 * bits.
935 *
936 * @returns The CR3 mask.
937 * @param enmMode The paging mode.
938 * @param enmSlatMode The second-level address translation mode.
939 */
940DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
941{
942 if (enmSlatMode == PGMSLAT_DIRECT)
943 {
944 Assert(enmMode != PGMMODE_EPT);
945 return g_auCr3MaskForMode[(unsigned)enmMode < (unsigned)PGMMODE_MAX ? enmMode : 0];
946 }
947 Assert(enmSlatMode == PGMSLAT_EPT);
948 return X86_CR3_EPT_PAGE_MASK;
949}
950
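/*
 * Example: for a PAE guest with no second-level translation (PGMSLAT_DIRECT),
 * the table above yields X86_CR3_PAE_PAGE_MASK, so pgmGetGuestMaskedCr3 below
 * keeps only the physical address of the PDPT and discards the CR3 flag bits;
 * with EPT as the SLAT mode, the EPTP flags are stripped via
 * X86_CR3_EPT_PAGE_MASK instead.
 */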
951
952/**
953 * Gets the masked CR3 value according to the current guest paging mode.
954 *
955 * See disclaimer in pgmGetCr3MaskForMode.
956 *
957 * @returns The masked PGM CR3 value.
958 * @param pVCpu The cross context virtual CPU structure.
959 * @param uCr3 The raw guest CR3 value.
960 */
961DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
962{
963 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
964 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
965 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
966 return GCPhysCR3;
967}
968
969
970#ifdef IN_RING0
971/**
972 * #PF Handler.
973 *
974 * @returns VBox status code (appropriate for trap handling and GC return).
975 * @param pVCpu The cross context virtual CPU structure.
976 * @param uErr The trap error code.
977 * @param pRegFrame Trap register frame.
978 * @param pvFault The fault address.
979 */
980VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
981{
982 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
983
984 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
985 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
986 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
987
988
989# ifdef VBOX_WITH_STATISTICS
990 /*
991 * Error code stats.
992 */
993 if (uErr & X86_TRAP_PF_US)
994 {
995 if (!(uErr & X86_TRAP_PF_P))
996 {
997 if (uErr & X86_TRAP_PF_RW)
998 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
999 else
1000 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
1001 }
1002 else if (uErr & X86_TRAP_PF_RW)
1003 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
1004 else if (uErr & X86_TRAP_PF_RSVD)
1005 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
1006 else if (uErr & X86_TRAP_PF_ID)
1007 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
1008 else
1009 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
1010 }
1011 else
1012 { /* Supervisor */
1013 if (!(uErr & X86_TRAP_PF_P))
1014 {
1015 if (uErr & X86_TRAP_PF_RW)
1016 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
1017 else
1018 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
1019 }
1020 else if (uErr & X86_TRAP_PF_RW)
1021 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
1022 else if (uErr & X86_TRAP_PF_ID)
1023 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
1024 else if (uErr & X86_TRAP_PF_RSVD)
1025 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
1026 }
1027# endif /* VBOX_WITH_STATISTICS */
1028
1029 /*
1030 * Call the worker.
1031 */
1032 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1033 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1034 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
1035 bool fLockTaken = false;
1036 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
1037 if (fLockTaken)
1038 {
1039 PGM_LOCK_ASSERT_OWNER(pVM);
1040 PGM_UNLOCK(pVM);
1041 }
1042 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
1043
1044 /*
1045 * Return code tweaks.
1046 */
1047 if (rc != VINF_SUCCESS)
1048 {
1049 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1050 rc = VINF_SUCCESS;
1051
1052 /* Note: hack alert for difficult to reproduce problem. */
1053 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
1054 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
1055 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
1056 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
1057 {
1058 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
1059 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
1060 rc = VINF_SUCCESS;
1061 }
1062 }
1063
1064 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
1065 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
1066 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
1067 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
1068 return rc;
1069}
1070#endif /* IN_RING0 */
1071
1072
1073/**
1074 * Prefetch a page
1075 *
1076 * Typically used to sync commonly used pages before entering raw mode
1077 * after a CR3 reload.
1078 *
1079 * @returns VBox status code suitable for scheduling.
1080 * @retval VINF_SUCCESS on success.
1081 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1082 * @param pVCpu The cross context virtual CPU structure.
1083 * @param GCPtrPage Page to invalidate.
1084 */
1085VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1086{
1087 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1088
1089 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1090 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1091 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1092 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1093
1094 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1095 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1096 return rc;
1097}
1098
1099
1100/**
1101 * Emulation of the invlpg instruction (HC only actually).
1102 *
1103 * @returns Strict VBox status code, special care required.
1104 * @retval VINF_PGM_SYNC_CR3 - handled.
1105 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1106 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param GCPtrPage Page to invalidate.
1110 *
1111 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1112 * safe, but there could be edge cases!
1113 *
1114 * @todo Flush page or page directory only if necessary!
1115 * @todo VBOXSTRICTRC
1116 */
1117VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1118{
1119 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1120 int rc;
1121 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1122
1123 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1124
1125 /*
1126 * Call paging mode specific worker.
1127 */
1128 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1129 PGM_LOCK_VOID(pVM);
1130
1131 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1132 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1133 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1134 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1135
1136 PGM_UNLOCK(pVM);
1137 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1138
1139 /* Ignore all irrelevant error codes. */
1140 if ( rc == VERR_PAGE_NOT_PRESENT
1141 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1142 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1143 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1144 rc = VINF_SUCCESS;
1145
1146 return rc;
1147}
1148
1149
1150/**
1151 * Executes an instruction using the interpreter.
1152 *
1153 * @returns VBox status code (appropriate for trap handling and GC return).
1154 * @param pVM The cross context VM structure.
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param pRegFrame Register frame.
1157 * @param pvFault Fault address.
1158 */
1159VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1160{
1161 NOREF(pVM);
1162 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1163 if (rc == VERR_EM_INTERPRETER)
1164 rc = VINF_EM_RAW_EMULATE_INSTR;
1165 if (rc != VINF_SUCCESS)
1166 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1167 return rc;
1168}
1169
1170
1171/**
1172 * Gets effective page information (from the VMM page directory).
1173 *
1174 * @returns VBox status code.
1175 * @param pVCpu The cross context virtual CPU structure.
1176 * @param GCPtr Guest Context virtual address of the page.
1177 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1178 * @param pHCPhys Where to store the HC physical address of the page.
1179 * This is page aligned.
1180 * @remark You should use PGMMapGetPage() for pages in a mapping.
1181 */
1182VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1183{
1184 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1185 PGM_LOCK_VOID(pVM);
1186
1187 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1188 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1189 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1190 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1191
1192 PGM_UNLOCK(pVM);
1193 return rc;
1194}
1195
1196
1197/**
1198 * Modify page flags for a range of pages in the shadow context.
1199 *
1200 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1201 *
1202 * @returns VBox status code.
1203 * @param pVCpu The cross context virtual CPU structure.
1204 * @param GCPtr Virtual address of the first page in the range.
1205 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1206 * @param fMask The AND mask - page flags X86_PTE_*.
1207 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1208 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1209 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1210 */
1211DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1212{
1213 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1214 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1215
1216 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */
1217
1218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1219 PGM_LOCK_VOID(pVM);
1220
1221 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1222 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1223 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1224 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1225
1226 PGM_UNLOCK(pVM);
1227 return rc;
1228}
1229
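/*
 * The wrappers below illustrate the typical fFlags/fMask combinations passed
 * to pdmShwModifyPage:
 *     make read-only:    fFlags = 0,          fMask = ~X86_PTE_RW
 *     make writable:     fFlags = X86_PTE_RW, fMask = ~0
 *     make not present:  fFlags = 0,          fMask = 0
 */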
1230
1231/**
1232 * Changing the page flags for a single page in the shadow page tables so as to
1233 * make it read-only.
1234 *
1235 * @returns VBox status code.
1236 * @param pVCpu The cross context virtual CPU structure.
1237 * @param GCPtr Virtual address of the first page in the range.
1238 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1239 */
1240VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1241{
1242 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1243}
1244
1245
1246/**
1247 * Changing the page flags for a single page in the shadow page tables so as to
1248 * make it writable.
1249 *
1250 * The caller must know with 101% certainty that the guest page tables map this
1251 * as writable too. This function will deal with shared, zero and write
1252 * monitored pages.
1253 *
1254 * @returns VBox status code.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 * @param GCPtr Virtual address of the first page in the range.
1257 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1258 */
1259VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1260{
1261 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1262 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1263 return VINF_SUCCESS;
1264}
1265
1266
1267/**
1268 * Changing the page flags for a single page in the shadow page tables so as to
1269 * make it not present.
1270 *
1271 * @returns VBox status code.
1272 * @param pVCpu The cross context virtual CPU structure.
1273 * @param GCPtr Virtual address of the first page in the range.
1274 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1275 */
1276VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1277{
1278 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1279}
1280
1281
1282/**
1283 * Changing the page flags for a single page in the shadow page tables so as to
1284 * make it supervisor and writable.
1285 *
1286 * This is for dealing with CR0.WP=0 and read-only user pages.
1287 *
1288 * @returns VBox status code.
1289 * @param pVCpu The cross context virtual CPU structure.
1290 * @param GCPtr Virtual address of the first page in the range.
1291 * @param fBigPage Whether or not this is a big page. If it is, we have to
1292 * change the shadow PDE as well. If it isn't, the caller
1293 * has checked that the shadow PDE doesn't need changing.
1294 * We ASSUME 4KB pages backing the big page here!
1295 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1296 */
1297int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1298{
1299 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1300 if (rc == VINF_SUCCESS && fBigPage)
1301 {
1302 /* this is a bit ugly... */
1303 switch (pVCpu->pgm.s.enmShadowMode)
1304 {
1305 case PGMMODE_32_BIT:
1306 {
1307 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1308 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1309 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1310 pPde->u |= X86_PDE_RW;
1311 Log(("-> PDE=%#llx (32)\n", pPde->u));
1312 break;
1313 }
1314 case PGMMODE_PAE:
1315 case PGMMODE_PAE_NX:
1316 {
1317 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1318 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1319 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1320 pPde->u |= X86_PDE_RW;
1321 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1322 break;
1323 }
1324 default:
1325 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1326 }
1327 }
1328 return rc;
1329}
1330
1331
1332/**
1333 * Gets the shadow page directory for the specified address, PAE.
1334 *
1335 * @returns Pointer to the shadow PD.
1336 * @param pVCpu The cross context virtual CPU structure.
1337 * @param GCPtr The address.
1338 * @param uGstPdpe Guest PDPT entry. Valid.
1339 * @param ppPD Receives address of page directory
1340 */
1341int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1342{
1343 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1344 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1345 PPGMPOOLPAGE pShwPage;
1346 int rc;
1347 PGM_LOCK_ASSERT_OWNER(pVM);
1348
1349
1350 /* Allocate page directory if not present. */
1351 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1352 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1353 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1354 X86PGPAEUINT const uPdpe = pPdpe->u;
1355 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1356 {
1357 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1358 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1359 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1360
1361 pgmPoolCacheUsed(pPool, pShwPage);
1362
1363 /* Update the entry if necessary. */
1364 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1365 if (uPdpeNew == uPdpe)
1366 { /* likely */ }
1367 else
1368 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1369 }
1370 else
1371 {
1372 RTGCPTR64 GCPdPt;
1373 PGMPOOLKIND enmKind;
1374 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1375 {
1376 /* AMD-V nested paging or real/protected mode without paging. */
1377 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1378 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1379 }
1380 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1381 {
1382 if (uGstPdpe & X86_PDPE_P)
1383 {
1384 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1385 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1386 }
1387 else
1388 {
1389 /* PD not present; guest must reload CR3 to change it.
1390 * No need to monitor anything in this case. */
1391 /** @todo r=bird: WTF is hit?!? */
1392 /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
1393 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1394 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1395 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1396 }
1397 }
1398 else
1399 {
1400 GCPdPt = CPUMGetGuestCR3(pVCpu);
1401 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1402 }
1403
1404 /* Create a reference back to the PDPT by using the index in its shadow page. */
1405 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1406 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1407 &pShwPage);
1408 AssertRCReturn(rc, rc);
1409
1410 /* Hook it up. */
1411 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1412 }
1413 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1414
1415 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1416 return VINF_SUCCESS;
1417}
1418
1419
1420/**
1421 * Gets the pointer to the shadow page directory entry for an address, PAE.
1422 *
1423 * @returns Pointer to the PDE.
1424 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1425 * @param GCPtr The address.
1426 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1427 */
1428DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1429{
1430 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1431 PGM_LOCK_ASSERT_OWNER(pVM);
1432
1433 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1434 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1435 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1436 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1437 if (!(uPdpe & X86_PDPE_P))
1438 {
1439 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1440 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1441 }
1442 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1443
1444 /* Fetch the pgm pool shadow descriptor. */
1445 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1446 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1447
1448 *ppShwPde = pShwPde;
1449 return VINF_SUCCESS;
1450}
1451
1452
1453/**
1454 * Syncs the SHADOW page directory pointer for the specified address.
1455 *
1456 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1457 *
1458 * The caller is responsible for making sure the guest has a valid PD before
1459 * calling this function.
1460 *
1461 * @returns VBox status code.
1462 * @param pVCpu The cross context virtual CPU structure.
1463 * @param GCPtr The address.
1464 * @param uGstPml4e Guest PML4 entry (valid).
1465 * @param uGstPdpe Guest PDPT entry (valid).
1466 * @param ppPD Receives address of page directory
1467 */
1468static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1469{
1470 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1471 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1472 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1473 int rc;
1474
1475 PGM_LOCK_ASSERT_OWNER(pVM);
1476
1477 /*
1478 * PML4.
1479 */
1480 PPGMPOOLPAGE pShwPage;
1481 {
1482 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1483 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1484 X86PGPAEUINT const uPml4e = pPml4e->u;
1485
1486 /* Allocate page directory pointer table if not present. */
1487 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1488 {
1489 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1490 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1491
1492 pgmPoolCacheUsed(pPool, pShwPage);
1493
1494 /* Update the entry if needed. */
1495 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1496 | (uPml4e & PGM_PML4_FLAGS);
1497 if (uPml4e == uPml4eNew)
1498 { /* likely */ }
1499 else
1500 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1501 }
1502 else
1503 {
1504 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1505
1506 RTGCPTR64 GCPml4;
1507 PGMPOOLKIND enmKind;
1508 if (fNestedPagingOrNoGstPaging)
1509 {
1510 /* AMD-V nested paging or real/protected mode without paging */
1511 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1512 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1513 }
1514 else
1515 {
1516 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1517 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1518 }
1519
1520 /* Create a reference back to the PDPT by using the index in its shadow page. */
1521 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1522 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1523 &pShwPage);
1524 AssertRCReturn(rc, rc);
1525
1526 /* Hook it up. */
1527 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1528 | (uPml4e & PGM_PML4_FLAGS));
1529 }
1530 }
1531
1532 /*
1533 * PDPT.
1534 */
1535 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1536 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1537 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1538 X86PGPAEUINT const uPdpe = pPdpe->u;
1539
1540 /* Allocate page directory if not present. */
1541 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1542 {
1543 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1544 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1545
1546 pgmPoolCacheUsed(pPool, pShwPage);
1547
1548 /* Update the entry if needed. */
1549 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1550 | (uPdpe & PGM_PDPT_FLAGS);
1551 if (uPdpe == uPdpeNew)
1552 { /* likely */ }
1553 else
1554 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1555 }
1556 else
1557 {
1558 RTGCPTR64 GCPdPt;
1559 PGMPOOLKIND enmKind;
1560 if (fNestedPagingOrNoGstPaging)
1561 {
1562 /* AMD-V nested paging or real/protected mode without paging */
1563 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1564 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1565 }
1566 else
1567 {
1568 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1569 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1570 }
1571
1572 /* Create a reference back to the PDPT by using the index in its shadow page. */
1573 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1574 pShwPage->idx, iPdPt, false /*fLockPage*/,
1575 &pShwPage);
1576 AssertRCReturn(rc, rc);
1577
1578 /* Hook it up. */
1579 ASMAtomicWriteU64(&pPdpe->u,
1580 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1581 }
1582
1583 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1584 return VINF_SUCCESS;
1585}
1586
1587
1588/**
1589 * Gets the SHADOW page directory pointer for the specified address (long mode).
1590 *
1591 * @returns VBox status code.
1592 * @param pVCpu The cross context virtual CPU structure.
1593 * @param GCPtr The address.
1594 * @param ppPml4e Receives the address of the page map level 4 entry.
1595 * @param ppPdpt Receives the address of the page directory pointer table.
1596 * @param ppPD Receives the address of the page directory.
1597 */
1598DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1599{
1600 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1601 PGM_LOCK_ASSERT_OWNER(pVM);
1602
1603 /*
1604 * PML4
1605 */
1606 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1607 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1608 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1609 if (ppPml4e)
1610 *ppPml4e = (PX86PML4E)pPml4e;
1611 X86PGPAEUINT const uPml4e = pPml4e->u;
1612 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1613    if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1614 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1615
1616 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1617 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1618 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1619
1620 /*
1621 * PDPT
1622 */
1623 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1624 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1625 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1626    if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1627 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1628
1629 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1630 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1631
1632 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1633 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1634 return VINF_SUCCESS;
1635}
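
/*
 * Illustrative usage sketch (not part of the build): how a caller that already owns the PGM lock
 * might resolve the shadow PDE for an address via pgmShwGetLongModePDPtr().  GCPtr stands for
 * whatever guest address the caller is working on; everything else is taken from this file.
 *
 * @code
 *     PX86PDPT  pPdpt;
 *     PX86PDPAE pPD;
 *     int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL, &pPdpt, &pPD);   // the PML4E isn't needed here
 *     if (rc == VINF_SUCCESS)
 *     {
 *         const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
 *         X86PDEPAE      Pde = pPD->a[iPd];                                // inspect Pde.u ...
 *     }
 * @endcode
 */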
1636
1637
1638/**
1639 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1640 * backing pages in case the PDPT or PML4 entry is missing.
1641 *
1642 * @returns VBox status code.
1643 * @param pVCpu The cross context virtual CPU structure.
1644 * @param GCPtr The address.
1645 * @param   ppPdpt      Receives the address of the PDPT.
1646 * @param   ppPD        Receives the address of the page directory.
1647 */
1648static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1649{
1650 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1651 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1652 int rc;
1653
1654 Assert(pVM->pgm.s.fNestedPaging);
1655 PGM_LOCK_ASSERT_OWNER(pVM);
1656
1657 /*
1658 * PML4 level.
1659 */
1660 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1661 Assert(pPml4);
1662
1663 /* Allocate page directory pointer table if not present. */
1664 PPGMPOOLPAGE pShwPage;
1665 {
1666 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1667 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1668 EPTPML4E Pml4e;
1669 Pml4e.u = pPml4e->u;
1670 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1671 {
1672 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1673 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1674 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1675 &pShwPage);
1676 AssertRCReturn(rc, rc);
1677
1678 /* Hook up the new PDPT now. */
1679 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1680 }
1681 else
1682 {
1683 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1684 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1685
1686 pgmPoolCacheUsed(pPool, pShwPage);
1687
1688 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1689 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1690 { }
1691 else
1692 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1693 }
1694 }
1695
1696 /*
1697 * PDPT level.
1698 */
1699 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1700 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1701 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1702
1703 if (ppPdpt)
1704 *ppPdpt = pPdpt;
1705
1706 /* Allocate page directory if not present. */
1707 EPTPDPTE Pdpe;
1708 Pdpe.u = pPdpe->u;
1709 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1710 {
1711 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1712 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1713 pShwPage->idx, iPdPt, false /*fLockPage*/,
1714 &pShwPage);
1715 AssertRCReturn(rc, rc);
1716
1717 /* Hook up the new PD now. */
1718 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1719 }
1720 else
1721 {
1722 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1723 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1724
1725 pgmPoolCacheUsed(pPool, pShwPage);
1726
1727        /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1728 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1729 { }
1730 else
1731 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1732 }
1733
1734 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1735 return VINF_SUCCESS;
1736}
1737
1738
1739#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1740/**
1741 * Syncs the SHADOW nested-guest page directory pointer for the specified address.
1742 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1743 *
1744 * @returns VBox status code.
1745 * @param pVCpu The cross context virtual CPU structure.
1746 * @param GCPhysNested The nested-guest physical address.
1747 * @param ppPdpt Where to store the PDPT. Optional, can be NULL.
1748 * @param ppPD Where to store the PD. Optional, can be NULL.
1749 * @param pGstWalkAll The guest walk info.
1750 */
1751static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
1752 PPGMPTWALKGST pGstWalkAll)
1753{
1754 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1755 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1756 int rc;
1757
1758 PPGMPOOLPAGE pShwPage;
1759 Assert(pVM->pgm.s.fNestedPaging);
1760 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
1761 PGM_LOCK_ASSERT_OWNER(pVM);
1762
1763 /*
1764 * PML4 level.
1765 */
1766 {
1767 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1768 Assert(pPml4);
1769
1770 /* Allocate page directory pointer table if not present. */
1771 {
1772 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
1773 const unsigned iPml4e = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1774 PEPTPML4E pPml4e = &pPml4->a[iPml4e];
1775
1776 if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1777 {
1778 RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
1779 rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
1780 PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
1781 &pShwPage);
1782 AssertRCReturn(rc, rc);
1783
1784 /* Hook up the new PDPT now. */
1785 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1786 }
1787 else
1788 {
1789 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1790 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1791
1792 pgmPoolCacheUsed(pPool, pShwPage);
1793
1794 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1795 if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
1796 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1797 }
1798 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1799 Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
1800 }
1801 }
1802
1803 /*
1804 * PDPT level.
1805 */
1806 {
1807 AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
1808
1809 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1810 if (ppPdpt)
1811 *ppPdpt = pPdpt;
1812
1813 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
1814 const unsigned iPdPte = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1815 PEPTPDPTE pPdpte = &pPdpt->a[iPdPte];
1816
1817 if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1818 {
1819 RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
1820 rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1821 pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
1822 AssertRCReturn(rc, rc);
1823
1824 /* Hook up the new PD now. */
1825 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1826 }
1827 else
1828 {
1829 pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
1830 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1831
1832 pgmPoolCacheUsed(pPool, pShwPage);
1833
1834            /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1835 if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
1836 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1837 }
1838 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1839 Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
1840
1841 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1842 }
1843
1844 return VINF_SUCCESS;
1845}
1846#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1847
1848
1849#ifdef IN_RING0
1850/**
1851 * Synchronizes a range of nested page table entries.
1852 *
1853 * The caller must own the PGM lock.
1854 *
1855 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1856 * @param GCPhys Where to start.
1857 * @param   cPages      The number of pages whose entries should be synced.
1858 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1859 * host paging mode for AMD-V).
1860 */
1861int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1862{
1863 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1864
1865/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1866 int rc;
1867 switch (enmShwPagingMode)
1868 {
1869 case PGMMODE_32_BIT:
1870 {
1871 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1872 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1873 break;
1874 }
1875
1876 case PGMMODE_PAE:
1877 case PGMMODE_PAE_NX:
1878 {
1879 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1880 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1881 break;
1882 }
1883
1884 case PGMMODE_AMD64:
1885 case PGMMODE_AMD64_NX:
1886 {
1887 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1888 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1889 break;
1890 }
1891
1892 case PGMMODE_EPT:
1893 {
1894 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1895 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1896 break;
1897 }
1898
1899 default:
1900 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1901 }
1902 return rc;
1903}
1904#endif /* IN_RING0 */
1905
1906
1907/**
1908 * Gets effective Guest OS page information.
1909 *
1910 * When GCPtr is in a big page, the function will return as if it were a normal
1911 * 4KB page. If distinguishing between big and normal pages becomes necessary
1912 * at a later point, a dedicated API will be created for that
1913 * purpose.
1914 *
1915 * @returns VBox status code.
1916 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1917 * @param GCPtr Guest Context virtual address of the page.
1918 * @param pWalk Where to store the page walk information.
1919 */
1920VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1921{
1922 VMCPU_ASSERT_EMT(pVCpu);
1923 Assert(pWalk);
1924 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1925 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1926 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1927 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1928}
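
/*
 * Illustrative usage sketch (not part of the build): querying the guest translation of a linear
 * address with PGMGstGetPage().  GCPtrPage is a hypothetical guest-context address supplied by
 * the caller.
 *
 * @code
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtrPage, &Walk);
 *     if (RT_SUCCESS(rc))
 *         Log(("%RGv -> %RGp\n", GCPtrPage, Walk.GCPhys));     // page walk details are in Walk
 *     else
 *         Log(("%RGv not present, rc=%Rrc\n", GCPtrPage, rc));
 * @endcode
 */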
1929
1930
1931/**
1932 * Maps the guest CR3.
1933 *
1934 * @returns VBox status code.
1935 * @param pVCpu The cross context virtual CPU structure.
1936 * @param GCPhysCr3 The guest CR3 value.
1937 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1938 */
1939DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1940{
1941 /** @todo this needs some reworking wrt. locking? */
1942 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1943 PGM_LOCK_VOID(pVM);
1944 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1945 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1946
1947 RTHCPTR HCPtrGuestCr3;
1948 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1949 PGM_UNLOCK(pVM);
1950
1951 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1952 return rc;
1953}
1954
1955
1956/**
1957 * Unmaps the guest CR3.
1958 *
1959 * @returns VBox status code.
1960 * @param pVCpu The cross context virtual CPU structure.
1961 */
1962DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1963{
1964 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1965 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1966 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
1967 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1968}
1969
1970
1971/**
1972 * Performs a guest page table walk.
1973 *
1974 * The guest should be in paged protect mode or long mode when making a call to
1975 * this function.
1976 *
1977 * @returns VBox status code.
1978 * @retval VINF_SUCCESS on success.
1979 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1980 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1981 *          not valid, except that enmType is PGMPTWALKGSTTYPE_INVALID.
1982 *
1983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1984 * @param GCPtr The guest virtual address to walk by.
1985 * @param pWalk Where to return the walk result. This is valid for some
1986 * error codes as well.
1987 * @param pGstWalk The guest mode specific page walk information.
1988 */
1989int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1990{
1991 VMCPU_ASSERT_EMT(pVCpu);
1992 switch (pVCpu->pgm.s.enmGuestMode)
1993 {
1994 case PGMMODE_32_BIT:
1995 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1996 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1997
1998 case PGMMODE_PAE:
1999 case PGMMODE_PAE_NX:
2000 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
2001 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
2002
2003 case PGMMODE_AMD64:
2004 case PGMMODE_AMD64_NX:
2005 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
2006 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
2007
2008 case PGMMODE_REAL:
2009 case PGMMODE_PROTECTED:
2010 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2011 return VERR_PGM_NOT_USED_IN_MODE;
2012
2013 case PGMMODE_EPT:
2014 case PGMMODE_NESTED_32BIT:
2015 case PGMMODE_NESTED_PAE:
2016 case PGMMODE_NESTED_AMD64:
2017 default:
2018 AssertFailed();
2019 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2020 return VERR_PGM_NOT_USED_IN_MODE;
2021 }
2022}
2023
2024#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2025
2026/**
2027 * Performs a guest second-level address translation (SLAT).
2028 *
2029 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
2030 * function.
2031 *
2032 * @returns VBox status code.
2033 * @retval VINF_SUCCESS on success.
2034 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2035 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2036 *          not valid, except that enmType is PGMPTWALKGSTTYPE_INVALID.
2037 *
2038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2039 * @param GCPhysNested The nested-guest physical address being translated
2040 * (input).
2041 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
2042 *                              valid. This indicates the SLAT is being done
2043 *                              as part of translating a nested-guest linear address.
2044 * @param GCPtrNested The nested-guest virtual address that initiated the
2045 * SLAT. If none, pass NIL_RTGCPTR.
2046 * @param pWalk Where to return the walk result. This is valid for
2047 * some error codes as well.
2048 * @param pGstWalk The second-level paging-mode specific walk
2049 * information.
2050 */
2051static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
2052 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2053{
2054 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
2055 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
2056 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
2057 switch (pVCpu->pgm.s.enmGuestSlatMode)
2058 {
2059 case PGMSLAT_EPT:
2060 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2061 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
2062
2063 default:
2064 AssertFailed();
2065 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2066 return VERR_PGM_NOT_USED_IN_MODE;
2067 }
2068}
2069
2070
2071/**
2072 * Performs a guest second-level address translation (SLAT) for a nested-guest
2073 * physical address.
2074 *
2075 * This version requires the SLAT mode to be provided by the caller because we could
2076 * be in the process of switching paging modes (MOV CRX) and cannot presume control
2077 * register values.
2078 *
2079 * @returns VBox status code.
2080 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2081 * @param enmSlatMode The second-level paging mode to use.
2082 * @param GCPhysNested The nested-guest physical address to translate.
2083 * @param pWalk Where to store the walk result.
2084 * @param pGstWalk Where to store the second-level paging-mode specific
2085 * walk information.
2086 */
2087static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
2088 PPGMPTWALKGST pGstWalk)
2089{
2090 AssertPtr(pWalk);
2091 AssertPtr(pGstWalk);
2092 switch (enmSlatMode)
2093 {
2094 case PGMSLAT_EPT:
2095 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2096 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, 0 /* GCPtrNested */,
2097 pWalk, &pGstWalk->u.Ept);
2098
2099 default:
2100 AssertFailed();
2101 return VERR_PGM_NOT_USED_IN_MODE;
2102 }
2103}
2104
2105#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
2106
2107/**
2108 * Tries to continue the previous walk.
2109 *
2110 * @note Requires the caller to hold the PGM lock from the first
2111 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
2112 * we cannot use the pointers.
2113 *
2114 * @returns VBox status code.
2115 * @retval VINF_SUCCESS on success.
2116 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2117 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2118 *          not valid, except that enmType is PGMPTWALKGSTTYPE_INVALID.
2119 *
2120 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2121 * @param GCPtr The guest virtual address to walk by.
2122 * @param pWalk Pointer to the previous walk result and where to return
2123 * the result of this walk. This is valid for some error
2124 * codes as well.
2125 * @param pGstWalk The guest-mode specific walk information.
2126 */
2127int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2128{
2129 /*
2130     * We can only handle successful walks.
2131 * We also limit ourselves to the next page.
2132 */
2133 if ( pWalk->fSucceeded
2134 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
2135 {
2136 Assert(pWalk->uLevel == 0);
2137 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2138 {
2139 /*
2140 * AMD64
2141 */
2142 if (!pWalk->fGigantPage && !pWalk->fBigPage)
2143 {
2144 /*
2145 * We fall back to full walk if the PDE table changes, if any
2146 * reserved bits are set, or if the effective page access changes.
2147 */
2148 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2149 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2150 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2151 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2152
2153 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
2154 {
2155 if (pGstWalk->u.Amd64.pPte)
2156 {
2157 X86PTEPAE Pte;
2158 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
2159 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2160 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2161 {
2162 pWalk->GCPtr = GCPtr;
2163 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2164 pGstWalk->u.Amd64.Pte.u = Pte.u;
2165 pGstWalk->u.Amd64.pPte++;
2166 return VINF_SUCCESS;
2167 }
2168 }
2169 }
2170 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
2171 {
2172 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2173 if (pGstWalk->u.Amd64.pPde)
2174 {
2175 X86PDEPAE Pde;
2176 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
2177 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
2178 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2179 {
2180 /* Get the new PTE and check out the first entry. */
2181 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2182 &pGstWalk->u.Amd64.pPt);
2183 if (RT_SUCCESS(rc))
2184 {
2185 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
2186 X86PTEPAE Pte;
2187 Pte.u = pGstWalk->u.Amd64.pPte->u;
2188 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2189 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2190 {
2191 pWalk->GCPtr = GCPtr;
2192 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2193 pGstWalk->u.Amd64.Pte.u = Pte.u;
2194 pGstWalk->u.Amd64.Pde.u = Pde.u;
2195 pGstWalk->u.Amd64.pPde++;
2196 return VINF_SUCCESS;
2197 }
2198 }
2199 }
2200 }
2201 }
2202 }
2203 else if (!pWalk->fGigantPage)
2204 {
2205 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2206 {
2207 pWalk->GCPtr = GCPtr;
2208 pWalk->GCPhys += GUEST_PAGE_SIZE;
2209 return VINF_SUCCESS;
2210 }
2211 }
2212 else
2213 {
2214 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2215 {
2216 pWalk->GCPtr = GCPtr;
2217 pWalk->GCPhys += GUEST_PAGE_SIZE;
2218 return VINF_SUCCESS;
2219 }
2220 }
2221 }
2222 }
2223 /* Case we don't handle. Do full walk. */
2224 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2225}
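
/*
 * Illustrative usage sketch (not part of the build): walking two consecutive guest pages with
 * pgmGstPtWalk() and pgmGstPtWalkNext().  As the note above says, the PGM lock must be held
 * across both calls for the cached pointers to remain usable; GCPtr is a hypothetical start
 * address supplied by the caller.
 *
 * @code
 *     PVMCC        pVM = pVCpu->CTX_SUFF(pVM);
 *     PGMPTWALK    Walk;
 *     PGMPTWALKGST GstWalk;
 *     PGM_LOCK_VOID(pVM);
 *     int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
 *     if (RT_SUCCESS(rc))
 *         rc = pgmGstPtWalkNext(pVCpu, GCPtr + GUEST_PAGE_SIZE, &Walk, &GstWalk);
 *     PGM_UNLOCK(pVM);
 * @endcode
 */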
2226
2227
2228/**
2229 * Modify page flags for a range of pages in the guest's tables.
2230 *
2231 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2232 *
2233 * @returns VBox status code.
2234 * @param pVCpu The cross context virtual CPU structure.
2235 * @param GCPtr Virtual address of the first page in the range.
2236 * @param cb Size (in bytes) of the range to apply the modification to.
2237 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2238 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2239 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2240 */
2241VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2242{
2243 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2244 VMCPU_ASSERT_EMT(pVCpu);
2245
2246 /*
2247 * Validate input.
2248 */
2249 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2250 Assert(cb);
2251
2252 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2253
2254 /*
2255 * Adjust input.
2256 */
2257 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2258 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2259 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2260
2261 /*
2262 * Call worker.
2263 */
2264 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2265 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2266 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2267 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2268
2269 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2270 return rc;
2271}
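
/*
 * Illustrative usage sketch (not part of the build): write-protecting a hypothetical range of
 * guest pages by clearing X86_PTE_RW with PGMGstModifyPage().  Note the 64-bit cast before
 * inverting the mask, as cautioned in the doc comment above.
 *
 * @code
 *     int rc = PGMGstModifyPage(pVCpu, GCPtrStart, cbRange,
 *                               0,                        // fFlags: no bits to set
 *                               ~(uint64_t)X86_PTE_RW);   // fMask:  clear the write bit only
 * @endcode
 */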
2272
2273
2274/**
2275 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2276 *
2277 * @returns @c true if the PDPEs are valid, @c false otherwise.
2278 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2279 * @param paPaePdpes The PAE PDPEs to validate.
2280 *
2281 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2282 */
2283VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2284{
2285 Assert(paPaePdpes);
2286 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2287 {
2288 X86PDPE const PaePdpe = paPaePdpes[i];
2289 if ( !(PaePdpe.u & X86_PDPE_P)
2290 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2291 { /* likely */ }
2292 else
2293 return false;
2294 }
2295 return true;
2296}
2297
2298
2299/**
2300 * Performs the lazy mapping of the 32-bit guest PD.
2301 *
2302 * @returns VBox status code.
2303 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2304 * @param ppPd Where to return the pointer to the mapping. This is
2305 * always set.
2306 */
2307int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2308{
2309 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2310 PGM_LOCK_VOID(pVM);
2311
2312 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2313
2314 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2315 PPGMPAGE pPage;
2316 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2317 if (RT_SUCCESS(rc))
2318 {
2319 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2320 if (RT_SUCCESS(rc))
2321 {
2322# ifdef IN_RING3
2323 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2324 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2325# else
2326 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2327 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2328# endif
2329 PGM_UNLOCK(pVM);
2330 return VINF_SUCCESS;
2331 }
2332 AssertRC(rc);
2333 }
2334 PGM_UNLOCK(pVM);
2335
2336 *ppPd = NULL;
2337 return rc;
2338}
2339
2340
2341/**
2342 * Performs the lazy mapping of the PAE guest PDPT.
2343 *
2344 * @returns VBox status code.
2345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2346 * @param ppPdpt Where to return the pointer to the mapping. This is
2347 * always set.
2348 */
2349int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2350{
2351 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2352 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2353 PGM_LOCK_VOID(pVM);
2354
2355 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2356 PPGMPAGE pPage;
2357 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2358 if (RT_SUCCESS(rc))
2359 {
2360 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2361 if (RT_SUCCESS(rc))
2362 {
2363# ifdef IN_RING3
2364 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2365 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2366# else
2367 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2368 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2369# endif
2370 PGM_UNLOCK(pVM);
2371 return VINF_SUCCESS;
2372 }
2373 AssertRC(rc);
2374 }
2375
2376 PGM_UNLOCK(pVM);
2377 *ppPdpt = NULL;
2378 return rc;
2379}
2380
2381
2382/**
2383 * Performs the lazy mapping / updating of a PAE guest PD.
2384 *
2385 * @returns VBox status code.
2386 * @retval  VINF_SUCCESS on success.
2387 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2388 * @param iPdpt Which PD entry to map (0..3).
2389 * @param ppPd Where to return the pointer to the mapping. This is
2390 * always set.
2391 */
2392int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2393{
2394 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2395 PGM_LOCK_VOID(pVM);
2396
2397 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2398 Assert(pGuestPDPT);
2399 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2400 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2401 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2402
2403 PPGMPAGE pPage;
2404 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2405 if (RT_SUCCESS(rc))
2406 {
2407 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2408 AssertRC(rc);
2409 if (RT_SUCCESS(rc))
2410 {
2411# ifdef IN_RING3
2412 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2413 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2414# else
2415 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2416 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2417# endif
2418 if (fChanged)
2419 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2420 PGM_UNLOCK(pVM);
2421 return VINF_SUCCESS;
2422 }
2423 }
2424
2425 /* Invalid page or some failure, invalidate the entry. */
2426 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2427 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2428 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2429
2430 PGM_UNLOCK(pVM);
2431 return rc;
2432}
2433
2434
2435/**
2436 * Performs the lazy mapping of the AMD64 guest PML4 table.
2437 *
2438 * @returns VBox status code.
2439 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2440 * @param ppPml4 Where to return the pointer to the mapping. This will
2441 * always be set.
2442 */
2443int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2444{
2445 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2446 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2447 PGM_LOCK_VOID(pVM);
2448
2449 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2450 PPGMPAGE pPage;
2451 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2452 if (RT_SUCCESS(rc))
2453 {
2454 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2455 if (RT_SUCCESS(rc))
2456 {
2457# ifdef IN_RING3
2458 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2459 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2460# else
2461 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2462 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2463# endif
2464 PGM_UNLOCK(pVM);
2465 return VINF_SUCCESS;
2466 }
2467 }
2468
2469 PGM_UNLOCK(pVM);
2470 *ppPml4 = NULL;
2471 return rc;
2472}
2473
2474
2475#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2476/**
2477 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2478 *
2479 * @returns VBox status code.
2480 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2481 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2482 * always be set.
2483 */
2484int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2485{
2486 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2487 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2488 PGM_LOCK_VOID(pVM);
2489
2490 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2491 PPGMPAGE pPage;
2492 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2493 if (RT_SUCCESS(rc))
2494 {
2495 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2496 if (RT_SUCCESS(rc))
2497 {
2498# ifdef IN_RING3
2499 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2500 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2501# else
2502 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2503 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2504# endif
2505 PGM_UNLOCK(pVM);
2506 return VINF_SUCCESS;
2507 }
2508 }
2509
2510 PGM_UNLOCK(pVM);
2511 *ppEptPml4 = NULL;
2512 return rc;
2513}
2514#endif
2515
2516
2517/**
2518 * Gets the current CR3 register value for the shadow memory context.
2519 * @returns CR3 value.
2520 * @param pVCpu The cross context virtual CPU structure.
2521 */
2522VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2523{
2524 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2525 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2526 return pPoolPage->Core.Key;
2527}
2528
2529
2530/**
2531 * Forces lazy remapping of the guest's PAE page-directory structures.
2532 *
2533 * @param pVCpu The cross context virtual CPU structure.
2534 */
2535static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2536{
2537 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2538 {
2539 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2540 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2541 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2542 }
2543}
2544
2545
2546#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2547/**
2548 * Performs second-level address translation for the given CR3 and updates the
2549 * nested-guest CR3 when successful.
2550 *
2551 * @returns VBox status code.
2552 * @param pVCpu The cross context virtual CPU structure.
2553 * @param uCr3 The masked nested-guest CR3 value.
2554 * @param   pGCPhysCr3  Where to store the translated CR3.
2555 *
2556 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2557 * mindful of this in code that's hyper sensitive to the order of
2558 * operations.
2559 */
2560static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2561{
2562 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2563 {
2564 PGMPTWALK Walk;
2565 PGMPTWALKGST GstWalk;
2566 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2567 if (RT_SUCCESS(rc))
2568 {
2569 /* Update nested-guest CR3. */
2570 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2571
2572 /* Pass back the translated result. */
2573 *pGCPhysCr3 = Walk.GCPhys;
2574 return VINF_SUCCESS;
2575 }
2576
2577 /* Translation failed. */
2578 *pGCPhysCr3 = NIL_RTGCPHYS;
2579 return rc;
2580 }
2581
2582 /*
2583 * If the nested-guest CR3 has not changed, then the previously
2584 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2585 */
2586 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2587 return VINF_SUCCESS;
2588}
2589#endif
2590
2591
2592/**
2593 * Performs and schedules necessary updates following a CR3 load or reload.
2594 *
2595 * This will normally involve mapping the guest PD or nPDPT.
2596 *
2597 * @returns VBox status code.
2598 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2599 * safely be ignored and overridden since the FF will be set too then.
2600 * @param pVCpu The cross context virtual CPU structure.
2601 * @param cr3 The new cr3.
2602 * @param fGlobal Indicates whether this is a global flush or not.
2603 */
2604VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2605{
2606 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2607 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2608
2609 VMCPU_ASSERT_EMT(pVCpu);
2610
2611 /*
2612     * Always flag the necessary updates; this is required for hardware acceleration.
2613 */
2614 /** @todo optimize this, it shouldn't always be necessary. */
2615 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2616 if (fGlobal)
2617 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2618
2619 /*
2620 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2621 */
2622 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2623 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2624#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2625 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2626 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2627 {
2628 RTGCPHYS GCPhysOut;
2629 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2630 if (RT_SUCCESS(rc))
2631 GCPhysCR3 = GCPhysOut;
2632 else
2633 {
2634 /* CR3 SLAT translation failed but we try to pretend it
2635 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2636 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2637 int const rc2 = pgmGstUnmapCr3(pVCpu);
2638 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2639 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2640 return rc2;
2641 }
2642 }
2643#endif
2644
2645 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2646 int rc = VINF_SUCCESS;
2647 if (GCPhysOldCR3 != GCPhysCR3)
2648 {
2649 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2650 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2651 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2652
2653 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2654 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2655 if (RT_LIKELY(rc == VINF_SUCCESS))
2656 { }
2657 else
2658 {
2659 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2660 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2661 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2662 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2663 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2664 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2665 }
2666
2667 if (fGlobal)
2668 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2669 else
2670 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2671 }
2672 else
2673 {
2674#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2675 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2676 if (pPool->cDirtyPages)
2677 {
2678 PGM_LOCK_VOID(pVM);
2679 pgmPoolResetDirtyPages(pVM);
2680 PGM_UNLOCK(pVM);
2681 }
2682#endif
2683 if (fGlobal)
2684 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2685 else
2686 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2687
2688 /*
2689 * Flush PAE PDPTEs.
2690 */
2691 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2692 pgmGstFlushPaePdpes(pVCpu);
2693 }
2694
2695 IEMTlbInvalidateAll(pVCpu);
2696 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2697 return rc;
2698}
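
/*
 * Illustrative usage sketch (not part of the build): a caller emulating a guest CR3 load might
 * invoke PGMFlushTLB() along these lines.  uNewCr3 is hypothetical, and whether the flush is
 * global depends on the caller's context; as noted above, VINF_PGM_SYNC_CR3 may be treated as
 * success because the corresponding force-action flag has already been set.
 *
 * @code
 *     int rc = PGMFlushTLB(pVCpu, uNewCr3, false);   // fGlobal
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         rc = VINF_SUCCESS;                         // FF already set, sync happens later
 * @endcode
 */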
2699
2700
2701/**
2702 * Performs and schedules necessary updates following a CR3 load or reload when
2703 * using nested or extended paging.
2704 *
2705 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2706 * TLB and triggering a SyncCR3.
2707 *
2708 * This will normally involve mapping the guest PD or nPDPT.
2709 *
2710 * @returns VBox status code.
2711 * @retval VINF_SUCCESS.
2712 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2713 * paging modes). This can safely be ignored and overridden since the
2714 * FF will be set too then.
2715 * @param pVCpu The cross context virtual CPU structure.
2716 * @param cr3 The new CR3.
2717 */
2718VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2719{
2720 VMCPU_ASSERT_EMT(pVCpu);
2721
2722 /* We assume we're only called in nested paging mode. */
2723 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2724
2725 /*
2726 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2727 */
2728 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2729 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2730#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2731 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2732 {
2733 RTGCPHYS GCPhysOut;
2734 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2735 if (RT_SUCCESS(rc))
2736 GCPhysCR3 = GCPhysOut;
2737 else
2738 {
2739 /* CR3 SLAT translation failed but we try to pretend it
2740 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2741 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2742 int const rc2 = pgmGstUnmapCr3(pVCpu);
2743 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2744 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2745 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2746 return rc2;
2747 }
2748 }
2749#endif
2750
2751 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2752 int rc = VINF_SUCCESS;
2753 if (GCPhysOldCR3 != GCPhysCR3)
2754 {
2755 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2756 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2757 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2758
2759 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2760 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2761
2762 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2763 }
2764 /*
2765 * Flush PAE PDPTEs.
2766 */
2767 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2768 pgmGstFlushPaePdpes(pVCpu);
2769
2770 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2771 return rc;
2772}
2773
2774
2775/**
2776 * Synchronize the paging structures.
2777 *
2778 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2779 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2780 * in several places, most importantly whenever the CR3 is loaded.
2781 *
2782 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2783 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2784 * the VMM into guest context.
2785 * @param pVCpu The cross context virtual CPU structure.
2786 * @param cr0 Guest context CR0 register
2787 * @param cr3 Guest context CR3 register
2788 * @param cr4 Guest context CR4 register
2789 * @param   fGlobal     Whether to include global page directories or not.
2790 */
2791VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2792{
2793 int rc;
2794
2795 VMCPU_ASSERT_EMT(pVCpu);
2796
2797 /*
2798 * The pool may have pending stuff and even require a return to ring-3 to
2799 * clear the whole thing.
2800 */
2801 rc = pgmPoolSyncCR3(pVCpu);
2802 if (rc != VINF_SUCCESS)
2803 return rc;
2804
2805 /*
2806 * We might be called when we shouldn't.
2807 *
2808 * The mode switching will ensure that the PD is resynced after every mode
2809 * switch. So, if we find ourselves here when in protected or real mode
2810 * we can safely clear the FF and return immediately.
2811 */
2812 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2813 {
2814 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2815 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2816 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2817 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2818 return VINF_SUCCESS;
2819 }
2820
2821 /* If global pages are not supported, then all flushes are global. */
2822 if (!(cr4 & X86_CR4_PGE))
2823 fGlobal = true;
2824 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2825 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2826
2827 /*
2828 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2829 * This should be done before SyncCR3.
2830 */
2831 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2832 {
2833 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2834
2835 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2836 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2837#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2838 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2839 {
2840 RTGCPHYS GCPhysOut;
2841 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2842 if (RT_SUCCESS(rc2))
2843 GCPhysCR3 = GCPhysOut;
2844 else
2845 {
2846 /* CR3 SLAT translation failed but we try to pretend it
2847 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2848 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2849 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2850 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2851 return rc2;
2852 }
2853 }
2854#endif
2855 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2856 if (GCPhysOldCR3 != GCPhysCR3)
2857 {
2858 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2859 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2860 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2861 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2862 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2863 }
2864
2865 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2866 if ( rc == VINF_PGM_SYNC_CR3
2867 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2868 {
2869 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2870#ifdef IN_RING3
2871 rc = pgmPoolSyncCR3(pVCpu);
2872#else
2873 if (rc == VINF_PGM_SYNC_CR3)
2874 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2875 return VINF_PGM_SYNC_CR3;
2876#endif
2877 }
2878 AssertRCReturn(rc, rc);
2879 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2880 }
2881
2882 /*
2883 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2884 */
2885 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2886
2887 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2888 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2889 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2890 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2891
2892 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2893 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2894 if (rc == VINF_SUCCESS)
2895 {
2896 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2897 {
2898 /* Go back to ring 3 if a pgm pool sync is again pending. */
2899 return VINF_PGM_SYNC_CR3;
2900 }
2901
2902 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2903 {
2904 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2905 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2906 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2907 }
2908 }
2909
2910 /*
2911 * Now flush the CR3 (guest context).
2912 */
2913 if (rc == VINF_SUCCESS)
2914 PGM_INVL_VCPU_TLBS(pVCpu);
2915 return rc;
2916}
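
/*
 * Illustrative usage sketch (not part of the build): how a caller typically reacts to the sync
 * force-action flags before resuming guest execution.  cr0, cr3 and cr4 are assumed to have
 * been read from the guest state by the caller.
 *
 * @code
 *     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc = PGMSyncCR3(pVCpu, cr0, cr3, cr4,
 *                             VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));   // fGlobal
 *         if (rc == VINF_PGM_SYNC_CR3)
 *             return rc;   // needs another round (e.g. ring-3) before resuming the guest
 *     }
 * @endcode
 */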
2917
2918
2919/**
2920 * Maps all the PAE PDPE entries.
2921 *
2922 * @returns VBox status code.
2923 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2924 * @param paPaePdpes The new PAE PDPE values.
2925 *
2926 * @remarks This function may be invoked during the process of changing the guest
2927 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2928 * reflect PAE paging just yet.
2929 */
2930VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2931{
2932 Assert(paPaePdpes);
2933 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2934 {
2935 X86PDPE const PaePdpe = paPaePdpes[i];
2936
2937 /*
2938 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2939         * is deferred.[1] Also, different situations require different handling of invalid
2940 * PDPE entries. Here we assume the caller has already validated or doesn't require
2941 * validation of the PDPEs.
2942 *
2943 * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
2944 * validated by the VMX transition.
2945 *
2946 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2947 */
2948 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2949 {
2950 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2951 RTHCPTR HCPtr;
2952
2953 RTGCPHYS GCPhys;
2954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2955 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2956 {
2957 PGMPTWALK Walk;
2958 PGMPTWALKGST GstWalk;
2959 RTGCPHYS const GCPhysNested = PaePdpe.u & X86_PDPE_PG_MASK;
2960 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysNested, &Walk, &GstWalk);
2961 if (RT_SUCCESS(rc))
2962 GCPhys = Walk.GCPhys;
2963 else
2964 {
2965 /*
2966 * Second-level address translation of the PAE PDPE has failed but we must -NOT-
2967 * abort and return a failure now. This is because we're called from a Mov CRx
2968 * instruction (or similar operation). Let's just pretend success but flag that
2969 * we need to map this PDPE lazily later.
2970 *
2971 * See Intel spec. 25.3 "Changes to instruction behavior in VMX non-root operation".
2972 * See Intel spec. 28.3.1 "EPT Overview".
2973 */
2974 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2975 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2976 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2977 continue;
2978 }
2979 }
2980 else
2981#endif
2982 {
2983 GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2984 }
2985
2986 PGM_LOCK_VOID(pVM);
2987 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2988 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2989 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2990 PGM_UNLOCK(pVM);
2991 if (RT_SUCCESS(rc))
2992 {
2993#ifdef IN_RING3
2994 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2995 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2996#else
2997 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2998 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2999#endif
3000 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
3001 continue;
3002 }
3003            AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
3004 }
3005 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
3006 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
3007 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
3008 }
3009 return VINF_SUCCESS;
3010}
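
/*
 * Illustrative usage sketch (not part of the build): validating a set of PAE PDPEs before
 * mapping them, roughly as a VMX/SVM transition path might do.  The aPaePdpes array is assumed
 * to have been filled in by the caller.
 *
 * @code
 *     X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
 *     // ... aPaePdpes[] filled in by the caller ...
 *     int rc;
 *     if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
 *         rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
 *     else
 *         rc = VERR_PGM_PAE_PDPE_RSVD;
 * @endcode
 */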
3011
3012
3013/**
3014 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
3015 *
3016 * @returns VBox status code.
3017 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3018 * @param cr3 The guest CR3 value.
3019 *
3020 * @remarks This function may be invoked during the process of changing the guest
3021 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
3022 * PAE paging just yet.
3023 */
3024VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
3025{
3026 /*
3027 * Read the page-directory-pointer table (PDPT) at CR3.
3028 */
3029 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
3030 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
3031
3032#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3033 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3034 {
3035 RTGCPHYS GCPhysOut;
3036 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
3037 if (RT_SUCCESS(rc))
3038 GCPhysCR3 = GCPhysOut;
3039 else
3040 {
3041 Log(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
3042 return rc;
3043 }
3044 }
3045#endif
3046
3047 RTHCPTR HCPtrGuestCr3;
3048 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
3049 if (RT_SUCCESS(rc))
3050 {
3051 /*
3052 * Validate the page-directory-pointer table entries (PDPE).
3053 */
3054 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
3055 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
3056 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
3057 {
3058 /*
3059 * Map the PDPT.
3060 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
3061 * that PGMFlushTLB will be called soon and only a change to CR3 then
3062 * will cause the shadow page tables to be updated.
3063 */
3064#ifdef IN_RING3
3065 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
3066 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
3067#else
3068 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
3069 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
3070#endif
3071
3072 /*
3073 * Update CPUM and map the 4 PAE PDPEs.
3074 */
3075 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
3076 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
3077 if (RT_SUCCESS(rc))
3078 {
3079#ifdef IN_RING3
3080 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
3081 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
3082#else
3083 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
3084 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
3085#endif
3086 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
3087 }
3088 }
3089 else
3090 rc = VERR_PGM_PAE_PDPE_RSVD;
3091 }
3092 return rc;
3093}
3094
3095
3096/**
3097 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
3098 *
3099 * @returns VBox status code, with the following informational code for
3100 * VM scheduling.
3101 * @retval  VINF_SUCCESS if there was no change, or it was successfully dealt with.
3102 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
3103 *
3104 * @param pVCpu The cross context virtual CPU structure.
3105 * @param cr0 The new cr0.
3106 * @param cr4 The new cr4.
3107 * @param efer The new extended feature enable register.
3108 * @param fForce Whether to force a mode change.
3109 */
3110VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
3111{
3112 VMCPU_ASSERT_EMT(pVCpu);
3113
3114 /*
3115 * Calc the new guest mode.
3116 *
3117 * Note! We check PG before PE and without requiring PE because of the
3118 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
3119 */
3120 PGMMODE enmGuestMode;
3121 if (cr0 & X86_CR0_PG)
3122 {
3123 if (!(cr4 & X86_CR4_PAE))
3124 {
3125 bool const fPse = !!(cr4 & X86_CR4_PSE);
3126 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
3127 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
3128 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
3129 enmGuestMode = PGMMODE_32_BIT;
3130 }
3131 else if (!(efer & MSR_K6_EFER_LME))
3132 {
3133 if (!(efer & MSR_K6_EFER_NXE))
3134 enmGuestMode = PGMMODE_PAE;
3135 else
3136 enmGuestMode = PGMMODE_PAE_NX;
3137 }
3138 else
3139 {
3140 if (!(efer & MSR_K6_EFER_NXE))
3141 enmGuestMode = PGMMODE_AMD64;
3142 else
3143 enmGuestMode = PGMMODE_AMD64_NX;
3144 }
3145 }
3146 else if (!(cr0 & X86_CR0_PE))
3147 enmGuestMode = PGMMODE_REAL;
3148 else
3149 enmGuestMode = PGMMODE_PROTECTED;
3150
3151 /*
3152 * Did it change?
3153 */
3154 if ( !fForce
3155 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
3156 return VINF_SUCCESS;
3157
3158 /* Flush the TLB */
3159 PGM_INVL_VCPU_TLBS(pVCpu);
3160 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
3161}
3162
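/*
 * Illustrative sketch (documentation only, not part of the build): how a
 * control-register write path could feed the updated CR0/CR4/EFER values into
 * PGMChangeMode above.  The helper name pgmSampleControlRegsChanged is made up
 * for this example, and the use of the CPUMGetGuestCR0/CR4/EFER accessors is an
 * assumption; see the real callers in CPUM/IEM for authoritative usage.
 *
 * @code
 *  static int pgmSampleControlRegsChanged(PVMCPUCC pVCpu)
 *  {
 *      uint64_t const cr0  = CPUMGetGuestCR0(pVCpu);
 *      uint64_t const cr4  = CPUMGetGuestCR4(pVCpu);
 *      uint64_t const efer = CPUMGetGuestEFER(pVCpu);
 *      return PGMChangeMode(pVCpu, cr0, cr4, efer, false); // fForce=false
 *  }
 * @endcode
 */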
3163
3164/**
3165 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3166 *
3167 * @returns PGM_TYPE_*.
3168 * @param pgmMode The mode value to convert.
3169 */
3170DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3171{
3172 switch (pgmMode)
3173 {
3174 case PGMMODE_REAL: return PGM_TYPE_REAL;
3175 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3176 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3177 case PGMMODE_PAE:
3178 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3179 case PGMMODE_AMD64:
3180 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3181 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3182 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3183 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3184 case PGMMODE_EPT: return PGM_TYPE_EPT;
3185 case PGMMODE_NONE: return PGM_TYPE_NONE;
3186 default:
3187 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3188 }
3189}
3190
3191
3192/**
3193 * Calculates the shadow paging mode.
3194 *
3195 * @returns The shadow paging mode.
3196 * @param pVM The cross context VM structure.
3197 * @param enmGuestMode The guest mode.
3198 * @param enmHostMode The host mode.
3199 * @param enmShadowMode The current shadow mode.
3200 */
3201static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3202{
3203 switch (enmGuestMode)
3204 {
3205 case PGMMODE_REAL:
3206 case PGMMODE_PROTECTED:
3207 switch (enmHostMode)
3208 {
3209 case SUPPAGINGMODE_32_BIT:
3210 case SUPPAGINGMODE_32_BIT_GLOBAL:
3211 enmShadowMode = PGMMODE_32_BIT;
3212 break;
3213
3214 case SUPPAGINGMODE_PAE:
3215 case SUPPAGINGMODE_PAE_NX:
3216 case SUPPAGINGMODE_PAE_GLOBAL:
3217 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3218 enmShadowMode = PGMMODE_PAE;
3219 break;
3220
3221 case SUPPAGINGMODE_AMD64:
3222 case SUPPAGINGMODE_AMD64_GLOBAL:
3223 case SUPPAGINGMODE_AMD64_NX:
3224 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3225 enmShadowMode = PGMMODE_PAE;
3226 break;
3227
3228 default:
3229 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3230 }
3231 break;
3232
3233 case PGMMODE_32_BIT:
3234 switch (enmHostMode)
3235 {
3236 case SUPPAGINGMODE_32_BIT:
3237 case SUPPAGINGMODE_32_BIT_GLOBAL:
3238 enmShadowMode = PGMMODE_32_BIT;
3239 break;
3240
3241 case SUPPAGINGMODE_PAE:
3242 case SUPPAGINGMODE_PAE_NX:
3243 case SUPPAGINGMODE_PAE_GLOBAL:
3244 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3245 enmShadowMode = PGMMODE_PAE;
3246 break;
3247
3248 case SUPPAGINGMODE_AMD64:
3249 case SUPPAGINGMODE_AMD64_GLOBAL:
3250 case SUPPAGINGMODE_AMD64_NX:
3251 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3252 enmShadowMode = PGMMODE_PAE;
3253 break;
3254
3255 default:
3256 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3257 }
3258 break;
3259
3260 case PGMMODE_PAE:
3261 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3262 switch (enmHostMode)
3263 {
3264 case SUPPAGINGMODE_32_BIT:
3265 case SUPPAGINGMODE_32_BIT_GLOBAL:
3266 enmShadowMode = PGMMODE_PAE;
3267 break;
3268
3269 case SUPPAGINGMODE_PAE:
3270 case SUPPAGINGMODE_PAE_NX:
3271 case SUPPAGINGMODE_PAE_GLOBAL:
3272 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3273 enmShadowMode = PGMMODE_PAE;
3274 break;
3275
3276 case SUPPAGINGMODE_AMD64:
3277 case SUPPAGINGMODE_AMD64_GLOBAL:
3278 case SUPPAGINGMODE_AMD64_NX:
3279 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3280 enmShadowMode = PGMMODE_PAE;
3281 break;
3282
3283 default:
3284 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3285 }
3286 break;
3287
3288 case PGMMODE_AMD64:
3289 case PGMMODE_AMD64_NX:
3290 switch (enmHostMode)
3291 {
3292 case SUPPAGINGMODE_32_BIT:
3293 case SUPPAGINGMODE_32_BIT_GLOBAL:
3294 enmShadowMode = PGMMODE_AMD64;
3295 break;
3296
3297 case SUPPAGINGMODE_PAE:
3298 case SUPPAGINGMODE_PAE_NX:
3299 case SUPPAGINGMODE_PAE_GLOBAL:
3300 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3301 enmShadowMode = PGMMODE_AMD64;
3302 break;
3303
3304 case SUPPAGINGMODE_AMD64:
3305 case SUPPAGINGMODE_AMD64_GLOBAL:
3306 case SUPPAGINGMODE_AMD64_NX:
3307 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3308 enmShadowMode = PGMMODE_AMD64;
3309 break;
3310
3311 default:
3312 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3313 }
3314 break;
3315
3316 default:
3317 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3318 }
3319
3320 /*
3321 * Override the shadow mode when NEM, IEM or nested paging is active.
3322 */
3323 if (!VM_IS_HM_ENABLED(pVM))
3324 {
3325 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
3326 pVM->pgm.s.fNestedPaging = true;
3327 enmShadowMode = PGMMODE_NONE;
3328 }
3329 else
3330 {
3331 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3332 pVM->pgm.s.fNestedPaging = fNestedPaging;
3333 if (fNestedPaging)
3334 {
3335 if (HMIsVmxActive(pVM))
3336 enmShadowMode = PGMMODE_EPT;
3337 else
3338 {
3339 /* The nested SVM paging depends on the host one. */
3340 Assert(HMIsSvmActive(pVM));
3341 if ( enmGuestMode == PGMMODE_AMD64
3342 || enmGuestMode == PGMMODE_AMD64_NX)
3343 enmShadowMode = PGMMODE_NESTED_AMD64;
3344 else
3345 switch (pVM->pgm.s.enmHostMode)
3346 {
3347 case SUPPAGINGMODE_32_BIT:
3348 case SUPPAGINGMODE_32_BIT_GLOBAL:
3349 enmShadowMode = PGMMODE_NESTED_32BIT;
3350 break;
3351
3352 case SUPPAGINGMODE_PAE:
3353 case SUPPAGINGMODE_PAE_GLOBAL:
3354 case SUPPAGINGMODE_PAE_NX:
3355 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3356 enmShadowMode = PGMMODE_NESTED_PAE;
3357 break;
3358
3359 case SUPPAGINGMODE_AMD64:
3360 case SUPPAGINGMODE_AMD64_GLOBAL:
3361 case SUPPAGINGMODE_AMD64_NX:
3362 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3363 enmShadowMode = PGMMODE_NESTED_AMD64;
3364 break;
3365
3366 default:
3367 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3368 }
3369 }
3370 }
3371#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3372 else
3373 {
3374 /* Nested paging is a requirement for nested VT-x. */
3375 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3376 }
3377#endif
3378 }
3379
3380 return enmShadowMode;
3381}
3382
3383
3384/**
3385 * Performs the actual mode change.
3386 * This is called by PGMChangeMode and pgmR3InitPaging().
3387 *
3388 * @returns VBox status code. May suspend or power off the VM on error, but this
3389 * will trigger using FFs and not informational status codes.
3390 *
3391 * @param pVM The cross context VM structure.
3392 * @param pVCpu The cross context virtual CPU structure.
3393 * @param enmGuestMode The new guest mode. This is assumed to be different from
3394 * the current mode.
3395 * @param fForce Whether to force a shadow paging mode change.
3396 */
3397VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
3398{
3399 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3400 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3401
3402 /*
3403 * Calc the shadow mode and switcher.
3404 */
3405 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3406 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
3407
3408 /*
3409 * Exit old mode(s).
3410 */
3411 /* shadow */
3412 if (fShadowModeChanged)
3413 {
3414 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3415 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3416 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3417 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3418 {
3419 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3420 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3421 }
3422 }
3423 else
3424 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3425
3426 /* guest */
3427 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3428 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3429 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3430 {
3431 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3432 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3433 }
3434 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3435 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3436 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3437 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3438
3439 /*
3440 * Change the paging mode data indexes.
3441 */
3442 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3443 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3444 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3445 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3446 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3447 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3448 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3449#ifdef IN_RING3
3450 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3451#endif
3452
3453 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3454 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3455 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3456 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3457 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3458 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3459 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3460#ifdef IN_RING3
3461 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3462#endif
3463
3464 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3465 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3466 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3467 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3468 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3469 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3470 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3471 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3472 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3473 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3474#ifdef VBOX_STRICT
3475 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3476#endif
3477
3478 /*
3479 * Determine SLAT mode -before- entering the new shadow mode!
3480 */
3481 pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
3482
3483 /*
3484 * Enter new shadow mode (if changed).
3485 */
3486 if (fShadowModeChanged)
3487 {
3488 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3489 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu);
3490 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3491 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3492 }
3493
3494 /*
3495 * Always flag the necessary updates
3496 */
3497 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3498
3499 /*
3500 * Enter the new guest and shadow+guest modes.
3501 */
3502 /* Calc the new CR3 value. */
3503 RTGCPHYS GCPhysCR3;
3504 switch (enmGuestMode)
3505 {
3506 case PGMMODE_REAL:
3507 case PGMMODE_PROTECTED:
3508 GCPhysCR3 = NIL_RTGCPHYS;
3509 break;
3510
3511 case PGMMODE_32_BIT:
3512 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3513 break;
3514
3515 case PGMMODE_PAE_NX:
3516 case PGMMODE_PAE:
3517 if (!pVM->cpum.ro.GuestFeatures.fPae)
3518#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3519 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3520 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3521#else
3522 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3523
3524#endif
3525 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3526 break;
3527
3528#ifdef VBOX_WITH_64_BITS_GUESTS
3529 case PGMMODE_AMD64_NX:
3530 case PGMMODE_AMD64:
3531 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3532 break;
3533#endif
3534 default:
3535 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3536 }
3537
3538#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3539 /*
3540 * If a nested-guest is using EPT paging:
3541 * - Update the second-level address translation (SLAT) mode.
3542 * - Indicate that the CR3 is nested-guest physical address.
3543 */
3544 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3545 {
3546 if (PGMMODE_WITH_PAGING(enmGuestMode))
3547 {
3548 /*
3549 * Translate CR3 to its guest-physical address.
3550 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3551 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3552 */
3553 PGMPTWALK Walk;
3554 PGMPTWALKGST GstWalk;
3555 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3556 if (RT_SUCCESS(rc))
3557 { /* likely */ }
3558 else
3559 {
3560 /*
3561 * SLAT failed but we avoid reporting this to the caller because the caller
3562 * is not supposed to fail. The only time the caller needs to indicate a
3563 * failure to software is when PAE paging is used by the nested-guest, but
3564 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3565 * In all other cases, the failure will be indicated when CR3 tries to be
3566 * translated on the next linear-address memory access.
3567 * See Intel spec. 27.2.1 "EPT Overview".
3568 */
3569 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3570
3571 /* Trying to coax PGM to succeed for the time being... */
3572 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3573 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3574 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3575 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3576 return VINF_SUCCESS;
3577 }
3578 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3579 GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
3580 }
3581 }
3582 else
3583 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3584#endif
3585
3586 /*
3587 * Enter the new guest mode.
3588 */
3589 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3590 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3591 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3592
3593 /* Set the new guest CR3 (and nested-guest CR3). */
3594 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3595
3596 /* status codes. */
3597 AssertRC(rc);
3598 AssertRC(rc2);
3599 if (RT_SUCCESS(rc))
3600 {
3601 rc = rc2;
3602 if (RT_SUCCESS(rc)) /* no informational status codes. */
3603 rc = VINF_SUCCESS;
3604 }
3605
3606 /*
3607 * Notify HM.
3608 */
3609 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3610 return rc;
3611}
3612
3613
3614/**
3615 * Called by CPUM or REM when CR0.WP changes to 1.
3616 *
3617 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3618 * @thread EMT
3619 */
3620VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3621{
3622 /*
3623 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3624 *
3625 * Use the counter to judge whether there might be pool pages with active
3626 * hacks in them. If there are, we will be running the risk of messing up
3627 * the guest by allowing it to write to read-only pages. Thus, we have to
3628 * clear the page pool ASAP if there is the slightest chance.
3629 */
3630 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3631 {
3632 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3633
3634 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3635 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3636 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3637 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3638 }
3639}
3640
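/*
 * Illustrative sketch (documentation only): the kind of CR0.WP edge check a
 * caller is expected to perform before invoking PGMCr0WpEnabled above.  The
 * helper name pgmSampleCr0Changed is hypothetical; the real notification
 * sites live in CPUM/IEM.
 *
 * @code
 *  static void pgmSampleCr0Changed(PVMCPUCC pVCpu, uint64_t uOldCr0, uint64_t uNewCr0)
 *  {
 *      // Only the 0 -> 1 transition of CR0.WP needs the page pool cleanup.
 *      if (   !(uOldCr0 & X86_CR0_WP)
 *          &&  (uNewCr0 & X86_CR0_WP))
 *          PGMCr0WpEnabled(pVCpu);
 *  }
 * @endcode
 */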
3641
3642/**
3643 * Gets the current guest paging mode.
3644 *
3645 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3646 *
3647 * @returns The current paging mode.
3648 * @param pVCpu The cross context virtual CPU structure.
3649 */
3650VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3651{
3652 return pVCpu->pgm.s.enmGuestMode;
3653}
3654
3655
3656/**
3657 * Gets the current shadow paging mode.
3658 *
3659 * @returns The current paging mode.
3660 * @param pVCpu The cross context virtual CPU structure.
3661 */
3662VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3663{
3664 return pVCpu->pgm.s.enmShadowMode;
3665}
3666
3667
3668/**
3669 * Gets the current host paging mode.
3670 *
3671 * @returns The current paging mode.
3672 * @param pVM The cross context VM structure.
3673 */
3674VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3675{
3676 switch (pVM->pgm.s.enmHostMode)
3677 {
3678 case SUPPAGINGMODE_32_BIT:
3679 case SUPPAGINGMODE_32_BIT_GLOBAL:
3680 return PGMMODE_32_BIT;
3681
3682 case SUPPAGINGMODE_PAE:
3683 case SUPPAGINGMODE_PAE_GLOBAL:
3684 return PGMMODE_PAE;
3685
3686 case SUPPAGINGMODE_PAE_NX:
3687 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3688 return PGMMODE_PAE_NX;
3689
3690 case SUPPAGINGMODE_AMD64:
3691 case SUPPAGINGMODE_AMD64_GLOBAL:
3692 return PGMMODE_AMD64;
3693
3694 case SUPPAGINGMODE_AMD64_NX:
3695 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3696 return PGMMODE_AMD64_NX;
3697
3698 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3699 }
3700
3701 return PGMMODE_INVALID;
3702}
3703
3704
3705/**
3706 * Get mode name.
3707 *
3708 * @returns read-only name string.
3709 * @param enmMode The mode whose name is desired.
3710 */
3711VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3712{
3713 switch (enmMode)
3714 {
3715 case PGMMODE_REAL: return "Real";
3716 case PGMMODE_PROTECTED: return "Protected";
3717 case PGMMODE_32_BIT: return "32-bit";
3718 case PGMMODE_PAE: return "PAE";
3719 case PGMMODE_PAE_NX: return "PAE+NX";
3720 case PGMMODE_AMD64: return "AMD64";
3721 case PGMMODE_AMD64_NX: return "AMD64+NX";
3722 case PGMMODE_NESTED_32BIT: return "Nested-32";
3723 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3724 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3725 case PGMMODE_EPT: return "EPT";
3726 case PGMMODE_NONE: return "None";
3727 default: return "unknown mode value";
3728 }
3729}
3730
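/*
 * Illustrative sketch (documentation only): PGMGetModeName is primarily a
 * logging/debugging aid, typically paired with the mode getters above.  The
 * log message below is made up for the example.
 *
 * @code
 *  Log(("PGM: guest=%s shadow=%s host=%s\n",
 *       PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *       PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *       PGMGetModeName(PGMGetHostMode(pVCpu->CTX_SUFF(pVM)))));
 * @endcode
 */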
3731
3732#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3733/**
3734 * Gets the SLAT mode name.
3735 *
3736 * @returns The read-only SLAT mode descriptive string.
3737 * @param enmSlatMode The SLAT mode value.
3738 */
3739VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3740{
3741 switch (enmSlatMode)
3742 {
3743 case PGMSLAT_DIRECT: return "Direct";
3744 case PGMSLAT_EPT: return "EPT";
3745 case PGMSLAT_32BIT: return "32-bit";
3746 case PGMSLAT_PAE: return "PAE";
3747 case PGMSLAT_AMD64: return "AMD64";
3748 default: return "Unknown";
3749 }
3750}
3751#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
3752
3753
3754/**
3755 * Gets the physical address represented in the guest CR3 as PGM sees it.
3756 *
3757 * This is mainly for logging and debugging.
3758 *
3759 * @returns PGM's guest CR3 value.
3760 * @param pVCpu The cross context virtual CPU structure.
3761 */
3762VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3763{
3764 return pVCpu->pgm.s.GCPhysCR3;
3765}
3766
3767
3768
3769/**
3770 * Notification from CPUM that the EFER.NXE bit has changed.
3771 *
3772 * @param pVCpu The cross context virtual CPU structure of the CPU for
3773 * which EFER changed.
3774 * @param fNxe The new NXE state.
3775 */
3776VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3777{
3778/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3779 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3780
3781 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3782 if (fNxe)
3783 {
3784 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3785 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3786 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3787 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3788 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3789 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3790 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3791 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3792 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3793 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3794 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3795
3796 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3797 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3798 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3799 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3800 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3801 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3802 }
3803 else
3804 {
3805 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3806 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3807 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3808 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3809 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3810 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3811 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3812 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3813 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3814 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3815 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3816
3817 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3818 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3819 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3820 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3821 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3822 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3823 }
3824}
3825
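/*
 * Illustrative sketch (documentation only): the EFER.NXE edge detection a
 * caller would typically perform before notifying PGM via PGMNotifyNxeChanged
 * above.  The helper name pgmSampleEferChanged is hypothetical; CPUM owns the
 * real notification site.
 *
 * @code
 *  static void pgmSampleEferChanged(PVMCPUCC pVCpu, uint64_t uOldEfer, uint64_t uNewEfer)
 *  {
 *      if ((uOldEfer ^ uNewEfer) & MSR_K6_EFER_NXE)
 *          PGMNotifyNxeChanged(pVCpu, RT_BOOL(uNewEfer & MSR_K6_EFER_NXE));
 *  }
 * @endcode
 */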
3826
3827/**
3828 * Checks if any PGM pool pages are marked dirty (i.e. not write monitored).
3829 *
3830 * @returns true if there are dirty pool pages, false otherwise.
3831 * @param pVM The cross context VM structure.
3832 */
3833VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3834{
3835 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3836}
3837
3838
3839/**
3840 * Checks if this VCPU currently owns the PGM lock.
3841 *
3842 * @returns true if this VCPU owns the lock, false if not.
3843 * @param pVM The cross context VM structure.
3844 */
3845VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3846{
3847 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3848}
3849
3850
3851/**
3852 * Enables or disables large page usage.
3853 *
3854 * @returns VBox status code.
3855 * @param pVM The cross context VM structure.
3856 * @param fUseLargePages Whether to use large pages.
3857 */
3858VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3859{
3860 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3861
3862 pVM->pgm.s.fUseLargePages = fUseLargePages;
3863 return VINF_SUCCESS;
3864}
3865
3866
3867/**
3868 * Acquire the PGM lock.
3869 *
3870 * @returns VBox status code
3871 * @param pVM The cross context VM structure.
3872 * @param fVoid Set if the caller cannot handle failure returns.
3873 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3874 */
3875#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3876int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3877#else
3878int pgmLock(PVMCC pVM, bool fVoid)
3879#endif
3880{
3881#if defined(VBOX_STRICT)
3882 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3883#else
3884 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3885#endif
3886 if (RT_SUCCESS(rc))
3887 return rc;
3888 if (fVoid)
3889 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3890 else
3891 AssertRC(rc);
3892 return rc;
3893}
3894
3895
3896/**
3897 * Release the PGM lock.
3898 *
3900 * @param pVM The cross context VM structure.
3901 */
3902void pgmUnlock(PVMCC pVM)
3903{
3904 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3905 pVM->pgm.s.cDeprecatedPageLocks = 0;
3906 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3907 if (rc == VINF_SEM_NESTED)
3908 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3909}
3910
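/*
 * Illustrative sketch (documentation only): the usual critical section
 * discipline around PGM state, using the PGM_LOCK_VOID / PGM_UNLOCK wrappers
 * employed elsewhere in this file (e.g. PGMAssertCR3, PGMSetGuestEptPtr).
 * The helper name pgmSampleLockedUpdate is hypothetical.
 *
 * @code
 *  static void pgmSampleLockedUpdate(PVMCC pVM)
 *  {
 *      PGM_LOCK_VOID(pVM);
 *      // ... read or update pVM->pgm.s state here ...
 *      PGM_UNLOCK(pVM);
 *  }
 * @endcode
 */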
3911
3912#if !defined(IN_R0) || defined(LOG_ENABLED)
3913
3914/** Format handler for PGMPAGE.
3915 * @copydoc FNRTSTRFORMATTYPE */
3916static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3917 const char *pszType, void const *pvValue,
3918 int cchWidth, int cchPrecision, unsigned fFlags,
3919 void *pvUser)
3920{
3921 size_t cch;
3922 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3923 if (RT_VALID_PTR(pPage))
3924 {
3925 char szTmp[64+80];
3926
3927 cch = 0;
3928
3929 /* The single char state stuff. */
3930 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3931 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3932
3933# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3934 if (IS_PART_INCLUDED(5))
3935 {
3936 static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
3937 szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
3938 | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
3939 }
3940
3941 /* The type. */
3942 if (IS_PART_INCLUDED(4))
3943 {
3944 szTmp[cch++] = ':';
3945 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3946 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3947 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3948 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3949 }
3950
3951 /* The numbers. */
3952 if (IS_PART_INCLUDED(3))
3953 {
3954 szTmp[cch++] = ':';
3955 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3956 }
3957
3958 if (IS_PART_INCLUDED(2))
3959 {
3960 szTmp[cch++] = ':';
3961 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3962 }
3963
3964 if (IS_PART_INCLUDED(6))
3965 {
3966 szTmp[cch++] = ':';
3967 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3968 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3969 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3970 }
3971# undef IS_PART_INCLUDED
3972
3973 cch = pfnOutput(pvArgOutput, szTmp, cch);
3974 }
3975 else
3976 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3977 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3978 return cch;
3979}
3980
3981
3982/** Format handler for PGMRAMRANGE.
3983 * @copydoc FNRTSTRFORMATTYPE */
3984static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3985 const char *pszType, void const *pvValue,
3986 int cchWidth, int cchPrecision, unsigned fFlags,
3987 void *pvUser)
3988{
3989 size_t cch;
3990 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3991 if (RT_VALID_PTR(pRam))
3992 {
3993 char szTmp[80];
3994 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3995 cch = pfnOutput(pvArgOutput, szTmp, cch);
3996 }
3997 else
3998 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3999 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
4000 return cch;
4001}
4002
4003/** Format type handlers to be registered/deregistered. */
4004static const struct
4005{
4006 char szType[24];
4007 PFNRTSTRFORMATTYPE pfnHandler;
4008} g_aPgmFormatTypes[] =
4009{
4010 { "pgmpage", pgmFormatTypeHandlerPage },
4011 { "pgmramrange", pgmFormatTypeHandlerRamRange }
4012};
4013
4014#endif /* !IN_R0 || LOG_ENABLED */
4015
4016/**
4017 * Registers the global string format types.
4018 *
4019 * This should be called at module load time or in some other manner that
4020 * ensures it's called exactly once.
4021 *
4022 * @returns IPRT status code on RTStrFormatTypeRegister failure.
4023 */
4024VMMDECL(int) PGMRegisterStringFormatTypes(void)
4025{
4026#if !defined(IN_R0) || defined(LOG_ENABLED)
4027 int rc = VINF_SUCCESS;
4028 unsigned i;
4029 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4030 {
4031 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4032# ifdef IN_RING0
4033 if (rc == VERR_ALREADY_EXISTS)
4034 {
4035 /* in case of cleanup failure in ring-0 */
4036 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4037 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4038 }
4039# endif
4040 }
4041 if (RT_FAILURE(rc))
4042 while (i-- > 0)
4043 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4044
4045 return rc;
4046#else
4047 return VINF_SUCCESS;
4048#endif
4049}
4050
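/*
 * Illustrative sketch (documentation only): once registered, the custom types
 * above are consumed through IPRT's %R[type] format extension in logging and
 * assertion strings, along these lines (the message text is made up):
 *
 * @code
 *  Log(("Page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 * @endcode
 */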
4051
4052/**
4053 * Deregisters the global string format types.
4054 *
4055 * This should be called at module unload time or in some other manner that
4056 * ensures it's called exactly once.
4057 */
4058VMMDECL(void) PGMDeregisterStringFormatTypes(void)
4059{
4060#if !defined(IN_R0) || defined(LOG_ENABLED)
4061 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4062 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4063#endif
4064}
4065
4066
4067#ifdef VBOX_STRICT
4068/**
4069 * Asserts that everything related to the guest CR3 is correctly shadowed.
4070 *
4071 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
4072 * and assert the correctness of the guest CR3 mapping before asserting that the
4073 * shadow page tables are in sync with the guest page tables.
4074 *
4075 * @returns Number of conflicts.
4076 * @param pVM The cross context VM structure.
4077 * @param pVCpu The cross context virtual CPU structure.
4078 * @param cr3 The current guest CR3 register value.
4079 * @param cr4 The current guest CR4 register value.
4080 */
4081VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
4082{
4083 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4084
4085 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
4086 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
4087 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
4088
4089 PGM_LOCK_VOID(pVM);
4090 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
4091 PGM_UNLOCK(pVM);
4092
4093 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4094 return cErrors;
4095}
4096#endif /* VBOX_STRICT */
4097
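/*
 * Illustrative sketch (documentation only): how a strict-build caller might
 * invoke PGMAssertCR3 above with the current control register values.  The
 * use of the CPUMGetGuestCR3/CR4 accessors here is an assumption made for the
 * example; see the actual strict-build call sites for authoritative usage.
 *
 * @code
 *  #ifdef VBOX_STRICT
 *      unsigned const cErrors = PGMAssertCR3(pVM, pVCpu,
 *                                            CPUMGetGuestCR3(pVCpu),
 *                                            CPUMGetGuestCR4(pVCpu));
 *      Assert(!cErrors);
 *  #endif
 * @endcode
 */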
4098
4099/**
4100 * Updates PGM's copy of the guest's EPT pointer.
4101 *
4102 * @param pVCpu The cross context virtual CPU structure.
4103 * @param uEptPtr The EPT pointer.
4104 *
4105 * @remarks This can be called as part of VM-entry so we might be in the midst of
4106 * switching to VMX non-root mode.
4107 */
4108VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
4109{
4110 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4111 PGM_LOCK_VOID(pVM);
4112 pVCpu->pgm.s.uEptPtr = uEptPtr;
4113 pVCpu->pgm.s.pGstEptPml4R3 = 0;
4114 pVCpu->pgm.s.pGstEptPml4R0 = 0;
4115 PGM_UNLOCK(pVM);
4116}
4117