VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@96979

Last change on this file since 96979 was 96979, checked in by vboxsync on 2022-10-04

VMM/PGM,IEM,HM: Added a PGMPHYSHANDLER_F_NOT_IN_HM flag to better deal with a nested APIC access page. bugref:10092

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 147.8 KB
1/* $Id: PGMAll.cpp 96979 2022-10-04 12:46:05Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/selm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/sup.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/stam.h>
42#include <VBox/vmm/trpm.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/hm_vmx.h>
46#include "PGMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "PGMInline.h"
49#include <iprt/assert.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#include <iprt/string.h>
54#include <VBox/log.h>
55#include <VBox/param.h>
56#include <VBox/err.h>
57
58
59/*********************************************************************************************************************************
60* Internal Functions *
61*********************************************************************************************************************************/
62DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
63DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
64DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
65#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
66static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
67 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
68static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
69 PPGMPTWALKGST pGstWalk);
70static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
71static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
72 PPGMPTWALKGST pGstWalkAll);
73#endif
74static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
75static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
76
77
78/*
79 * Second level translation - EPT.
80 */
81#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
82# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
83# include "PGMSlatDefs.h"
84# include "PGMAllGstSlatEpt.cpp.h"
85# undef PGM_SLAT_TYPE
86#endif
87
88
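/*
 * The sections below instantiate the paging template code multiple times:
 * PGMAllShw.h is included once per shadow paging mode (PGM_SHW_TYPE), and for
 * each shadow mode PGMAllGst.h/PGMAllBth.h are included once per guest mode
 * (PGM_GST_TYPE), producing mode-specific worker functions whose names are
 * derived via the PGM_SHW_NAME/PGM_GST_NAME/PGM_BTH_NAME macros.  The
 * resulting functions are collected in the mode data tables further down.
 */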
89/*
90 * Shadow - 32-bit mode
91 */
92#define PGM_SHW_TYPE PGM_TYPE_32BIT
93#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
94#include "PGMAllShw.h"
95
96/* Guest - real mode */
97#define PGM_GST_TYPE PGM_TYPE_REAL
98#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
99#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
100#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
101#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
102#include "PGMGstDefs.h"
103#include "PGMAllGst.h"
104#include "PGMAllBth.h"
105#undef BTH_PGMPOOLKIND_PT_FOR_PT
106#undef BTH_PGMPOOLKIND_ROOT
107#undef PGM_BTH_NAME
108#undef PGM_GST_TYPE
109#undef PGM_GST_NAME
110
111/* Guest - protected mode */
112#define PGM_GST_TYPE PGM_TYPE_PROT
113#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
114#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
115#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
116#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
117#include "PGMGstDefs.h"
118#include "PGMAllGst.h"
119#include "PGMAllBth.h"
120#undef BTH_PGMPOOLKIND_PT_FOR_PT
121#undef BTH_PGMPOOLKIND_ROOT
122#undef PGM_BTH_NAME
123#undef PGM_GST_TYPE
124#undef PGM_GST_NAME
125
126/* Guest - 32-bit mode */
127#define PGM_GST_TYPE PGM_TYPE_32BIT
128#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
129#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
130#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
131#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
132#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
133#include "PGMGstDefs.h"
134#include "PGMAllGst.h"
135#include "PGMAllBth.h"
136#undef BTH_PGMPOOLKIND_PT_FOR_BIG
137#undef BTH_PGMPOOLKIND_PT_FOR_PT
138#undef BTH_PGMPOOLKIND_ROOT
139#undef PGM_BTH_NAME
140#undef PGM_GST_TYPE
141#undef PGM_GST_NAME
142
143#undef PGM_SHW_TYPE
144#undef PGM_SHW_NAME
145
146
147/*
148 * Shadow - PAE mode
149 */
150#define PGM_SHW_TYPE PGM_TYPE_PAE
151#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
152#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
153#include "PGMAllShw.h"
154
155/* Guest - real mode */
156#define PGM_GST_TYPE PGM_TYPE_REAL
157#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
158#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
159#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
160#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
161#include "PGMGstDefs.h"
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - protected mode */
170#define PGM_GST_TYPE PGM_TYPE_PROT
171#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
174#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
175#include "PGMGstDefs.h"
176#include "PGMAllBth.h"
177#undef BTH_PGMPOOLKIND_PT_FOR_PT
178#undef BTH_PGMPOOLKIND_ROOT
179#undef PGM_BTH_NAME
180#undef PGM_GST_TYPE
181#undef PGM_GST_NAME
182
183/* Guest - 32-bit mode */
184#define PGM_GST_TYPE PGM_TYPE_32BIT
185#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
186#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
187#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
188#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
189#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
190#include "PGMGstDefs.h"
191#include "PGMAllBth.h"
192#undef BTH_PGMPOOLKIND_PT_FOR_BIG
193#undef BTH_PGMPOOLKIND_PT_FOR_PT
194#undef BTH_PGMPOOLKIND_ROOT
195#undef PGM_BTH_NAME
196#undef PGM_GST_TYPE
197#undef PGM_GST_NAME
198
199
200/* Guest - PAE mode */
201#define PGM_GST_TYPE PGM_TYPE_PAE
202#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
203#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
204#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
205#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
206#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
207#include "PGMGstDefs.h"
208#include "PGMAllGst.h"
209#include "PGMAllBth.h"
210#undef BTH_PGMPOOLKIND_PT_FOR_BIG
211#undef BTH_PGMPOOLKIND_PT_FOR_PT
212#undef BTH_PGMPOOLKIND_ROOT
213#undef PGM_BTH_NAME
214#undef PGM_GST_TYPE
215#undef PGM_GST_NAME
216
217#undef PGM_SHW_TYPE
218#undef PGM_SHW_NAME
219
220
221/*
222 * Shadow - AMD64 mode
223 */
224#define PGM_SHW_TYPE PGM_TYPE_AMD64
225#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
226#include "PGMAllShw.h"
227
228/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
229/** @todo retire this hack. */
230#define PGM_GST_TYPE PGM_TYPE_PROT
231#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
232#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
233#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
234#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
235#include "PGMGstDefs.h"
236#include "PGMAllBth.h"
237#undef BTH_PGMPOOLKIND_PT_FOR_PT
238#undef BTH_PGMPOOLKIND_ROOT
239#undef PGM_BTH_NAME
240#undef PGM_GST_TYPE
241#undef PGM_GST_NAME
242
243#ifdef VBOX_WITH_64_BITS_GUESTS
244/* Guest - AMD64 mode */
245# define PGM_GST_TYPE PGM_TYPE_AMD64
246# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
247# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
248# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
249# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
250# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
251# include "PGMGstDefs.h"
252# include "PGMAllGst.h"
253# include "PGMAllBth.h"
254# undef BTH_PGMPOOLKIND_PT_FOR_BIG
255# undef BTH_PGMPOOLKIND_PT_FOR_PT
256# undef BTH_PGMPOOLKIND_ROOT
257# undef PGM_BTH_NAME
258# undef PGM_GST_TYPE
259# undef PGM_GST_NAME
260#endif /* VBOX_WITH_64_BITS_GUESTS */
261
262#undef PGM_SHW_TYPE
263#undef PGM_SHW_NAME
264
265
266/*
267 * Shadow - 32-bit nested paging mode.
268 */
269#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
270#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
271#include "PGMAllShw.h"
272
273/* Guest - real mode */
274#define PGM_GST_TYPE PGM_TYPE_REAL
275#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
276#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
277#include "PGMGstDefs.h"
278#include "PGMAllBth.h"
279#undef PGM_BTH_NAME
280#undef PGM_GST_TYPE
281#undef PGM_GST_NAME
282
283/* Guest - protected mode */
284#define PGM_GST_TYPE PGM_TYPE_PROT
285#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
286#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
287#include "PGMGstDefs.h"
288#include "PGMAllBth.h"
289#undef PGM_BTH_NAME
290#undef PGM_GST_TYPE
291#undef PGM_GST_NAME
292
293/* Guest - 32-bit mode */
294#define PGM_GST_TYPE PGM_TYPE_32BIT
295#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
296#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
297#include "PGMGstDefs.h"
298#include "PGMAllBth.h"
299#undef PGM_BTH_NAME
300#undef PGM_GST_TYPE
301#undef PGM_GST_NAME
302
303/* Guest - PAE mode */
304#define PGM_GST_TYPE PGM_TYPE_PAE
305#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
306#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
307#include "PGMGstDefs.h"
308#include "PGMAllBth.h"
309#undef PGM_BTH_NAME
310#undef PGM_GST_TYPE
311#undef PGM_GST_NAME
312
313#ifdef VBOX_WITH_64_BITS_GUESTS
314/* Guest - AMD64 mode */
315# define PGM_GST_TYPE PGM_TYPE_AMD64
316# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
317# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
318# include "PGMGstDefs.h"
319# include "PGMAllBth.h"
320# undef PGM_BTH_NAME
321# undef PGM_GST_TYPE
322# undef PGM_GST_NAME
323#endif /* VBOX_WITH_64_BITS_GUESTS */
324
325#undef PGM_SHW_TYPE
326#undef PGM_SHW_NAME
327
328
329/*
330 * Shadow - PAE nested paging mode.
331 */
332#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
333#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
334#include "PGMAllShw.h"
335
336/* Guest - real mode */
337#define PGM_GST_TYPE PGM_TYPE_REAL
338#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
339#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
340#include "PGMGstDefs.h"
341#include "PGMAllBth.h"
342#undef PGM_BTH_NAME
343#undef PGM_GST_TYPE
344#undef PGM_GST_NAME
345
346/* Guest - protected mode */
347#define PGM_GST_TYPE PGM_TYPE_PROT
348#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
349#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
350#include "PGMGstDefs.h"
351#include "PGMAllBth.h"
352#undef PGM_BTH_NAME
353#undef PGM_GST_TYPE
354#undef PGM_GST_NAME
355
356/* Guest - 32-bit mode */
357#define PGM_GST_TYPE PGM_TYPE_32BIT
358#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
359#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
360#include "PGMGstDefs.h"
361#include "PGMAllBth.h"
362#undef PGM_BTH_NAME
363#undef PGM_GST_TYPE
364#undef PGM_GST_NAME
365
366/* Guest - PAE mode */
367#define PGM_GST_TYPE PGM_TYPE_PAE
368#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
369#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
370#include "PGMGstDefs.h"
371#include "PGMAllBth.h"
372#undef PGM_BTH_NAME
373#undef PGM_GST_TYPE
374#undef PGM_GST_NAME
375
376#ifdef VBOX_WITH_64_BITS_GUESTS
377/* Guest - AMD64 mode */
378# define PGM_GST_TYPE PGM_TYPE_AMD64
379# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
380# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
381# include "PGMGstDefs.h"
382# include "PGMAllBth.h"
383# undef PGM_BTH_NAME
384# undef PGM_GST_TYPE
385# undef PGM_GST_NAME
386#endif /* VBOX_WITH_64_BITS_GUESTS */
387
388#undef PGM_SHW_TYPE
389#undef PGM_SHW_NAME
390
391
392/*
393 * Shadow - AMD64 nested paging mode.
394 */
395#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
396#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
397#include "PGMAllShw.h"
398
399/* Guest - real mode */
400#define PGM_GST_TYPE PGM_TYPE_REAL
401#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
402#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
403#include "PGMGstDefs.h"
404#include "PGMAllBth.h"
405#undef PGM_BTH_NAME
406#undef PGM_GST_TYPE
407#undef PGM_GST_NAME
408
409/* Guest - protected mode */
410#define PGM_GST_TYPE PGM_TYPE_PROT
411#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
412#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
413#include "PGMGstDefs.h"
414#include "PGMAllBth.h"
415#undef PGM_BTH_NAME
416#undef PGM_GST_TYPE
417#undef PGM_GST_NAME
418
419/* Guest - 32-bit mode */
420#define PGM_GST_TYPE PGM_TYPE_32BIT
421#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
422#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
423#include "PGMGstDefs.h"
424#include "PGMAllBth.h"
425#undef PGM_BTH_NAME
426#undef PGM_GST_TYPE
427#undef PGM_GST_NAME
428
429/* Guest - PAE mode */
430#define PGM_GST_TYPE PGM_TYPE_PAE
431#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
432#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
433#include "PGMGstDefs.h"
434#include "PGMAllBth.h"
435#undef PGM_BTH_NAME
436#undef PGM_GST_TYPE
437#undef PGM_GST_NAME
438
439#ifdef VBOX_WITH_64_BITS_GUESTS
440/* Guest - AMD64 mode */
441# define PGM_GST_TYPE PGM_TYPE_AMD64
442# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
443# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
444# include "PGMGstDefs.h"
445# include "PGMAllBth.h"
446# undef PGM_BTH_NAME
447# undef PGM_GST_TYPE
448# undef PGM_GST_NAME
449#endif /* VBOX_WITH_64_BITS_GUESTS */
450
451#undef PGM_SHW_TYPE
452#undef PGM_SHW_NAME
453
454
455/*
456 * Shadow - EPT.
457 */
458#define PGM_SHW_TYPE PGM_TYPE_EPT
459#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
460#include "PGMAllShw.h"
461
462/* Guest - real mode */
463#define PGM_GST_TYPE PGM_TYPE_REAL
464#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
465#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
466#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
467#include "PGMGstDefs.h"
468#include "PGMAllBth.h"
469#undef BTH_PGMPOOLKIND_PT_FOR_PT
470#undef PGM_BTH_NAME
471#undef PGM_GST_TYPE
472#undef PGM_GST_NAME
473
474/* Guest - protected mode */
475#define PGM_GST_TYPE PGM_TYPE_PROT
476#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
477#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
478#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
479#include "PGMGstDefs.h"
480#include "PGMAllBth.h"
481#undef BTH_PGMPOOLKIND_PT_FOR_PT
482#undef PGM_BTH_NAME
483#undef PGM_GST_TYPE
484#undef PGM_GST_NAME
485
486/* Guest - 32-bit mode */
487#define PGM_GST_TYPE PGM_TYPE_32BIT
488#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
489#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
490#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
491#include "PGMGstDefs.h"
492#include "PGMAllBth.h"
493#undef BTH_PGMPOOLKIND_PT_FOR_PT
494#undef PGM_BTH_NAME
495#undef PGM_GST_TYPE
496#undef PGM_GST_NAME
497
498/* Guest - PAE mode */
499#define PGM_GST_TYPE PGM_TYPE_PAE
500#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
501#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
502#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
503#include "PGMGstDefs.h"
504#include "PGMAllBth.h"
505#undef BTH_PGMPOOLKIND_PT_FOR_PT
506#undef PGM_BTH_NAME
507#undef PGM_GST_TYPE
508#undef PGM_GST_NAME
509
510#ifdef VBOX_WITH_64_BITS_GUESTS
511/* Guest - AMD64 mode */
512# define PGM_GST_TYPE PGM_TYPE_AMD64
513# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
514# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
515# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
516# include "PGMGstDefs.h"
517# include "PGMAllBth.h"
518# undef BTH_PGMPOOLKIND_PT_FOR_PT
519# undef PGM_BTH_NAME
520# undef PGM_GST_TYPE
521# undef PGM_GST_NAME
522#endif /* VBOX_WITH_64_BITS_GUESTS */
523
524#undef PGM_SHW_TYPE
525#undef PGM_SHW_NAME
526
527
528/*
529 * Shadow - NEM / None.
530 */
531#define PGM_SHW_TYPE PGM_TYPE_NONE
532#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
533#include "PGMAllShw.h"
534
535/* Guest - real mode */
536#define PGM_GST_TYPE PGM_TYPE_REAL
537#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
538#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
539#include "PGMGstDefs.h"
540#include "PGMAllBth.h"
541#undef PGM_BTH_NAME
542#undef PGM_GST_TYPE
543#undef PGM_GST_NAME
544
545/* Guest - protected mode */
546#define PGM_GST_TYPE PGM_TYPE_PROT
547#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
548#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
549#include "PGMGstDefs.h"
550#include "PGMAllBth.h"
551#undef PGM_BTH_NAME
552#undef PGM_GST_TYPE
553#undef PGM_GST_NAME
554
555/* Guest - 32-bit mode */
556#define PGM_GST_TYPE PGM_TYPE_32BIT
557#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
558#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
559#include "PGMGstDefs.h"
560#include "PGMAllBth.h"
561#undef PGM_BTH_NAME
562#undef PGM_GST_TYPE
563#undef PGM_GST_NAME
564
565/* Guest - PAE mode */
566#define PGM_GST_TYPE PGM_TYPE_PAE
567#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
568#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
569#include "PGMGstDefs.h"
570#include "PGMAllBth.h"
571#undef PGM_BTH_NAME
572#undef PGM_GST_TYPE
573#undef PGM_GST_NAME
574
575#ifdef VBOX_WITH_64_BITS_GUESTS
576/* Guest - AMD64 mode */
577# define PGM_GST_TYPE PGM_TYPE_AMD64
578# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
579# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
580# include "PGMGstDefs.h"
581# include "PGMAllBth.h"
582# undef PGM_BTH_NAME
583# undef PGM_GST_TYPE
584# undef PGM_GST_NAME
585#endif /* VBOX_WITH_64_BITS_GUESTS */
586
587#undef PGM_SHW_TYPE
588#undef PGM_SHW_NAME
589
590
591
592/**
593 * Guest mode data array.
594 */
595PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
596{
597 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
598 {
599 PGM_TYPE_REAL,
600 PGM_GST_NAME_REAL(GetPage),
601 PGM_GST_NAME_REAL(ModifyPage),
602 PGM_GST_NAME_REAL(Enter),
603 PGM_GST_NAME_REAL(Exit),
604#ifdef IN_RING3
605 PGM_GST_NAME_REAL(Relocate),
606#endif
607 },
608 {
609 PGM_TYPE_PROT,
610 PGM_GST_NAME_PROT(GetPage),
611 PGM_GST_NAME_PROT(ModifyPage),
612 PGM_GST_NAME_PROT(Enter),
613 PGM_GST_NAME_PROT(Exit),
614#ifdef IN_RING3
615 PGM_GST_NAME_PROT(Relocate),
616#endif
617 },
618 {
619 PGM_TYPE_32BIT,
620 PGM_GST_NAME_32BIT(GetPage),
621 PGM_GST_NAME_32BIT(ModifyPage),
622 PGM_GST_NAME_32BIT(Enter),
623 PGM_GST_NAME_32BIT(Exit),
624#ifdef IN_RING3
625 PGM_GST_NAME_32BIT(Relocate),
626#endif
627 },
628 {
629 PGM_TYPE_PAE,
630 PGM_GST_NAME_PAE(GetPage),
631 PGM_GST_NAME_PAE(ModifyPage),
632 PGM_GST_NAME_PAE(Enter),
633 PGM_GST_NAME_PAE(Exit),
634#ifdef IN_RING3
635 PGM_GST_NAME_PAE(Relocate),
636#endif
637 },
638#ifdef VBOX_WITH_64_BITS_GUESTS
639 {
640 PGM_TYPE_AMD64,
641 PGM_GST_NAME_AMD64(GetPage),
642 PGM_GST_NAME_AMD64(ModifyPage),
643 PGM_GST_NAME_AMD64(Enter),
644 PGM_GST_NAME_AMD64(Exit),
645# ifdef IN_RING3
646 PGM_GST_NAME_AMD64(Relocate),
647# endif
648 },
649#endif
650};
651
652
653/**
654 * The shadow mode data array.
655 */
656PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
657{
658 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
659 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
660 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
661 {
662 PGM_TYPE_32BIT,
663 PGM_SHW_NAME_32BIT(GetPage),
664 PGM_SHW_NAME_32BIT(ModifyPage),
665 PGM_SHW_NAME_32BIT(Enter),
666 PGM_SHW_NAME_32BIT(Exit),
667#ifdef IN_RING3
668 PGM_SHW_NAME_32BIT(Relocate),
669#endif
670 },
671 {
672 PGM_TYPE_PAE,
673 PGM_SHW_NAME_PAE(GetPage),
674 PGM_SHW_NAME_PAE(ModifyPage),
675 PGM_SHW_NAME_PAE(Enter),
676 PGM_SHW_NAME_PAE(Exit),
677#ifdef IN_RING3
678 PGM_SHW_NAME_PAE(Relocate),
679#endif
680 },
681 {
682 PGM_TYPE_AMD64,
683 PGM_SHW_NAME_AMD64(GetPage),
684 PGM_SHW_NAME_AMD64(ModifyPage),
685 PGM_SHW_NAME_AMD64(Enter),
686 PGM_SHW_NAME_AMD64(Exit),
687#ifdef IN_RING3
688 PGM_SHW_NAME_AMD64(Relocate),
689#endif
690 },
691 {
692 PGM_TYPE_NESTED_32BIT,
693 PGM_SHW_NAME_NESTED_32BIT(GetPage),
694 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
695 PGM_SHW_NAME_NESTED_32BIT(Enter),
696 PGM_SHW_NAME_NESTED_32BIT(Exit),
697#ifdef IN_RING3
698 PGM_SHW_NAME_NESTED_32BIT(Relocate),
699#endif
700 },
701 {
702 PGM_TYPE_NESTED_PAE,
703 PGM_SHW_NAME_NESTED_PAE(GetPage),
704 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
705 PGM_SHW_NAME_NESTED_PAE(Enter),
706 PGM_SHW_NAME_NESTED_PAE(Exit),
707#ifdef IN_RING3
708 PGM_SHW_NAME_NESTED_PAE(Relocate),
709#endif
710 },
711 {
712 PGM_TYPE_NESTED_AMD64,
713 PGM_SHW_NAME_NESTED_AMD64(GetPage),
714 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
715 PGM_SHW_NAME_NESTED_AMD64(Enter),
716 PGM_SHW_NAME_NESTED_AMD64(Exit),
717#ifdef IN_RING3
718 PGM_SHW_NAME_NESTED_AMD64(Relocate),
719#endif
720 },
721 {
722 PGM_TYPE_EPT,
723 PGM_SHW_NAME_EPT(GetPage),
724 PGM_SHW_NAME_EPT(ModifyPage),
725 PGM_SHW_NAME_EPT(Enter),
726 PGM_SHW_NAME_EPT(Exit),
727#ifdef IN_RING3
728 PGM_SHW_NAME_EPT(Relocate),
729#endif
730 },
731 {
732 PGM_TYPE_NONE,
733 PGM_SHW_NAME_NONE(GetPage),
734 PGM_SHW_NAME_NONE(ModifyPage),
735 PGM_SHW_NAME_NONE(Enter),
736 PGM_SHW_NAME_NONE(Exit),
737#ifdef IN_RING3
738 PGM_SHW_NAME_NONE(Relocate),
739#endif
740 },
741};
742
743
744/**
745 * The guest+shadow mode data array.
746 */
747PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
748{
749#if !defined(IN_RING3) && !defined(VBOX_STRICT)
750# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
751# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
752 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
753
754#elif !defined(IN_RING3) && defined(VBOX_STRICT)
755# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
756# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
757 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
758
759#elif defined(IN_RING3) && !defined(VBOX_STRICT)
760# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
761# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
762 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
763
764#elif defined(IN_RING3) && defined(VBOX_STRICT)
765# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
766# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
767 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
768
769#else
770# error "Misconfig."
771#endif
772
773 /* 32-bit shadow paging mode: */
774 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
776 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
777 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
782 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
783 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
784 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
785
786 /* PAE shadow paging mode: */
787 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
788 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
789 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
790 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
791 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
792 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
793 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
795 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
798
799 /* AMD64 shadow paging mode: */
800 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
801 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
802 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
803 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
804 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
805#ifdef VBOX_WITH_64_BITS_GUESTS
806 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
807#else
808 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
809#endif
810 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
815
816 /* 32-bit nested paging mode: */
817 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
818 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
819 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
822#ifdef VBOX_WITH_64_BITS_GUESTS
823 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
824#else
825 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
826#endif
827 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
832
833 /* PAE nested paging mode: */
834 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
835 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
836 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
839#ifdef VBOX_WITH_64_BITS_GUESTS
840 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
841#else
842 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
843#endif
844 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
849
850 /* AMD64 nested paging mode: */
851 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
852 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
853 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
856#ifdef VBOX_WITH_64_BITS_GUESTS
857 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
858#else
859 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
860#endif
861 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
866
867 /* EPT nested paging mode: */
868 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
869 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
870 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
873#ifdef VBOX_WITH_64_BITS_GUESTS
874 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
875#else
876 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
877#endif
878 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
883
884 /* NONE / NEM: */
885 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
886 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
887 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
888 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
889 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
890#ifdef VBOX_WITH_64_BITS_GUESTS
891 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
892#else
893 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
894#endif
895 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
896 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
897 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
898 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
899 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
900
901
902#undef PGMMODEDATABTH_ENTRY
903#undef PGMMODEDATABTH_NULL_ENTRY
904};
905
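/*
 * The three tables above (g_aPgmGuestModeData, g_aPgmShadowModeData and
 * g_aPgmBothModeData) are not searched at runtime: PGM keeps the resolved
 * indexes in the per-VCPU state (e.g. pVCpu->pgm.s.idxBothModeData), and the
 * dispatch sites below simply bounds-check the index and call the stored
 * function pointer, as can be seen in PGMTrap0eHandler and PGMPrefetchPage.
 */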
906
907/** Mask array used by pgmGetCr3MaskForMode.
908 * X86_CR3_AMD64_PAGE_MASK is used for modes that don't have a CR3 or EPTP. */
909static uint64_t const g_auCr3MaskForMode[PGMMODE_MAX] =
910{
911 /* [PGMMODE_INVALID] = */ X86_CR3_AMD64_PAGE_MASK,
912 /* [PGMMODE_REAL] = */ X86_CR3_AMD64_PAGE_MASK,
913 /* [PGMMODE_PROTECTED] = */ X86_CR3_AMD64_PAGE_MASK,
914 /* [PGMMODE_32_BIT] = */ X86_CR3_PAGE_MASK,
915 /* [PGMMODE_PAE] = */ X86_CR3_PAE_PAGE_MASK,
916 /* [PGMMODE_PAE_NX] = */ X86_CR3_PAE_PAGE_MASK,
917 /* [PGMMODE_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
918 /* [PGMMODE_AMD64_NX] = */ X86_CR3_AMD64_PAGE_MASK,
919 /* [PGMMODE_NESTED_32BIT] = */ X86_CR3_PAGE_MASK,
920 /* [PGMMODE_NESTED_PAE] = */ X86_CR3_PAE_PAGE_MASK,
921 /* [PGMMODE_NESTED_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
922 /* [PGMMODE_EPT] = */ X86_CR3_EPT_PAGE_MASK,
923 /* [PGMMODE_NONE] = */ X86_CR3_AMD64_PAGE_MASK,
924};
925
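/*
 * The per-mode masks differ because the alignment requirements differ: a PAE
 * CR3 only needs to point to a 32-byte aligned PDPT, whereas 32-bit and long
 * mode CR3 values (and EPTPs) are page aligned, hence the separate
 * X86_CR3_PAE_PAGE_MASK versus the 4 KB page masks used for the other modes.
 */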
926
927/**
928 * Gets the physical address mask for CR3 in the given paging mode.
929 *
930 * The mask is for eliminating flags and other stuff in CR3/EPTP when
931 * extracting the physical address. It is not for validating whether there are
932 * reserved bits set. PGM ASSUMES that whoever loaded the CR3 value and passed
933 * it to PGM checked for reserved bits, including reserved physical address
934 * bits.
935 *
936 * @returns The CR3 mask.
937 * @param enmMode The paging mode.
938 * @param enmSlatMode The second-level address translation mode.
939 */
940DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
941{
942 if (enmSlatMode == PGMSLAT_DIRECT)
943 {
944 Assert(enmMode != PGMMODE_EPT);
945 return g_auCr3MaskForMode[(unsigned)enmMode < (unsigned)PGMMODE_MAX ? enmMode : 0];
946 }
947 Assert(enmSlatMode == PGMSLAT_EPT);
948 return X86_CR3_EPT_PAGE_MASK;
949}
950
951
952/**
953 * Gets the masked CR3 value according to the current guest paging mode.
954 *
955 * See disclaimer in pgmGetCr3MaskForMode.
956 *
957 * @returns The masked PGM CR3 value.
958 * @param pVCpu The cross context virtual CPU structure.
959 * @param uCr3 The raw guest CR3 value.
960 */
961DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
962{
963 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
964 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
965 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
966 return GCPhysCR3;
967}
968
969
970#ifdef IN_RING0
971/**
972 * #PF Handler.
973 *
974 * @returns VBox status code (appropriate for trap handling and GC return).
975 * @param pVCpu The cross context virtual CPU structure.
976 * @param uErr The trap error code.
977 * @param pRegFrame Trap register frame.
978 * @param pvFault The fault address.
979 */
980VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
981{
982 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
983
984 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
985 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
986 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
987
988
989# ifdef VBOX_WITH_STATISTICS
990 /*
991 * Error code stats.
992 */
993 if (uErr & X86_TRAP_PF_US)
994 {
995 if (!(uErr & X86_TRAP_PF_P))
996 {
997 if (uErr & X86_TRAP_PF_RW)
998 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
999 else
1000 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
1001 }
1002 else if (uErr & X86_TRAP_PF_RW)
1003 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
1004 else if (uErr & X86_TRAP_PF_RSVD)
1005 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
1006 else if (uErr & X86_TRAP_PF_ID)
1007 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
1008 else
1009 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
1010 }
1011 else
1012 { /* Supervisor */
1013 if (!(uErr & X86_TRAP_PF_P))
1014 {
1015 if (uErr & X86_TRAP_PF_RW)
1016 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
1017 else
1018 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
1019 }
1020 else if (uErr & X86_TRAP_PF_RW)
1021 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
1022 else if (uErr & X86_TRAP_PF_ID)
1023 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
1024 else if (uErr & X86_TRAP_PF_RSVD)
1025 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
1026 }
1027# endif /* VBOX_WITH_STATISTICS */
1028
1029 /*
1030 * Call the worker.
1031 */
1032 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1033 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1034 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
1035 bool fLockTaken = false;
1036 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
1037 if (fLockTaken)
1038 {
1039 PGM_LOCK_ASSERT_OWNER(pVM);
1040 PGM_UNLOCK(pVM);
1041 }
1042 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
1043
1044 /*
1045 * Return code tweaks.
1046 */
1047 if (rc != VINF_SUCCESS)
1048 {
1049 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1050 rc = VINF_SUCCESS;
1051
1052 /* Note: hack alert for difficult to reproduce problem. */
1053 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
1054 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
1055 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
1056 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
1057 {
1058 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
1059 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
1060 rc = VINF_SUCCESS;
1061 }
1062 }
1063
1064 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
1065 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
1066 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
1067 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
1068 return rc;
1069}
1070#endif /* IN_RING0 */
1071
1072
1073/**
1074 * Prefetch a page
1075 *
1076 * Typically used to sync commonly used pages before entering raw mode
1077 * after a CR3 reload.
1078 *
1079 * @returns VBox status code suitable for scheduling.
1080 * @retval VINF_SUCCESS on success.
1081 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1082 * @param pVCpu The cross context virtual CPU structure.
1083 * @param GCPtrPage Page to invalidate.
1084 */
1085VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1086{
1087 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1088
1089 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1090 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1091 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1092 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1093
1094 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1095 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1096 return rc;
1097}
1098
1099
1100/**
1101 * Emulation of the invlpg instruction (HC only actually).
1102 *
1103 * @returns Strict VBox status code, special care required.
1104 * @retval VINF_PGM_SYNC_CR3 - handled.
1105 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1106 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param GCPtrPage Page to invalidate.
1110 *
1111 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1112 * safe, but there could be edge cases!
1113 *
1114 * @todo Flush page or page directory only if necessary!
1115 * @todo VBOXSTRICTRC
1116 */
1117VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1118{
1119 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1120 int rc;
1121 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1122
1123 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1124
1125 /*
1126 * Call paging mode specific worker.
1127 */
1128 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1129 PGM_LOCK_VOID(pVM);
1130
1131 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1132 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1133 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1134 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1135
1136 PGM_UNLOCK(pVM);
1137 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1138
1139 /* Ignore all irrelevant error codes. */
1140 if ( rc == VERR_PAGE_NOT_PRESENT
1141 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1142 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1143 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1144 rc = VINF_SUCCESS;
1145
1146 return rc;
1147}
1148
1149
1150/**
1151 * Executes an instruction using the interpreter.
1152 *
1153 * @returns VBox status code (appropriate for trap handling and GC return).
1154 * @param pVM The cross context VM structure.
1155 * @param pVCpu The cross context virtual CPU structure.
1156 * @param pRegFrame Register frame.
1157 * @param pvFault Fault address.
1158 */
1159VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1160{
1161 NOREF(pVM);
1162 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1163 if (rc == VERR_EM_INTERPRETER)
1164 rc = VINF_EM_RAW_EMULATE_INSTR;
1165 if (rc != VINF_SUCCESS)
1166 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1167 return rc;
1168}
1169
1170
1171/**
1172 * Gets effective page information (from the VMM page directory).
1173 *
1174 * @returns VBox status code.
1175 * @param pVCpu The cross context virtual CPU structure.
1176 * @param GCPtr Guest Context virtual address of the page.
1177 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1178 * @param pHCPhys Where to store the HC physical address of the page.
1179 * This is page aligned.
1180 * @remark You should use PGMMapGetPage() for pages in a mapping.
1181 */
1182VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1183{
1184 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1185 PGM_LOCK_VOID(pVM);
1186
1187 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1188 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1189 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1190 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1191
1192 PGM_UNLOCK(pVM);
1193 return rc;
1194}
1195
1196
1197/**
1198 * Modify page flags for a range of pages in the shadow context.
1199 *
1200 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1201 *
1202 * @returns VBox status code.
1203 * @param pVCpu The cross context virtual CPU structure.
1204 * @param GCPtr Virtual address of the first page in the range.
1205 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1206 * @param fMask The AND mask - page flags X86_PTE_*.
1207 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1208 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1209 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1210 */
1211DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1212{
1213 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1214 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1215
1216 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */
1217
1218 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1219 PGM_LOCK_VOID(pVM);
1220
1221 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1222 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1223 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1224 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1225
1226 PGM_UNLOCK(pVM);
1227 return rc;
1228}
1229
1230
1231/**
1232 * Changing the page flags for a single page in the shadow page tables so as to
1233 * make it read-only.
1234 *
1235 * @returns VBox status code.
1236 * @param pVCpu The cross context virtual CPU structure.
1237 * @param GCPtr Virtual address of the first page in the range.
1238 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1239 */
1240VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1241{
1242 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1243}
1244
1245
1246/**
1247 * Changing the page flags for a single page in the shadow page tables so as to
1248 * make it writable.
1249 *
1250 * The caller must know with 101% certainty that the guest page tables map
1251 * this as writable too. This function will deal with shared, zero and write
1252 * monitored pages.
1253 *
1254 * @returns VBox status code.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 * @param GCPtr Virtual address of the first page in the range.
1257 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1258 */
1259VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1260{
1261 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1262 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1263 return VINF_SUCCESS;
1264}
1265
1266
1267/**
1268 * Changing the page flags for a single page in the shadow page tables so as to
1269 * make it not present.
1270 *
1271 * @returns VBox status code.
1272 * @param pVCpu The cross context virtual CPU structure.
1273 * @param GCPtr Virtual address of the first page in the range.
1274 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1275 */
1276VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1277{
1278 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1279}
1280
1281
1282/**
1283 * Changing the page flags for a single page in the shadow page tables so as to
1284 * make it supervisor and writable.
1285 *
1286 * This is for dealing with CR0.WP=0 and read-only user pages.
1287 *
1288 * @returns VBox status code.
1289 * @param pVCpu The cross context virtual CPU structure.
1290 * @param GCPtr Virtual address of the first page in the range.
1291 * @param fBigPage Whether or not this is a big page. If it is, we have to
1292 * change the shadow PDE as well. If it isn't, the caller
1293 * has checked that the shadow PDE doesn't need changing.
1294 * We ASSUME 4KB pages backing the big page here!
1295 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1296 */
1297int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1298{
1299 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1300 if (rc == VINF_SUCCESS && fBigPage)
1301 {
1302 /* this is a bit ugly... */
1303 switch (pVCpu->pgm.s.enmShadowMode)
1304 {
1305 case PGMMODE_32_BIT:
1306 {
1307 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1308 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1309 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1310 pPde->u |= X86_PDE_RW;
1311 Log(("-> PDE=%#llx (32)\n", pPde->u));
1312 break;
1313 }
1314 case PGMMODE_PAE:
1315 case PGMMODE_PAE_NX:
1316 {
1317 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1318 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1319 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1320 pPde->u |= X86_PDE_RW;
1321 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1322 break;
1323 }
1324 default:
1325 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1326 }
1327 }
1328 return rc;
1329}
1330
1331
1332/**
1333 * Gets the shadow page directory for the specified address, PAE.
1334 *
1335 * @returns Pointer to the shadow PD.
1336 * @param pVCpu The cross context virtual CPU structure.
1337 * @param GCPtr The address.
1338 * @param uGstPdpe Guest PDPT entry. Valid.
1339 * @param ppPD Receives address of page directory
1340 */
1341int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1342{
1343 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1344 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1345 PPGMPOOLPAGE pShwPage;
1346 int rc;
1347 PGM_LOCK_ASSERT_OWNER(pVM);
1348
1349
1350 /* Allocate page directory if not present. */
1351 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1352 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1353 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1354 X86PGPAEUINT const uPdpe = pPdpe->u;
1355 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1356 {
1357 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1358 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1359 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1360
1361 pgmPoolCacheUsed(pPool, pShwPage);
1362
1363 /* Update the entry if necessary. */
1364 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1365 if (uPdpeNew == uPdpe)
1366 { /* likely */ }
1367 else
1368 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1369 }
1370 else
1371 {
1372 RTGCPTR64 GCPdPt;
1373 PGMPOOLKIND enmKind;
1374 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1375 {
1376 /* AMD-V nested paging or real/protected mode without paging. */
1377 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1378 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1379 }
1380 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1381 {
1382 if (uGstPdpe & X86_PDPE_P)
1383 {
1384 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1385 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1386 }
1387 else
1388 {
1389 /* PD not present; guest must reload CR3 to change it.
1390 * No need to monitor anything in this case. */
1391 /** @todo r=bird: WTF is hit?!? */
1392 /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
1393 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1394 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1395 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1396 }
1397 }
1398 else
1399 {
1400 GCPdPt = CPUMGetGuestCR3(pVCpu);
1401 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1402 }
1403
1404 /* Create a reference back to the PDPT by using the index in its shadow page. */
1405 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1406 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1407 &pShwPage);
1408 AssertRCReturn(rc, rc);
1409
1410 /* Hook it up. */
1411 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1412 }
1413 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1414
1415 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1416 return VINF_SUCCESS;
1417}
1418
1419
1420/**
1421 * Gets the pointer to the shadow page directory entry for an address, PAE.
1422 *
1423 * @returns Pointer to the PDE.
1424 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1425 * @param GCPtr The address.
1426 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1427 */
1428DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1429{
1430 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1431 PGM_LOCK_ASSERT_OWNER(pVM);
1432
1433 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1434 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1435 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1436 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1437 if (!(uPdpe & X86_PDPE_P))
1438 {
1439 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1440 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1441 }
1442 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1443
1444 /* Fetch the pgm pool shadow descriptor. */
1445 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1446 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1447
1448 *ppShwPde = pShwPde;
1449 return VINF_SUCCESS;
1450}
1451
1452
1453/**
1454 * Syncs the SHADOW page directory pointer for the specified address.
1455 *
1456 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1457 *
1458 * The caller is responsible for making sure the guest has a valid PD before
1459 * calling this function.
1460 *
1461 * @returns VBox status code.
1462 * @param pVCpu The cross context virtual CPU structure.
1463 * @param GCPtr The address.
1464 * @param uGstPml4e Guest PML4 entry (valid).
1465 * @param uGstPdpe Guest PDPT entry (valid).
1466 * @param ppPD Receives address of page directory
1467 */
1468static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1469{
1470 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1471 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1472 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1473 int rc;
1474
1475 PGM_LOCK_ASSERT_OWNER(pVM);
1476
1477 /*
1478 * PML4.
1479 */
1480 PPGMPOOLPAGE pShwPage;
1481 {
1482 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1483 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1484 X86PGPAEUINT const uPml4e = pPml4e->u;
1485
1486 /* Allocate page directory pointer table if not present. */
1487 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1488 {
1489 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1490 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1491
1492 pgmPoolCacheUsed(pPool, pShwPage);
1493
1494 /* Update the entry if needed. */
1495 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1496 | (uPml4e & PGM_PML4_FLAGS);
1497 if (uPml4e == uPml4eNew)
1498 { /* likely */ }
1499 else
1500 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1501 }
1502 else
1503 {
1504 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1505
1506 RTGCPTR64 GCPml4;
1507 PGMPOOLKIND enmKind;
1508 if (fNestedPagingOrNoGstPaging)
1509 {
1510 /* AMD-V nested paging or real/protected mode without paging */
1511 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1512 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1513 }
1514 else
1515 {
1516 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1517 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1518 }
1519
1520 /* Create a reference back to the PDPT by using the index in its shadow page. */
1521 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1522 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1523 &pShwPage);
1524 AssertRCReturn(rc, rc);
1525
1526 /* Hook it up. */
1527 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1528 | (uPml4e & PGM_PML4_FLAGS));
1529 }
1530 }
1531
1532 /*
1533 * PDPT.
1534 */
1535 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1536 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1537 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1538 X86PGPAEUINT const uPdpe = pPdpe->u;
1539
1540 /* Allocate page directory if not present. */
1541 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1542 {
1543 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1544 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1545
1546 pgmPoolCacheUsed(pPool, pShwPage);
1547
1548 /* Update the entry if needed. */
1549 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1550 | (uPdpe & PGM_PDPT_FLAGS);
1551 if (uPdpe == uPdpeNew)
1552 { /* likely */ }
1553 else
1554 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1555 }
1556 else
1557 {
1558 RTGCPTR64 GCPdPt;
1559 PGMPOOLKIND enmKind;
1560 if (fNestedPagingOrNoGstPaging)
1561 {
1562 /* AMD-V nested paging or real/protected mode without paging */
1563 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1564 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1565 }
1566 else
1567 {
1568 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1569 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1570 }
1571
1572 /* Create a reference back to the PDPT by using the index in its shadow page. */
1573 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1574 pShwPage->idx, iPdPt, false /*fLockPage*/,
1575 &pShwPage);
1576 AssertRCReturn(rc, rc);
1577
1578 /* Hook it up. */
1579 ASMAtomicWriteU64(&pPdpe->u,
1580 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1581 }
1582
1583 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1584 return VINF_SUCCESS;
1585}
1586
1587
1588/**
1589 * Gets the SHADOW page directory pointer for the specified address (long mode).
1590 *
1591 * @returns VBox status code.
1592 * @param pVCpu The cross context virtual CPU structure.
1593 * @param GCPtr The address.
1594 * @param ppPml4e Receives the address of the page map level 4 entry.
1595 * @param ppPdpt Receives the address of the page directory pointer table.
1596 * @param ppPD Receives the address of the page directory.
1597 */
1598DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1599{
1600 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1601 PGM_LOCK_ASSERT_OWNER(pVM);
1602
1603 /*
1604 * PML4
1605 */
1606 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1607 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1608 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1609 if (ppPml4e)
1610 *ppPml4e = (PX86PML4E)pPml4e;
1611 X86PGPAEUINT const uPml4e = pPml4e->u;
1612 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
    if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1614 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1615
1616 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1617 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1618 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1619
1620 /*
1621 * PDPT
1622 */
1623 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1624 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1625 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
    if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1627 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1628
1629 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1630 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1631
1632 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1633 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1634 return VINF_SUCCESS;
1635}
1636
1637
1638/**
1639 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1640 * backing pages in case the PDPT or PML4 entry is missing.
1641 *
1642 * @returns VBox status code.
1643 * @param pVCpu The cross context virtual CPU structure.
1644 * @param GCPtr The address.
 * @param ppPdpt Receives the address of the PDPT.
 * @param ppPD Receives the address of the page directory.
1647 */
1648static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1649{
1650 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1651 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1652 int rc;
1653
1654 Assert(pVM->pgm.s.fNestedPaging);
1655 PGM_LOCK_ASSERT_OWNER(pVM);
1656
1657 /*
1658 * PML4 level.
1659 */
1660 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1661 Assert(pPml4);
1662
1663 /* Allocate page directory pointer table if not present. */
1664 PPGMPOOLPAGE pShwPage;
1665 {
1666 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1667 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1668 EPTPML4E Pml4e;
1669 Pml4e.u = pPml4e->u;
1670 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1671 {
1672 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1673 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1674 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1675 &pShwPage);
1676 AssertRCReturn(rc, rc);
1677
1678 /* Hook up the new PDPT now. */
1679 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1680 }
1681 else
1682 {
1683 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1684 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1685
1686 pgmPoolCacheUsed(pPool, pShwPage);
1687
1688 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1689 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1690 { }
1691 else
1692 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1693 }
1694 }
1695
1696 /*
1697 * PDPT level.
1698 */
1699 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1700 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1701 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1702
1703 if (ppPdpt)
1704 *ppPdpt = pPdpt;
1705
1706 /* Allocate page directory if not present. */
1707 EPTPDPTE Pdpe;
1708 Pdpe.u = pPdpe->u;
1709 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1710 {
1711 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1712 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1713 pShwPage->idx, iPdPt, false /*fLockPage*/,
1714 &pShwPage);
1715 AssertRCReturn(rc, rc);
1716
1717 /* Hook up the new PD now. */
1718 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1719 }
1720 else
1721 {
1722 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1723 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1724
1725 pgmPoolCacheUsed(pPool, pShwPage);
1726
        /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1728 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1729 { }
1730 else
1731 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1732 }
1733
1734 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1735 return VINF_SUCCESS;
1736}
1737
1738
1739#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1740/**
1741 * Syncs the SHADOW nested-guest page directory pointer for the specified address.
1742 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1743 *
1744 * @returns VBox status code.
1745 * @param pVCpu The cross context virtual CPU structure.
1746 * @param GCPhysNested The nested-guest physical address.
1747 * @param ppPdpt Where to store the PDPT. Optional, can be NULL.
1748 * @param ppPD Where to store the PD. Optional, can be NULL.
1749 * @param pGstWalkAll The guest walk info.
1750 */
1751static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
1752 PPGMPTWALKGST pGstWalkAll)
1753{
1754 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1755 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1756 int rc;
1757
1758 PPGMPOOLPAGE pShwPage;
1759 Assert(pVM->pgm.s.fNestedPaging);
1760 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
1761 PGM_LOCK_ASSERT_OWNER(pVM);
1762
1763 /*
1764 * PML4 level.
1765 */
1766 {
1767 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1768 Assert(pPml4);
1769
1770 /* Allocate page directory pointer table if not present. */
1771 {
1772 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
1773 const unsigned iPml4e = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1774 PEPTPML4E pPml4e = &pPml4->a[iPml4e];
1775
1776 if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1777 {
1778 RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
1779 rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
1780 PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
1781 &pShwPage);
1782 AssertRCReturn(rc, rc);
1783
1784 /* Hook up the new PDPT now. */
1785 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1786 }
1787 else
1788 {
1789 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1790 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1791
1792 pgmPoolCacheUsed(pPool, pShwPage);
1793
1794 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1795 if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
1796 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1797 }
1798 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1799 Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
1800 }
1801 }
1802
1803 /*
1804 * PDPT level.
1805 */
1806 {
1807 AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
1808
1809 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1810 if (ppPdpt)
1811 *ppPdpt = pPdpt;
1812
1813 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
1814 const unsigned iPdPte = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1815 PEPTPDPTE pPdpte = &pPdpt->a[iPdPte];
1816
1817 if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1818 {
1819 RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
1820 rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1821 pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
1822 AssertRCReturn(rc, rc);
1823
1824 /* Hook up the new PD now. */
1825 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1826 }
1827 else
1828 {
1829 pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
1830 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1831
1832 pgmPoolCacheUsed(pPool, pShwPage);
1833
            /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1835 if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
1836 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1837 }
1838 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1839 Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
1840
1841 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1842 }
1843
1844 return VINF_SUCCESS;
1845}
1846#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1847
1848
1849#ifdef IN_RING0
1850/**
1851 * Synchronizes a range of nested page table entries.
1852 *
1853 * The caller must own the PGM lock.
1854 *
1855 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1856 * @param GCPhys Where to start.
 * @param cPages The number of pages whose entries should be synced.
1858 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1859 * host paging mode for AMD-V).
1860 */
1861int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1862{
1863 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1864
1865/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1866 int rc;
1867 switch (enmShwPagingMode)
1868 {
1869 case PGMMODE_32_BIT:
1870 {
1871 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1872 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1873 break;
1874 }
1875
1876 case PGMMODE_PAE:
1877 case PGMMODE_PAE_NX:
1878 {
1879 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1880 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1881 break;
1882 }
1883
1884 case PGMMODE_AMD64:
1885 case PGMMODE_AMD64_NX:
1886 {
1887 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1888 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1889 break;
1890 }
1891
1892 case PGMMODE_EPT:
1893 {
1894 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1895 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1896 break;
1897 }
1898
1899 default:
1900 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1901 }
1902 return rc;
1903}
1904#endif /* IN_RING0 */
1905
1906
1907/**
1908 * Gets effective Guest OS page information.
1909 *
 * When GCPtr is in a big page, the function will return as if it were a normal
 * 4KB page. If distinguishing between big and normal pages becomes necessary
 * at a later point, a dedicated variant of this API can be added for that
 * purpose.
1914 *
1915 * @returns VBox status code.
1916 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1917 * @param GCPtr Guest Context virtual address of the page.
1918 * @param pWalk Where to store the page walk information.
1919 */
1920VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1921{
1922 VMCPU_ASSERT_EMT(pVCpu);
1923 Assert(pWalk);
1924 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1925 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1926 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1927 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1928}
1929
1930
1931/**
1932 * Maps the guest CR3.
1933 *
1934 * @returns VBox status code.
1935 * @param pVCpu The cross context virtual CPU structure.
1936 * @param GCPhysCr3 The guest CR3 value.
1937 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1938 */
1939DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1940{
1941 /** @todo this needs some reworking wrt. locking? */
1942 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1943 PGM_LOCK_VOID(pVM);
1944 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1945 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1946
1947 RTHCPTR HCPtrGuestCr3;
1948 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1949 PGM_UNLOCK(pVM);
1950
1951 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1952 return rc;
1953}
1954
1955
1956/**
1957 * Unmaps the guest CR3.
1958 *
1959 * @returns VBox status code.
1960 * @param pVCpu The cross context virtual CPU structure.
1961 */
1962DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1963{
1964 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1965 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1966 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
1967 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1968}
1969
1970
1971/**
1972 * Performs a guest page table walk.
1973 *
1974 * The guest should be in paged protect mode or long mode when making a call to
1975 * this function.
1976 *
1977 * @returns VBox status code.
1978 * @retval VINF_SUCCESS on success.
1979 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
 * not valid, except that enmType is PGMPTWALKGSTTYPE_INVALID.
1982 *
1983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1984 * @param GCPtr The guest virtual address to walk by.
1985 * @param pWalk Where to return the walk result. This is valid for some
1986 * error codes as well.
1987 * @param pGstWalk The guest mode specific page walk information.
1988 */
1989int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1990{
1991 VMCPU_ASSERT_EMT(pVCpu);
1992 switch (pVCpu->pgm.s.enmGuestMode)
1993 {
1994 case PGMMODE_32_BIT:
1995 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1996 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1997
1998 case PGMMODE_PAE:
1999 case PGMMODE_PAE_NX:
2000 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
2001 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
2002
2003 case PGMMODE_AMD64:
2004 case PGMMODE_AMD64_NX:
2005 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
2006 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
2007
2008 case PGMMODE_REAL:
2009 case PGMMODE_PROTECTED:
2010 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2011 return VERR_PGM_NOT_USED_IN_MODE;
2012
2013 case PGMMODE_EPT:
2014 case PGMMODE_NESTED_32BIT:
2015 case PGMMODE_NESTED_PAE:
2016 case PGMMODE_NESTED_AMD64:
2017 default:
2018 AssertFailed();
2019 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2020 return VERR_PGM_NOT_USED_IN_MODE;
2021 }
2022}
2023
2024#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2025
2026/**
2027 * Performs a guest second-level address translation (SLAT).
2028 *
2029 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
2030 * function.
2031 *
2032 * @returns VBox status code.
2033 * @retval VINF_SUCCESS on success.
2034 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
 * not valid, except that enmType is PGMPTWALKGSTTYPE_INVALID.
2037 *
2038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2039 * @param GCPhysNested The nested-guest physical address being translated
2040 * (input).
 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
 * valid. When true, the SLAT is being performed as part of
 * translating a nested-guest linear address.
2044 * @param GCPtrNested The nested-guest virtual address that initiated the
2045 * SLAT. If none, pass NIL_RTGCPTR.
2046 * @param pWalk Where to return the walk result. This is valid for
2047 * some error codes as well.
2048 * @param pGstWalk The second-level paging-mode specific walk
2049 * information.
2050 */
2051static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
2052 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2053{
2054 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
2055 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
2056 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
2057 switch (pVCpu->pgm.s.enmGuestSlatMode)
2058 {
2059 case PGMSLAT_EPT:
2060 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2061 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
2062
2063 default:
2064 AssertFailed();
2065 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2066 return VERR_PGM_NOT_USED_IN_MODE;
2067 }
2068}
2069
2070
2071/**
2072 * Performs a guest second-level address translation (SLAT) for a nested-guest
2073 * physical address.
2074 *
2075 * This version requires the SLAT mode to be provided by the caller because we could
2076 * be in the process of switching paging modes (MOV CRX) and cannot presume control
2077 * register values.
2078 *
2079 * @returns VBox status code.
2080 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2081 * @param enmSlatMode The second-level paging mode to use.
2082 * @param GCPhysNested The nested-guest physical address to translate.
2083 * @param pWalk Where to store the walk result.
2084 * @param pGstWalk Where to store the second-level paging-mode specific
2085 * walk information.
2086 */
2087static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
2088 PPGMPTWALKGST pGstWalk)
2089{
2090 AssertPtr(pWalk);
2091 AssertPtr(pGstWalk);
2092 switch (enmSlatMode)
2093 {
2094 case PGMSLAT_EPT:
2095 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */,
2097 pWalk, &pGstWalk->u.Ept);
2098
2099 default:
2100 AssertFailed();
2101 return VERR_PGM_NOT_USED_IN_MODE;
2102 }
2103}
2104
2105#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
2106
2107/**
2108 * Tries to continue the previous walk.
2109 *
2110 * @note Requires the caller to hold the PGM lock from the first
2111 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
2112 * we cannot use the pointers.
2113 *
2114 * @returns VBox status code.
2115 * @retval VINF_SUCCESS on success.
2116 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
 * not valid, except that enmType is PGMPTWALKGSTTYPE_INVALID.
2119 *
2120 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2121 * @param GCPtr The guest virtual address to walk by.
2122 * @param pWalk Pointer to the previous walk result and where to return
2123 * the result of this walk. This is valid for some error
2124 * codes as well.
2125 * @param pGstWalk The guest-mode specific walk information.
2126 */
2127int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2128{
2129 /*
     * We can only handle successful walks.
2131 * We also limit ourselves to the next page.
2132 */
2133 if ( pWalk->fSucceeded
2134 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
2135 {
2136 Assert(pWalk->uLevel == 0);
2137 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2138 {
2139 /*
2140 * AMD64
2141 */
2142 if (!pWalk->fGigantPage && !pWalk->fBigPage)
2143 {
2144 /*
2145 * We fall back to full walk if the PDE table changes, if any
2146 * reserved bits are set, or if the effective page access changes.
2147 */
2148 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2149 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2150 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2151 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2152
2153 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
2154 {
2155 if (pGstWalk->u.Amd64.pPte)
2156 {
2157 X86PTEPAE Pte;
2158 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
2159 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2160 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2161 {
2162 pWalk->GCPtr = GCPtr;
2163 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2164 pGstWalk->u.Amd64.Pte.u = Pte.u;
2165 pGstWalk->u.Amd64.pPte++;
2166 return VINF_SUCCESS;
2167 }
2168 }
2169 }
2170 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
2171 {
2172 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2173 if (pGstWalk->u.Amd64.pPde)
2174 {
2175 X86PDEPAE Pde;
2176 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
2177 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
2178 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2179 {
2180 /* Get the new PTE and check out the first entry. */
2181 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2182 &pGstWalk->u.Amd64.pPt);
2183 if (RT_SUCCESS(rc))
2184 {
2185 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
2186 X86PTEPAE Pte;
2187 Pte.u = pGstWalk->u.Amd64.pPte->u;
2188 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2189 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2190 {
2191 pWalk->GCPtr = GCPtr;
2192 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2193 pGstWalk->u.Amd64.Pte.u = Pte.u;
2194 pGstWalk->u.Amd64.Pde.u = Pde.u;
2195 pGstWalk->u.Amd64.pPde++;
2196 return VINF_SUCCESS;
2197 }
2198 }
2199 }
2200 }
2201 }
2202 }
2203 else if (!pWalk->fGigantPage)
2204 {
2205 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2206 {
2207 pWalk->GCPtr = GCPtr;
2208 pWalk->GCPhys += GUEST_PAGE_SIZE;
2209 return VINF_SUCCESS;
2210 }
2211 }
2212 else
2213 {
2214 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2215 {
2216 pWalk->GCPtr = GCPtr;
2217 pWalk->GCPhys += GUEST_PAGE_SIZE;
2218 return VINF_SUCCESS;
2219 }
2220 }
2221 }
2222 }
2223 /* Case we don't handle. Do full walk. */
2224 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2225}
2226
2227
2228/**
 * Modifies page flags for a range of pages in the guest's tables.
2230 *
2231 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2232 *
2233 * @returns VBox status code.
2234 * @param pVCpu The cross context virtual CPU structure.
2235 * @param GCPtr Virtual address of the first page in the range.
2236 * @param cb Size (in bytes) of the range to apply the modification to.
2237 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2238 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2239 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2240 */
2241VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2242{
2243 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2244 VMCPU_ASSERT_EMT(pVCpu);
2245
2246 /*
2247 * Validate input.
2248 */
2249 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2250 Assert(cb);
2251
2252 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2253
2254 /*
2255 * Adjust input.
2256 */
2257 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2258 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2259 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2260
2261 /*
2262 * Call worker.
2263 */
2264 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2265 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2266 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2267 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2268
2269 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2270 return rc;
2271}
2272
2273
2274/**
2275 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2276 *
 * @returns @c true if the PDPEs are valid, @c false otherwise.
2278 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2279 * @param paPaePdpes The PAE PDPEs to validate.
2280 *
2281 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2282 */
2283VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2284{
2285 Assert(paPaePdpes);
2286 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2287 {
2288 X86PDPE const PaePdpe = paPaePdpes[i];
2289 if ( !(PaePdpe.u & X86_PDPE_P)
2290 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2291 { /* likely */ }
2292 else
2293 return false;
2294 }
2295 return true;
2296}
2297
2298
2299/**
2300 * Performs the lazy mapping of the 32-bit guest PD.
2301 *
2302 * @returns VBox status code.
2303 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2304 * @param ppPd Where to return the pointer to the mapping. This is
2305 * always set.
2306 */
2307int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2308{
2309 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2310 PGM_LOCK_VOID(pVM);
2311
2312 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2313
2314 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2315 PPGMPAGE pPage;
2316 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2317 if (RT_SUCCESS(rc))
2318 {
2319 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2320 if (RT_SUCCESS(rc))
2321 {
2322# ifdef IN_RING3
2323 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2324 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2325# else
2326 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2327 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2328# endif
2329 PGM_UNLOCK(pVM);
2330 return VINF_SUCCESS;
2331 }
2332 AssertRC(rc);
2333 }
2334 PGM_UNLOCK(pVM);
2335
2336 *ppPd = NULL;
2337 return rc;
2338}
2339
2340
2341/**
2342 * Performs the lazy mapping of the PAE guest PDPT.
2343 *
2344 * @returns VBox status code.
2345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2346 * @param ppPdpt Where to return the pointer to the mapping. This is
2347 * always set.
2348 */
2349int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2350{
2351 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2352 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2353 PGM_LOCK_VOID(pVM);
2354
2355 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2356 PPGMPAGE pPage;
2357 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2358 if (RT_SUCCESS(rc))
2359 {
2360 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2361 if (RT_SUCCESS(rc))
2362 {
2363# ifdef IN_RING3
2364 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2365 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2366# else
2367 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2368 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2369# endif
2370 PGM_UNLOCK(pVM);
2371 return VINF_SUCCESS;
2372 }
2373 AssertRC(rc);
2374 }
2375
2376 PGM_UNLOCK(pVM);
2377 *ppPdpt = NULL;
2378 return rc;
2379}
2380
2381
2382/**
2383 * Performs the lazy mapping / updating of a PAE guest PD.
2384 *
 * @returns VBox status code.
2387 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
 * @param iPdpt Which PDPT entry's page directory to map (0..3).
2389 * @param ppPd Where to return the pointer to the mapping. This is
2390 * always set.
2391 */
2392int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2393{
2394 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2395 PGM_LOCK_VOID(pVM);
2396
2397 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2398 Assert(pGuestPDPT);
2399 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2400 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2401 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2402
2403 PPGMPAGE pPage;
2404 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2405 if (RT_SUCCESS(rc))
2406 {
2407 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2408 AssertRC(rc);
2409 if (RT_SUCCESS(rc))
2410 {
2411# ifdef IN_RING3
2412 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2413 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2414# else
2415 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2416 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2417# endif
2418 if (fChanged)
2419 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2420 PGM_UNLOCK(pVM);
2421 return VINF_SUCCESS;
2422 }
2423 }
2424
2425 /* Invalid page or some failure, invalidate the entry. */
2426 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2427 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2428 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2429
2430 PGM_UNLOCK(pVM);
2431 return rc;
2432}
2433
2434
2435/**
 * Performs the lazy mapping of the AMD64 guest PML4 table.
2437 *
2438 * @returns VBox status code.
2439 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2440 * @param ppPml4 Where to return the pointer to the mapping. This will
2441 * always be set.
2442 */
2443int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2444{
2445 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2446 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2447 PGM_LOCK_VOID(pVM);
2448
2449 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2450 PPGMPAGE pPage;
2451 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2452 if (RT_SUCCESS(rc))
2453 {
2454 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2455 if (RT_SUCCESS(rc))
2456 {
2457# ifdef IN_RING3
2458 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2459 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2460# else
2461 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2462 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2463# endif
2464 PGM_UNLOCK(pVM);
2465 return VINF_SUCCESS;
2466 }
2467 }
2468
2469 PGM_UNLOCK(pVM);
2470 *ppPml4 = NULL;
2471 return rc;
2472}
2473
2474
2475#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2476 /**
2477 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2478 *
2479 * @returns VBox status code.
2480 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2481 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2482 * always be set.
2483 */
2484int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2485{
2486 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2487 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2488 PGM_LOCK_VOID(pVM);
2489
2490 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2491 PPGMPAGE pPage;
2492 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2493 if (RT_SUCCESS(rc))
2494 {
2495 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2496 if (RT_SUCCESS(rc))
2497 {
2498# ifdef IN_RING3
2499 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2500 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2501# else
2502 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2503 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2504# endif
2505 PGM_UNLOCK(pVM);
2506 return VINF_SUCCESS;
2507 }
2508 }
2509
2510 PGM_UNLOCK(pVM);
2511 *ppEptPml4 = NULL;
2512 return rc;
2513}
2514#endif
2515
2516
2517/**
2518 * Gets the current CR3 register value for the shadow memory context.
2519 * @returns CR3 value.
2520 * @param pVCpu The cross context virtual CPU structure.
2521 */
2522VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2523{
2524 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2525 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2526 return pPoolPage->Core.Key;
2527}
2528
2529
2530/**
2531 * Forces lazy remapping of the guest's PAE page-directory structures.
2532 *
2533 * @param pVCpu The cross context virtual CPU structure.
2534 */
2535static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2536{
2537 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2538 {
2539 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2540 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2541 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2542 }
2543}
2544
2545
2546#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2547/**
2548 * Performs second-level address translation for the given CR3 and updates the
2549 * nested-guest CR3 when successful.
2550 *
2551 * @returns VBox status code.
2552 * @param pVCpu The cross context virtual CPU structure.
2553 * @param uCr3 The masked nested-guest CR3 value.
 * @param pGCPhysCr3 Where to store the translated CR3.
2555 *
2556 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2557 * mindful of this in code that's hyper sensitive to the order of
2558 * operations.
2559 */
2560static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2561{
2562# if 0
2563 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2564# endif
2565 {
2566 PGMPTWALK Walk;
2567 PGMPTWALKGST GstWalk;
2568 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2569 if (RT_SUCCESS(rc))
2570 {
2571 /* Update nested-guest CR3. */
2572 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2573
2574 /* Pass back the translated result. */
2575 *pGCPhysCr3 = Walk.GCPhys;
2576 return VINF_SUCCESS;
2577 }
2578
2579 /* Translation failed. */
2580 *pGCPhysCr3 = NIL_RTGCPHYS;
2581 return rc;
2582 }
2583
2584# if 0
2585 /*
2586 * If the nested-guest CR3 has not changed, then the previously
2587 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2588 */
2589 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2590 return VINF_SUCCESS;
2591# endif
2592}
2593#endif
2594
2595
2596/**
2597 * Performs and schedules necessary updates following a CR3 load or reload.
2598 *
 * This will normally involve mapping the guest PD or nPDPT.
2600 *
2601 * @returns VBox status code.
2602 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2603 * safely be ignored and overridden since the FF will be set too then.
2604 * @param pVCpu The cross context virtual CPU structure.
2605 * @param cr3 The new cr3.
2606 * @param fGlobal Indicates whether this is a global flush or not.
2607 */
2608VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2609{
2610 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2611 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2612
2613 VMCPU_ASSERT_EMT(pVCpu);
2614
2615 /*
2616 * Always flag the necessary updates; necessary for hardware acceleration
2617 */
2618 /** @todo optimize this, it shouldn't always be necessary. */
2619 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2620 if (fGlobal)
2621 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2622
2623 /*
2624 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2625 */
2626 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2627 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2628#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2629 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2630 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2631 {
2632 RTGCPHYS GCPhysOut;
2633 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2634 if (RT_SUCCESS(rc))
2635 GCPhysCR3 = GCPhysOut;
2636 else
2637 {
2638 /* CR3 SLAT translation failed but we try to pretend it
2639 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2640 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2641 int const rc2 = pgmGstUnmapCr3(pVCpu);
2642 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2643 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2644 return rc2;
2645 }
2646 }
2647#endif
2648
2649 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2650 int rc = VINF_SUCCESS;
2651 if (GCPhysOldCR3 != GCPhysCR3)
2652 {
2653 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2654 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2655 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2656
2657 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2658 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2659 if (RT_LIKELY(rc == VINF_SUCCESS))
2660 { }
2661 else
2662 {
2663 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2664 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2665 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2666 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2667 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2668 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2669 }
2670
2671 if (fGlobal)
2672 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2673 else
2674 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2675 }
2676 else
2677 {
2678#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2679 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2680 if (pPool->cDirtyPages)
2681 {
2682 PGM_LOCK_VOID(pVM);
2683 pgmPoolResetDirtyPages(pVM);
2684 PGM_UNLOCK(pVM);
2685 }
2686#endif
2687 if (fGlobal)
2688 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2689 else
2690 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2691
2692 /*
2693 * Flush PAE PDPTEs.
2694 */
2695 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2696 pgmGstFlushPaePdpes(pVCpu);
2697 }
2698
2699 IEMTlbInvalidateAll(pVCpu);
2700 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2701 return rc;
2702}
2703
2704
2705/**
2706 * Performs and schedules necessary updates following a CR3 load or reload when
2707 * using nested or extended paging.
2708 *
2709 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2710 * TLB and triggering a SyncCR3.
2711 *
 * This will normally involve mapping the guest PD or nPDPT.
2713 *
2714 * @returns VBox status code.
2715 * @retval VINF_SUCCESS.
2716 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2717 * paging modes). This can safely be ignored and overridden since the
2718 * FF will be set too then.
2719 * @param pVCpu The cross context virtual CPU structure.
2720 * @param cr3 The new CR3.
2721 */
2722VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2723{
2724 VMCPU_ASSERT_EMT(pVCpu);
2725
2726 /* We assume we're only called in nested paging mode. */
2727 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2728
2729 /*
2730 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2731 */
2732 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2733 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2734#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2735 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2736 {
2737 RTGCPHYS GCPhysOut;
2738 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2739 if (RT_SUCCESS(rc))
2740 GCPhysCR3 = GCPhysOut;
2741 else
2742 {
2743 /* CR3 SLAT translation failed but we try to pretend it
2744 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2745 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2746 int const rc2 = pgmGstUnmapCr3(pVCpu);
2747 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2748 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2749 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2750 return rc2;
2751 }
2752 }
2753#endif
2754
2755 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2756 int rc = VINF_SUCCESS;
2757 if (GCPhysOldCR3 != GCPhysCR3)
2758 {
2759 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2760 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2761 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2762
2763 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2764 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2765
2766 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2767 }
2768 /*
2769 * Flush PAE PDPTEs.
2770 */
2771 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2772 pgmGstFlushPaePdpes(pVCpu);
2773
2774 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2775 return rc;
2776}
2777
2778
2779/**
2780 * Synchronize the paging structures.
2781 *
 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force-action flags. Those two flags are set
2784 * in several places, most importantly whenever the CR3 is loaded.
2785 *
2786 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2787 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2788 * the VMM into guest context.
2789 * @param pVCpu The cross context virtual CPU structure.
2790 * @param cr0 Guest context CR0 register
2791 * @param cr3 Guest context CR3 register
2792 * @param cr4 Guest context CR4 register
2793 * @param fGlobal Including global page directories or not
2794 */
2795VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2796{
2797 int rc;
2798
2799 VMCPU_ASSERT_EMT(pVCpu);
2800
2801 /*
2802 * The pool may have pending stuff and even require a return to ring-3 to
2803 * clear the whole thing.
2804 */
2805 rc = pgmPoolSyncCR3(pVCpu);
2806 if (rc != VINF_SUCCESS)
2807 return rc;
2808
2809 /*
2810 * We might be called when we shouldn't.
2811 *
2812 * The mode switching will ensure that the PD is resynced after every mode
2813 * switch. So, if we find ourselves here when in protected or real mode
2814 * we can safely clear the FF and return immediately.
2815 */
2816 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2817 {
2818 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2819 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2820 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2821 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2822 return VINF_SUCCESS;
2823 }
2824
2825 /* If global pages are not supported, then all flushes are global. */
2826 if (!(cr4 & X86_CR4_PGE))
2827 fGlobal = true;
2828 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2829 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2830
2831 /*
2832 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2833 * This should be done before SyncCR3.
2834 */
2835 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2836 {
2837 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2838
2839 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2840 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2841#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2842 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2843 {
2844 RTGCPHYS GCPhysOut;
2845 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2846 if (RT_SUCCESS(rc2))
2847 GCPhysCR3 = GCPhysOut;
2848 else
2849 {
2850 /* CR3 SLAT translation failed but we try to pretend it
2851 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2852 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2853 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2854 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2855 return rc2;
2856 }
2857 }
2858#endif
2859 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2860 if (GCPhysOldCR3 != GCPhysCR3)
2861 {
2862 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2863 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2864 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2865 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2866 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2867 }
2868
2869 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2870 if ( rc == VINF_PGM_SYNC_CR3
2871 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2872 {
2873 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2874#ifdef IN_RING3
2875 rc = pgmPoolSyncCR3(pVCpu);
2876#else
2877 if (rc == VINF_PGM_SYNC_CR3)
2878 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2879 return VINF_PGM_SYNC_CR3;
2880#endif
2881 }
2882 AssertRCReturn(rc, rc);
2883 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2884 }
2885
2886 /*
2887 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2888 */
2889 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2890
2891 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2892 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2893 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2894 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2895
2896 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2897 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2898 if (rc == VINF_SUCCESS)
2899 {
2900 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2901 {
2902 /* Go back to ring 3 if a pgm pool sync is again pending. */
2903 return VINF_PGM_SYNC_CR3;
2904 }
2905
2906 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2907 {
2908 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2909 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2910 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2911 }
2912 }
2913
2914 /*
2915 * Now flush the CR3 (guest context).
2916 */
2917 if (rc == VINF_SUCCESS)
2918 PGM_INVL_VCPU_TLBS(pVCpu);
2919 return rc;
2920}
2921
2922
2923/**
2924 * Maps all the PAE PDPE entries.
2925 *
2926 * @returns VBox status code.
2927 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2928 * @param paPaePdpes The new PAE PDPE values.
2929 *
2930 * @remarks This function may be invoked during the process of changing the guest
2931 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2932 * reflect PAE paging just yet.
2933 */
2934VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2935{
2936 Assert(paPaePdpes);
2937 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2938 {
2939 X86PDPE const PaePdpe = paPaePdpes[i];
2940
2941 /*
2942 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
         * is deferred.[1] Also, different situations require different handling of invalid
2944 * PDPE entries. Here we assume the caller has already validated or doesn't require
2945 * validation of the PDPEs.
2946 *
2947 * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
2948 * validated by the VMX transition.
2949 *
2950 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2951 */
2952 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2953 {
2954 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2955 RTHCPTR HCPtr;
2956 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2957
2958 PGM_LOCK_VOID(pVM);
2959 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2960 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2961 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2962 PGM_UNLOCK(pVM);
2963 if (RT_SUCCESS(rc))
2964 {
2965#ifdef IN_RING3
2966 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2967 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2968#else
2969 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2970 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2971#endif
2972 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2973 continue;
2974 }
2975 AssertMsgFailed(("PGMPhysMapPaePdpes: rc2=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2976 }
2977 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2978 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2979 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2980 }
2981
2982 /*
2983 * Update CPUM with the PAE PDPEs.
2984 */
2985 CPUMSetGuestPaePdpes(pVCpu, paPaePdpes);
2986 return VINF_SUCCESS;
2987}
2988
2989
2990/**
2991 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2992 *
2993 * @returns VBox status code.
2994 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2995 * @param cr3 The guest CR3 value.
2996 *
2997 * @remarks This function may be invoked during the process of changing the guest
2998 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2999 * PAE paging just yet.
3000 */
3001VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
3002{
3003 /*
3004 * Read the page-directory-pointer table (PDPT) at CR3.
3005 */
3006 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
3007 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
3008
3009#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3010 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
3011 {
3012 RTGCPHYS GCPhysOut;
3013 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
3014 if (RT_SUCCESS(rc))
3015 GCPhysCR3 = GCPhysOut;
3016 else
3017 {
3018 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
3019 return rc;
3020 }
3021 }
3022#endif
3023
3024 RTHCPTR HCPtrGuestCr3;
3025 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
3026 if (RT_SUCCESS(rc))
3027 {
3028 /*
3029 * Validate the page-directory-pointer table entries (PDPE).
3030 */
3031 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
3032 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
3033 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
3034 {
3035 /*
3036 * Map the PDPT.
3037 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
3038 * that PGMFlushTLB will be called soon and only a change to CR3 then
3039 * will cause the shadow page tables to be updated.
3040 */
3041#ifdef IN_RING3
3042 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
3043 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
3044#else
3045 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
3046 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
3047#endif
3048
3049 /*
3050 * Map the PDPEs and update CPUM.
3051 */
3052 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
3053 if (RT_SUCCESS(rc))
3054 {
3055#ifdef IN_RING3
3056 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
3057 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
3058#else
3059 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
3060 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
3061#endif
3062 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
3063 }
3064 }
3065 else
3066 rc = VERR_PGM_PAE_PDPE_RSVD;
3067 }
3068 return rc;
3069}
3070
3071
3072/**
 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
3074 *
3075 * @returns VBox status code, with the following informational code for
3076 * VM scheduling.
 * @retval VINF_SUCCESS if there was no change, or the change was successfully dealt with.
3078 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
3079 *
3080 * @param pVCpu The cross context virtual CPU structure.
3081 * @param cr0 The new cr0.
3082 * @param cr4 The new cr4.
3083 * @param efer The new extended feature enable register.
3084 * @param fForce Whether to force a mode change.
3085 */
3086VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
3087{
3088 VMCPU_ASSERT_EMT(pVCpu);
3089
3090 /*
3091 * Calc the new guest mode.
3092 *
3093 * Note! We check PG before PE and without requiring PE because of the
3094 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
3095 */
3096 PGMMODE enmGuestMode;
3097 if (cr0 & X86_CR0_PG)
3098 {
3099 if (!(cr4 & X86_CR4_PAE))
3100 {
3101 bool const fPse = !!(cr4 & X86_CR4_PSE);
3102 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
3103 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
3104 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
3105 enmGuestMode = PGMMODE_32_BIT;
3106 }
3107 else if (!(efer & MSR_K6_EFER_LME))
3108 {
3109 if (!(efer & MSR_K6_EFER_NXE))
3110 enmGuestMode = PGMMODE_PAE;
3111 else
3112 enmGuestMode = PGMMODE_PAE_NX;
3113 }
3114 else
3115 {
3116 if (!(efer & MSR_K6_EFER_NXE))
3117 enmGuestMode = PGMMODE_AMD64;
3118 else
3119 enmGuestMode = PGMMODE_AMD64_NX;
3120 }
3121 }
3122 else if (!(cr0 & X86_CR0_PE))
3123 enmGuestMode = PGMMODE_REAL;
3124 else
3125 enmGuestMode = PGMMODE_PROTECTED;
3126
3127 /*
3128 * Did it change?
3129 */
3130 if ( !fForce
3131 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
3132 return VINF_SUCCESS;
3133
3134 /* Flush the TLB */
3135 PGM_INVL_VCPU_TLBS(pVCpu);
3136 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
3137}
3138
3139
3140/**
3141 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3142 *
3143 * @returns PGM_TYPE_*.
3144 * @param pgmMode The mode value to convert.
3145 */
3146DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3147{
3148 switch (pgmMode)
3149 {
3150 case PGMMODE_REAL: return PGM_TYPE_REAL;
3151 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3152 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3153 case PGMMODE_PAE:
3154 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3155 case PGMMODE_AMD64:
3156 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3157 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3158 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3159 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3160 case PGMMODE_EPT: return PGM_TYPE_EPT;
3161 case PGMMODE_NONE: return PGM_TYPE_NONE;
3162 default:
3163 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3164 }
3165}
3166
3167
3168/**
3169 * Calculates the shadow paging mode.
3170 *
3171 * @returns The shadow paging mode.
3172 * @param pVM The cross context VM structure.
3173 * @param enmGuestMode The guest mode.
3174 * @param enmHostMode The host mode.
3175 * @param enmShadowMode The current shadow mode.
3176 */
3177static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3178{
3179 switch (enmGuestMode)
3180 {
3181 case PGMMODE_REAL:
3182 case PGMMODE_PROTECTED:
3183 switch (enmHostMode)
3184 {
3185 case SUPPAGINGMODE_32_BIT:
3186 case SUPPAGINGMODE_32_BIT_GLOBAL:
3187 enmShadowMode = PGMMODE_32_BIT;
3188 break;
3189
3190 case SUPPAGINGMODE_PAE:
3191 case SUPPAGINGMODE_PAE_NX:
3192 case SUPPAGINGMODE_PAE_GLOBAL:
3193 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3194 enmShadowMode = PGMMODE_PAE;
3195 break;
3196
3197 case SUPPAGINGMODE_AMD64:
3198 case SUPPAGINGMODE_AMD64_GLOBAL:
3199 case SUPPAGINGMODE_AMD64_NX:
3200 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3201 enmShadowMode = PGMMODE_PAE;
3202 break;
3203
3204 default:
3205 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3206 }
3207 break;
3208
3209 case PGMMODE_32_BIT:
3210 switch (enmHostMode)
3211 {
3212 case SUPPAGINGMODE_32_BIT:
3213 case SUPPAGINGMODE_32_BIT_GLOBAL:
3214 enmShadowMode = PGMMODE_32_BIT;
3215 break;
3216
3217 case SUPPAGINGMODE_PAE:
3218 case SUPPAGINGMODE_PAE_NX:
3219 case SUPPAGINGMODE_PAE_GLOBAL:
3220 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3221 enmShadowMode = PGMMODE_PAE;
3222 break;
3223
3224 case SUPPAGINGMODE_AMD64:
3225 case SUPPAGINGMODE_AMD64_GLOBAL:
3226 case SUPPAGINGMODE_AMD64_NX:
3227 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3228 enmShadowMode = PGMMODE_PAE;
3229 break;
3230
3231 default:
3232 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3233 }
3234 break;
3235
3236 case PGMMODE_PAE:
3237 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3238 switch (enmHostMode)
3239 {
3240 case SUPPAGINGMODE_32_BIT:
3241 case SUPPAGINGMODE_32_BIT_GLOBAL:
3242 enmShadowMode = PGMMODE_PAE;
3243 break;
3244
3245 case SUPPAGINGMODE_PAE:
3246 case SUPPAGINGMODE_PAE_NX:
3247 case SUPPAGINGMODE_PAE_GLOBAL:
3248 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3249 enmShadowMode = PGMMODE_PAE;
3250 break;
3251
3252 case SUPPAGINGMODE_AMD64:
3253 case SUPPAGINGMODE_AMD64_GLOBAL:
3254 case SUPPAGINGMODE_AMD64_NX:
3255 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3256 enmShadowMode = PGMMODE_PAE;
3257 break;
3258
3259 default:
3260 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3261 }
3262 break;
3263
3264 case PGMMODE_AMD64:
3265 case PGMMODE_AMD64_NX:
3266 switch (enmHostMode)
3267 {
3268 case SUPPAGINGMODE_32_BIT:
3269 case SUPPAGINGMODE_32_BIT_GLOBAL:
3270 enmShadowMode = PGMMODE_AMD64;
3271 break;
3272
3273 case SUPPAGINGMODE_PAE:
3274 case SUPPAGINGMODE_PAE_NX:
3275 case SUPPAGINGMODE_PAE_GLOBAL:
3276 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3277 enmShadowMode = PGMMODE_AMD64;
3278 break;
3279
3280 case SUPPAGINGMODE_AMD64:
3281 case SUPPAGINGMODE_AMD64_GLOBAL:
3282 case SUPPAGINGMODE_AMD64_NX:
3283 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3284 enmShadowMode = PGMMODE_AMD64;
3285 break;
3286
3287 default:
3288 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3289 }
3290 break;
3291
3292 default:
3293 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3294 }
3295
3296 /*
3297 * Override the shadow mode when NEM, IEM or nested paging is active.
3298 */
3299 if (!VM_IS_HM_ENABLED(pVM))
3300 {
3301 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
3302 pVM->pgm.s.fNestedPaging = true;
3303 enmShadowMode = PGMMODE_NONE;
3304 }
3305 else
3306 {
3307 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3308 pVM->pgm.s.fNestedPaging = fNestedPaging;
3309 if (fNestedPaging)
3310 {
3311 if (HMIsVmxActive(pVM))
3312 enmShadowMode = PGMMODE_EPT;
3313 else
3314 {
3315 /* The nested SVM paging depends on the host one. */
3316 Assert(HMIsSvmActive(pVM));
3317 if ( enmGuestMode == PGMMODE_AMD64
3318 || enmGuestMode == PGMMODE_AMD64_NX)
3319 enmShadowMode = PGMMODE_NESTED_AMD64;
3320 else
3321 switch (pVM->pgm.s.enmHostMode)
3322 {
3323 case SUPPAGINGMODE_32_BIT:
3324 case SUPPAGINGMODE_32_BIT_GLOBAL:
3325 enmShadowMode = PGMMODE_NESTED_32BIT;
3326 break;
3327
3328 case SUPPAGINGMODE_PAE:
3329 case SUPPAGINGMODE_PAE_GLOBAL:
3330 case SUPPAGINGMODE_PAE_NX:
3331 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3332 enmShadowMode = PGMMODE_NESTED_PAE;
3333 break;
3334
3335 case SUPPAGINGMODE_AMD64:
3336 case SUPPAGINGMODE_AMD64_GLOBAL:
3337 case SUPPAGINGMODE_AMD64_NX:
3338 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3339 enmShadowMode = PGMMODE_NESTED_AMD64;
3340 break;
3341
3342 default:
3343 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3344 }
3345 }
3346 }
3347#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3348 else
3349 {
3350 /* Nested paging is a requirement for nested VT-x. */
3351 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3352 }
3353#endif
3354 }
3355
3356 return enmShadowMode;
3357}
3358
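The switch above boils down to a simple rule set for the software (shadow) case: the shadow mode tracks the guest mode, promoted to PAE whenever the host itself runs PAE or long mode, while a long-mode guest always gets an AMD64 shadow; the tail of the function then overrides this with EPT, one of the nested modes, or PGMMODE_NONE depending on HM/NEM/IEM and nested paging. A few illustrative expectations, assuming HM with plain shadow paging so the override leaves the result alone (a sketch, not tied to any particular build):

    /* Sketch: expected pgmCalcShadowMode() results for a few combinations,
       assuming shadow paging (HM enabled, no nested paging). */
    Assert(pgmCalcShadowMode(pVM, PGMMODE_32_BIT, SUPPAGINGMODE_AMD64_GLOBAL_NX, PGMMODE_INVALID) == PGMMODE_PAE);
    Assert(pgmCalcShadowMode(pVM, PGMMODE_PAE,    SUPPAGINGMODE_PAE_GLOBAL,      PGMMODE_INVALID) == PGMMODE_PAE);
    Assert(pgmCalcShadowMode(pVM, PGMMODE_AMD64,  SUPPAGINGMODE_AMD64_GLOBAL,    PGMMODE_INVALID) == PGMMODE_AMD64);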
3359
3360/**
3361 * Performs the actual mode change.
3362 * This is called by PGMChangeMode and pgmR3InitPaging().
3363 *
3364 * @returns VBox status code. May suspend or power off the VM on error, but this
3365 * will trigger using FFs and not informational status codes.
3366 *
3367 * @param pVM The cross context VM structure.
3368 * @param pVCpu The cross context virtual CPU structure.
3369 * @param enmGuestMode The new guest mode. This is assumed to be different from
3370 * the current mode.
3371 * @param fForce Whether to force a shadow paging mode change.
3372 */
3373VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
3374{
3375 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3376 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3377
3378 /*
3379 * Calc the shadow mode and switcher.
3380 */
3381 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3382 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
3383
3384 /*
3385 * Exit old mode(s).
3386 */
3387 /* shadow */
3388 if (fShadowModeChanged)
3389 {
3390 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3391 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3392 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3393 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3394 {
3395 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3396 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3397 }
3398 }
3399 else
3400 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3401
3402 /* guest */
3403 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3404 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3405 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3406 {
3407 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3408 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3409 }
3410 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3411 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3412 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3413 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3414
3415 /*
3416 * Change the paging mode data indexes.
3417 */
3418 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3419 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3420 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3421 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3422 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3423 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3424 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3425#ifdef IN_RING3
3426 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3427#endif
3428
3429 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3430 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3431 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3432 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3433 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3434 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3435 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3436#ifdef IN_RING3
3437 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3438#endif
3439
3440 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3441 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3442 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3443 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3444 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3445 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3446 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3447 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3448 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3449 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3450#ifdef VBOX_STRICT
3451 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3452#endif
3453
3454 /*
3455 * Determine SLAT mode -before- entering the new shadow mode!
3456 */
3457 pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
3458
3459 /*
3460 * Enter new shadow mode (if changed).
3461 */
3462 if (fShadowModeChanged)
3463 {
3464 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3465 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu);
3466 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3467 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3468 }
3469
3470 /*
3471 * Always flag the necessary updates
3472 */
3473 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3474
3475 /*
3476 * Enter the new guest and shadow+guest modes.
3477 */
3478 /* Calc the new CR3 value. */
3479 RTGCPHYS GCPhysCR3;
3480 switch (enmGuestMode)
3481 {
3482 case PGMMODE_REAL:
3483 case PGMMODE_PROTECTED:
3484 GCPhysCR3 = NIL_RTGCPHYS;
3485 break;
3486
3487 case PGMMODE_32_BIT:
3488 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3489 break;
3490
3491 case PGMMODE_PAE_NX:
3492 case PGMMODE_PAE:
3493 if (!pVM->cpum.ro.GuestFeatures.fPae)
3494#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3495 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3496 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3497#else
3498 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3499
3500#endif
3501 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3502 break;
3503
3504#ifdef VBOX_WITH_64_BITS_GUESTS
3505 case PGMMODE_AMD64_NX:
3506 case PGMMODE_AMD64:
3507 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3508 break;
3509#endif
3510 default:
3511 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3512 }
3513
3514#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3515 /*
3516 * If a nested-guest is using EPT paging:
3517 * - Update the second-level address translation (SLAT) mode.
3518 * - Indicate that the CR3 is nested-guest physical address.
3519 */
3520 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3521 {
3522 if (PGMMODE_WITH_PAGING(enmGuestMode))
3523 {
3524 /*
3525 * Translate CR3 to its guest-physical address.
3526 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3527 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3528 */
3529 PGMPTWALK Walk;
3530 PGMPTWALKGST GstWalk;
3531 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3532 if (RT_SUCCESS(rc))
3533 { /* likely */ }
3534 else
3535 {
3536 /*
3537 * SLAT failed but we avoid reporting this to the caller because the caller
3538 * is not supposed to fail. The only time the caller needs to indicate a
3539 * failure to software is when PAE paging is used by the nested-guest, but
3540 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3541 * In all other cases, the failure will be indicated when CR3 tries to be
3542 * translated on the next linear-address memory access.
3543 * See Intel spec. 27.2.1 "EPT Overview".
3544 */
3545 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3546
3547 /* Trying to coax PGM to succeed for the time being... */
3548 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3549 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3550 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3551 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3552 return VINF_SUCCESS;
3553 }
3554 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3555 GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
3556 }
3557 }
3558 else
3559 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3560#endif
3561
3562 /*
3563 * Enter the new guest mode.
3564 */
3565 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3566 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3567 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3568
3569 /* Set the new guest CR3 (and nested-guest CR3). */
3570 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3571
3572 /* status codes. */
3573 AssertRC(rc);
3574 AssertRC(rc2);
3575 if (RT_SUCCESS(rc))
3576 {
3577 rc = rc2;
3578 if (RT_SUCCESS(rc)) /* no informational status codes. */
3579 rc = VINF_SUCCESS;
3580 }
3581
3582 /*
3583 * Notify HM.
3584 */
3585 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3586 return rc;
3587}
3588
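The three mode-data indexes set up above are plain table lookups: the guest and shadow indexes come straight from pgmModeToType(), and the combined index selects one row per shadow type with PGM_TYPE_END columns per guest type. A worked sketch for, say, an EPT shadow over a PAE guest (the concrete PGM_TYPE_* values are looked up, not hard-coded here):

    /* Sketch: deriving the combined (shadow,guest) mode-data index. */
    uintptr_t const idxGst = pgmModeToType(PGMMODE_PAE);   /* PGM_TYPE_PAE */
    uintptr_t const idxShw = pgmModeToType(PGMMODE_EPT);   /* PGM_TYPE_EPT */
    uintptr_t const idxBth = (idxShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxGst;
    Assert(g_aPgmBothModeData[idxBth].uShwType == idxShw);
    Assert(g_aPgmBothModeData[idxBth].uGstType == idxGst);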
3589
3590/**
3591 * Called by CPUM or REM when CR0.WP changes to 1.
3592 *
3593 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3594 * @thread EMT
3595 */
3596VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3597{
3598 /*
3599 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3600 *
3601 * Use the counter to judge whether there might be pool pages with active
3602 * hacks in them. If there are, we will be running the risk of messing up
3603 * the guest by allowing it to write to read-only pages. Thus, we have to
3604 * clear the page pool ASAP if there is the slightest chance.
3605 */
3606 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3607 {
3608 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3609
3610 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3611 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3612 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3613 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3614 }
3615}
3616
3617
3618/**
3619 * Gets the current guest paging mode.
3620 *
3621 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3622 *
3623 * @returns The current paging mode.
3624 * @param pVCpu The cross context virtual CPU structure.
3625 */
3626VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3627{
3628 return pVCpu->pgm.s.enmGuestMode;
3629}
3630
3631
3632/**
3633 * Gets the current shadow paging mode.
3634 *
3635 * @returns The current paging mode.
3636 * @param pVCpu The cross context virtual CPU structure.
3637 */
3638VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3639{
3640 return pVCpu->pgm.s.enmShadowMode;
3641}
3642
3643
3644/**
3645 * Gets the current host paging mode.
3646 *
3647 * @returns The current paging mode.
3648 * @param pVM The cross context VM structure.
3649 */
3650VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3651{
3652 switch (pVM->pgm.s.enmHostMode)
3653 {
3654 case SUPPAGINGMODE_32_BIT:
3655 case SUPPAGINGMODE_32_BIT_GLOBAL:
3656 return PGMMODE_32_BIT;
3657
3658 case SUPPAGINGMODE_PAE:
3659 case SUPPAGINGMODE_PAE_GLOBAL:
3660 return PGMMODE_PAE;
3661
3662 case SUPPAGINGMODE_PAE_NX:
3663 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3664 return PGMMODE_PAE_NX;
3665
3666 case SUPPAGINGMODE_AMD64:
3667 case SUPPAGINGMODE_AMD64_GLOBAL:
3668 return PGMMODE_AMD64;
3669
3670 case SUPPAGINGMODE_AMD64_NX:
3671 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3672 return PGMMODE_AMD64_NX;
3673
3674 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3675 }
3676
3677 return PGMMODE_INVALID;
3678}
3679
3680
3681/**
3682 * Get mode name.
3683 *
3684 * @returns read-only name string.
3685 * @param enmMode The mode whose name is desired.
3686 */
3687VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3688{
3689 switch (enmMode)
3690 {
3691 case PGMMODE_REAL: return "Real";
3692 case PGMMODE_PROTECTED: return "Protected";
3693 case PGMMODE_32_BIT: return "32-bit";
3694 case PGMMODE_PAE: return "PAE";
3695 case PGMMODE_PAE_NX: return "PAE+NX";
3696 case PGMMODE_AMD64: return "AMD64";
3697 case PGMMODE_AMD64_NX: return "AMD64+NX";
3698 case PGMMODE_NESTED_32BIT: return "Nested-32";
3699 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3700 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3701 case PGMMODE_EPT: return "EPT";
3702 case PGMMODE_NONE: return "None";
3703 default: return "unknown mode value";
3704 }
3705}
3706
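PGMGetModeName() is the natural companion to the mode getters further up when logging paging state. A minimal usage sketch:

    /* Sketch: logging the current guest and shadow paging modes. */
    Log(("PGM: guest mode %s, shadow mode %s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu))));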
3707
3708#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3709/**
3710 * Gets the SLAT mode name.
3711 *
3712 * @returns The read-only SLAT mode descriptive string.
3713 * @param enmSlatMode The SLAT mode value.
3714 */
3715VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3716{
3717 switch (enmSlatMode)
3718 {
3719 case PGMSLAT_DIRECT: return "Direct";
3720 case PGMSLAT_EPT: return "EPT";
3721 case PGMSLAT_32BIT: return "32-bit";
3722 case PGMSLAT_PAE: return "PAE";
3723 case PGMSLAT_AMD64: return "AMD64";
3724 default: return "Unknown";
3725 }
3726}
3727#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
3728
3729
3730/**
3731 * Gets the physical address represented in the guest CR3 as PGM sees it.
3732 *
3733 * This is mainly for logging and debugging.
3734 *
3735 * @returns PGM's guest CR3 value.
3736 * @param pVCpu The cross context virtual CPU structure.
3737 */
3738VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3739{
3740 return pVCpu->pgm.s.GCPhysCR3;
3741}
3742
3743
3744
3745/**
3746 * Notification from CPUM that the EFER.NXE bit has changed.
3747 *
3748 * @param pVCpu The cross context virtual CPU structure of the CPU for
3749 * which EFER changed.
3750 * @param fNxe The new NXE state.
3751 */
3752VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3753{
3754/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3755 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3756
3757 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3758 if (fNxe)
3759 {
3760 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3761 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3762 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3763 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3764 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3765 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3766 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3767 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3768 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3769 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3770 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3771
3772 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3773 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3774 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3775 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3776 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3777 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3778 }
3779 else
3780 {
3781 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3782 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3783 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3784 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3785 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3786 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3787 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3788 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3789 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3790 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3791 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3792
3793 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3794 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3795 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3796 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3797 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3798 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3799 }
3800}
3801
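The masks toggled here are "must be zero" masks consumed by the guest page-table walkers: when NXE is clear, the NX bit is a reserved bit and is therefore added to the MBZ masks; when NXE is set, it is removed again and instead shadowed into the shadow page tables. A hedged sketch of how such a mask is typically used (the local uPte is illustrative, not from this file):

    /* Sketch: an MBZ-mask check as a PAE guest page-table walker might do it. */
    uint64_t const uPte = 0; /* raw PTE value as read from the guest page table */
    if (uPte & pVCpu->pgm.s.fGstPaeMbzPteMask)
    {
        /* Reserved bit(s) set: the walk fails and the guest gets a #PF with
           the RSVD error-code bit. */
    }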
3802
3803/**
3804 * Checks if any PGM pool pages are marked dirty (not write monitored).
3805 *
3806 * @returns true if dirty pages are present, false if not.
3807 * @param pVM The cross context VM structure.
3808 */
3809VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3810{
3811 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3812}
3813
3814
3815/**
3816 * Check if this VCPU currently owns the PGM lock.
3817 *
3818 * @returns true if this VCPU owns the lock, false if not.
3819 * @param pVM The cross context VM structure.
3820 */
3821VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3822{
3823 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3824}
3825
3826
3827/**
3828 * Enable or disable large page usage
3829 *
3830 * @returns VBox status code.
3831 * @param pVM The cross context VM structure.
3832 * @param fUseLargePages Whether to use large pages.
3833 */
3834VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3835{
3836 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3837
3838 pVM->pgm.s.fUseLargePages = fUseLargePages;
3839 return VINF_SUCCESS;
3840}
3841
3842
3843/**
3844 * Acquire the PGM lock.
3845 *
3846 * @returns VBox status code
3847 * @param pVM The cross context VM structure.
3848 * @param fVoid Set if the caller cannot handle failure returns.
3849 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3850 */
3851#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3852int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3853#else
3854int pgmLock(PVMCC pVM, bool fVoid)
3855#endif
3856{
3857#if defined(VBOX_STRICT)
3858 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3859#else
3860 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3861#endif
3862 if (RT_SUCCESS(rc))
3863 return rc;
3864 if (fVoid)
3865 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3866 else
3867 AssertRC(rc);
3868 return rc;
3869}
3870
3871
3872/**
3873 * Release the PGM lock.
3874 *
3875 * @returns VBox status code
3876 * @param pVM The cross context VM structure.
3877 */
3878void pgmUnlock(PVMCC pVM)
3879{
3880 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3881 pVM->pgm.s.cDeprecatedPageLocks = 0;
3882 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3883 if (rc == VINF_SEM_NESTED)
3884 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3885}
3886
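Most code does not call pgmLock()/pgmUnlock() directly but goes through the PGM_LOCK_VOID()/PGM_UNLOCK() wrappers, as the functions further down in this file do. A minimal sketch of the pattern:

    /* Sketch: the usual critical-section pattern around PGM state access. */
    PGM_LOCK_VOID(pVM);     /* asserts on failure, no status code to check */
    bool const fDirty = pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
    PGM_UNLOCK(pVM);
    NOREF(fDirty);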
3887
3888#if !defined(IN_R0) || defined(LOG_ENABLED)
3889
3890/** Format handler for PGMPAGE.
3891 * @copydoc FNRTSTRFORMATTYPE */
3892static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3893 const char *pszType, void const *pvValue,
3894 int cchWidth, int cchPrecision, unsigned fFlags,
3895 void *pvUser)
3896{
3897 size_t cch;
3898 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3899 if (RT_VALID_PTR(pPage))
3900 {
3901 char szTmp[64+80];
3902
3903 cch = 0;
3904
3905 /* The single char state stuff. */
3906 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3907 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3908
3909# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3910 if (IS_PART_INCLUDED(5))
3911 {
3912 static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
3913 szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
3914 | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
3915 }
3916
3917 /* The type. */
3918 if (IS_PART_INCLUDED(4))
3919 {
3920 szTmp[cch++] = ':';
3921 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3922 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3923 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3924 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3925 }
3926
3927 /* The numbers. */
3928 if (IS_PART_INCLUDED(3))
3929 {
3930 szTmp[cch++] = ':';
3931 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3932 }
3933
3934 if (IS_PART_INCLUDED(2))
3935 {
3936 szTmp[cch++] = ':';
3937 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3938 }
3939
3940 if (IS_PART_INCLUDED(6))
3941 {
3942 szTmp[cch++] = ':';
3943 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3944 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3945 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3946 }
3947# undef IS_PART_INCLUDED
3948
3949 cch = pfnOutput(pvArgOutput, szTmp, cch);
3950 }
3951 else
3952 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3953 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3954 return cch;
3955}
3956
3957
3958/** Format handler for PGMRAMRANGE.
3959 * @copydoc FNRTSTRFORMATTYPE */
3960static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3961 const char *pszType, void const *pvValue,
3962 int cchWidth, int cchPrecision, unsigned fFlags,
3963 void *pvUser)
3964{
3965 size_t cch;
3966 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3967 if (RT_VALID_PTR(pRam))
3968 {
3969 char szTmp[80];
3970 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3971 cch = pfnOutput(pvArgOutput, szTmp, cch);
3972 }
3973 else
3974 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3975 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3976 return cch;
3977}
3978
3979/** Format type handlers to be registered/deregistered. */
3980static const struct
3981{
3982 char szType[24];
3983 PFNRTSTRFORMATTYPE pfnHandler;
3984} g_aPgmFormatTypes[] =
3985{
3986 { "pgmpage", pgmFormatTypeHandlerPage },
3987 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3988};
3989
3990#endif /* !IN_R0 || LOG_ENABLED */
3991
3992/**
3993 * Registers the global string format types.
3994 *
3995 * This should be called at module load time or in some other manner that ensures
3996 * that it's called exactly one time.
3997 *
3998 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3999 */
4000VMMDECL(int) PGMRegisterStringFormatTypes(void)
4001{
4002#if !defined(IN_R0) || defined(LOG_ENABLED)
4003 int rc = VINF_SUCCESS;
4004 unsigned i;
4005 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4006 {
4007 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4008# ifdef IN_RING0
4009 if (rc == VERR_ALREADY_EXISTS)
4010 {
4011 /* in case of cleanup failure in ring-0 */
4012 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4013 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4014 }
4015# endif
4016 }
4017 if (RT_FAILURE(rc))
4018 while (i-- > 0)
4019 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4020
4021 return rc;
4022#else
4023 return VINF_SUCCESS;
4024#endif
4025}
4026
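Once registered, these types are consumed through IPRT's %R[...] format extension, which is what the compact page and RAM-range dumps in the PGM log output rely on. A minimal usage sketch (pPage and pRam stand for any valid PGMPAGE/PGMRAMRANGE pointers):

    /* Sketch: using the registered custom format types in a log statement. */
    Log(("page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));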
4027
4028/**
4029 * Deregisters the global string format types.
4030 *
4031 * This should be called at module unload time or in some other manner that
4032 * ensures that it's called exactly one time.
4033 */
4034VMMDECL(void) PGMDeregisterStringFormatTypes(void)
4035{
4036#if !defined(IN_R0) || defined(LOG_ENABLED)
4037 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4038 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4039#endif
4040}
4041
4042
4043#ifdef VBOX_STRICT
4044/**
4045 * Asserts that everything related to the guest CR3 is correctly shadowed.
4046 *
4047 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
4048 * and assert the correctness of the guest CR3 mapping before asserting that the
4049 * shadow page tables are in sync with the guest page tables.
4050 *
4051 * @returns Number of conflicts.
4052 * @param pVM The cross context VM structure.
4053 * @param pVCpu The cross context virtual CPU structure.
4054 * @param cr3 The current guest CR3 register value.
4055 * @param cr4 The current guest CR4 register value.
4056 */
4057VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
4058{
4059 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4060
4061 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
4062 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
4063 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
4064
4065 PGM_LOCK_VOID(pVM);
4066 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
4067 PGM_UNLOCK(pVM);
4068
4069 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4070 return cErrors;
4071}
4072#endif /* VBOX_STRICT */
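PGMAssertCR3() only exists in strict builds, so call sites have to be guarded accordingly. A minimal sketch of the typical usage, assuming the CPUM getters for the current CR3/CR4 values:

    #ifdef VBOX_STRICT
        /* Sketch: strict-build sanity check of the shadow vs. guest page tables. */
        unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
        Assert(cErrors == 0);
    #endif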
4073
4074
4075/**
4076 * Updates PGM's copy of the guest's EPT pointer.
4077 *
4078 * @param pVCpu The cross context virtual CPU structure.
4079 * @param uEptPtr The EPT pointer.
4080 *
4081 * @remarks This can be called as part of VM-entry so we might be in the midst of
4082 * switching to VMX non-root mode.
4083 */
4084VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
4085{
4086 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4087 PGM_LOCK_VOID(pVM);
4088 pVCpu->pgm.s.uEptPtr = uEptPtr;
4089 pVCpu->pgm.s.pGstEptPml4R3 = 0;
4090 pVCpu->pgm.s.pGstEptPml4R0 = 0;
4091 PGM_UNLOCK(pVM);
4092}
4093