VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 86453

Last change on this file since 86453 was 86453, checked in by vboxsync, 4 years ago

VMM/PGM: Workaround for buggy gcc (10.2.1) clearing high dword of PDE. bugref:9841

/* $Id: PGMAll.cpp 86453 2020-10-05 17:42:00Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);

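/*
 * Note: Each of the sections below re-includes the PGMAllShw.h, PGMAllGst.h
 * and PGMAllBth.h template headers with a different PGM_SHW_TYPE /
 * PGM_GST_TYPE configuration, stamping out one specialised set of workers
 * per shadow/guest paging mode combination.  The PGM_SHW_NAME, PGM_GST_NAME
 * and PGM_BTH_NAME macros give every instantiation unique function names
 * (the concrete name mangling lives in PGMInternal.h), and the resulting
 * workers are gathered into the g_aPgmGuestModeData, g_aPgmShadowModeData
 * and g_aPgmBothModeData tables further down in this file.
 */
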
/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
/** @todo retire this hack. */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_BIG
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - 32-bit nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - EPT.
 */
#define PGM_SHW_TYPE PGM_TYPE_EPT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - NEM / None.
 */
#define PGM_SHW_TYPE PGM_TYPE_NONE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME



/**
 * Guest mode data array.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_REAL,
        PGM_GST_NAME_REAL(GetPage),
        PGM_GST_NAME_REAL(ModifyPage),
        PGM_GST_NAME_REAL(GetPDE),
        PGM_GST_NAME_REAL(Enter),
        PGM_GST_NAME_REAL(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_REAL(Relocate),
#endif
    },
    {
        PGM_TYPE_PROT,
        PGM_GST_NAME_PROT(GetPage),
        PGM_GST_NAME_PROT(ModifyPage),
        PGM_GST_NAME_PROT(GetPDE),
        PGM_GST_NAME_PROT(Enter),
        PGM_GST_NAME_PROT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PROT(Relocate),
#endif
    },
    {
        PGM_TYPE_32BIT,
        PGM_GST_NAME_32BIT(GetPage),
        PGM_GST_NAME_32BIT(ModifyPage),
        PGM_GST_NAME_32BIT(GetPDE),
        PGM_GST_NAME_32BIT(Enter),
        PGM_GST_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_GST_NAME_PAE(GetPage),
        PGM_GST_NAME_PAE(ModifyPage),
        PGM_GST_NAME_PAE(GetPDE),
        PGM_GST_NAME_PAE(Enter),
        PGM_GST_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PAE(Relocate),
#endif
    },
#ifdef VBOX_WITH_64_BITS_GUESTS
    {
        PGM_TYPE_AMD64,
        PGM_GST_NAME_AMD64(GetPage),
        PGM_GST_NAME_AMD64(ModifyPage),
        PGM_GST_NAME_AMD64(GetPDE),
        PGM_GST_NAME_AMD64(Enter),
        PGM_GST_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_GST_NAME_AMD64(Relocate),
# endif
    },
#endif
};


/**
 * The shadow mode data array.
 */
PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
{
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
    {
        PGM_TYPE_32BIT,
        PGM_SHW_NAME_32BIT(GetPage),
        PGM_SHW_NAME_32BIT(ModifyPage),
        PGM_SHW_NAME_32BIT(Enter),
        PGM_SHW_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_SHW_NAME_PAE(GetPage),
        PGM_SHW_NAME_PAE(ModifyPage),
        PGM_SHW_NAME_PAE(Enter),
        PGM_SHW_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_AMD64,
        PGM_SHW_NAME_AMD64(GetPage),
        PGM_SHW_NAME_AMD64(ModifyPage),
        PGM_SHW_NAME_AMD64(Enter),
        PGM_SHW_NAME_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_32BIT,
        PGM_SHW_NAME_NESTED_32BIT(GetPage),
        PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
        PGM_SHW_NAME_NESTED_32BIT(Enter),
        PGM_SHW_NAME_NESTED_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_PAE,
        PGM_SHW_NAME_NESTED_PAE(GetPage),
        PGM_SHW_NAME_NESTED_PAE(ModifyPage),
        PGM_SHW_NAME_NESTED_PAE(Enter),
        PGM_SHW_NAME_NESTED_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_AMD64,
        PGM_SHW_NAME_NESTED_AMD64(GetPage),
        PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
        PGM_SHW_NAME_NESTED_AMD64(Enter),
        PGM_SHW_NAME_NESTED_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_EPT,
        PGM_SHW_NAME_EPT(GetPage),
        PGM_SHW_NAME_EPT(ModifyPage),
        PGM_SHW_NAME_EPT(Enter),
        PGM_SHW_NAME_EPT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_EPT(Relocate),
#endif
    },
    {
        PGM_TYPE_NONE,
        PGM_SHW_NAME_NONE(GetPage),
        PGM_SHW_NAME_NONE(ModifyPage),
        PGM_SHW_NAME_NONE(Enter),
        PGM_SHW_NAME_NONE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NONE(Relocate),
#endif
    },
};
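
/*
 * Note: The mode data tables above and below are indexed by PGM_TYPE_* values
 * (slot 0 is unused), and callers reach them through per-VCPU indexes that
 * are cached on mode switches rather than recomputed on every call.  A
 * minimal sketch of the lookup idiom, mirroring PGMShwGetPage() and
 * PGMTrap0eHandler() later in this file:
 *
 *      uintptr_t const idxShw = pVCpu->pgm.s.idxShadowModeData;
 *      AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
 *      AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
 *      int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
 */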


/**
 * The guest+shadow mode data array.
 */
PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
{
#if !defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }

#elif !defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }

#elif defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }

#elif defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }

#else
# error "Misconfig."
#endif

    /* 32-bit shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */

    /* 32-bit nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */

    /* EPT nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */

    /* NONE / NEM: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */


#undef PGMMODEDATABTH_ENTRY
#undef PGMMODEDATABTH_NULL_ENTRY
};
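
/*
 * Note: g_aPgmBothModeData is a two-dimensional table flattened into a single
 * array: one block of entries per shadow paging mode, each block indexed by
 * guest mode with slot 0 unused, as the block comments above show.  The
 * cached pVCpu->pgm.s.idxBothModeData value used by PGMTrap0eHandler() and
 * friends therefore encodes both modes at once; conceptually (illustrative
 * only, the real index computation lives in PGMInternal.h):
 *
 *      idxBth = (uShwType - uFirstShadowType) * cGuestSlots + uGstType;
 *
 * where cGuestSlots is the number of entries per shadow-mode block.
 */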


#ifdef IN_RING0
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param pVCpu The cross context virtual CPU structure.
 * @param uErr The trap error code.
 * @param pRegFrame Trap register frame.
 * @param pvFault The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );


# ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
# endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
    bool fLockTaken = false;
    int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

        /* Note: hack alert for a difficult-to-reproduce problem. */
        if (    rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
            ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
            ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
#endif /* IN_RING0 */


/**
 * Prefetch a page.
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval VINF_SUCCESS on success.
 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtrPage Page to prefetch.
 */
VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


#ifndef PGM_WITHOUT_MAPPINGS
/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param pVM The cross context VM structure.
 * @param GCPtr The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
            return pMapping;
        pMapping = pMapping->CTX_SUFF(pNext);
    }
    return NULL;
}
#endif


/**
 * Verifies a range of pages for read or write access.
 *
 * Only checks the guest's page tables.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param Addr Guest virtual address to check.
 * @param cbSize Access size.
 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*)).
 * @remarks Currently not in use.
 */
VMMDECL(int) PGMIsValidAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (   RT_SUCCESS(rc)
        && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}


/**
 * Verifies a range of pages for read or write access.
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param Addr Guest virtual address to check.
 * @param cbSize Access size.
 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*)).
 */
VMMDECL(int) PGMVerifyAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));

    /*
     * Get going.
     */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!pVM->pgm.s.fNestedPaging)
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
        if (   rc == VERR_PAGE_NOT_PRESENT
            || rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t const  uErr   = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
            AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
            AssertReturn(g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
            rc = g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage(pVCpu, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
    if (   (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
        || (fWrite && !(fPageShw & X86_PTE_RW))
        || (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (   RT_SUCCESS(rc)
        && (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
            || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval VINF_PGM_SYNC_CR3 - handled.
 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtrPage Page to invalidate.
 *
 * @remark ASSUMES the page table entry or page directory is valid. Fairly
 *         safe, but there could be edge cases!
 *
 * @todo Flush page or page directory only if necessary!
 * @todo VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

    IEMTlbInvalidatePage(pVCpu, GCPtrPage);

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
    pgmLock(pVM);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), pgmUnlock(pVM), VERR_PGM_MODE_IPE);
    AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, pgmUnlock(pVM), VERR_PGM_MODE_IPE);
    rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);

    pgmUnlock(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);

#ifdef IN_RING3
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    RT_SUCCESS(rc)
        &&  (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
    }
#endif /* IN_RING3 */

    /* Ignore all irrelevant error codes. */
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
        ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 * @param pRegFrame Register frame.
 * @param pvFault Fault address.
 */
VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    NOREF(pVM);
    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Guest Context virtual address of the page.
 * @param pfFlags Where to store the flags. These are X86_PTE_*.
 * @param pHCPhys Where to store the HC physical address of the page.
 *                This is page aligned.
 * @remark You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);

    pgmUnlock(pVM);
    return rc;
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param fMask The AND mask - page flags X86_PTE_*.
 *              Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 * @remark You must use PGMMapModifyPage() for pages in a mapping.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);

    pgmUnlock(pVM);
    return rc;
}
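
/*
 * Note: The PGMShwMakePage*() wrappers below all funnel into
 * pdmShwModifyPage() above; per its doc comment, the update it requests
 * amounts to (sketch):
 *
 *      uNewPte = (uOldPte & fMask) | fFlags;
 *
 * so making a page read-only passes fFlags=0 with fMask=~X86_PTE_RW, making
 * it writable passes fFlags=X86_PTE_RW with an all-ones fMask, and making
 * it not present passes 0 for both.
 */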


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The caller must know with 101% certainty that the guest page tables map this
 * page as writable too. This function will deal with shared, zero and
 * write-monitored pages.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
        return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
    return VINF_SUCCESS;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it supervisor and writable.
 *
 * This is for dealing with CR0.WP=0 and read-only user pages.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fBigPage Whether or not this is a big page. If it is, we have to
 *                 change the shadow PDE as well. If it isn't, the caller
 *                 has checked that the shadow PDE doesn't need changing.
 *                 We ASSUME 4KB pages backing the big page here!
 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
 */
1406int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1407{
1408 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1409 if (rc == VINF_SUCCESS && fBigPage)
1410 {
1411 /* this is a bit ugly... */
1412 switch (pVCpu->pgm.s.enmShadowMode)
1413 {
1414 case PGMMODE_32_BIT:
1415 {
1416 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1417 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1418 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1419 pPde->n.u1Write = 1;
1420 Log(("-> PDE=%#llx (32)\n", pPde->u));
1421 break;
1422 }
1423 case PGMMODE_PAE:
1424 case PGMMODE_PAE_NX:
1425 {
1426 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1427 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1428 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1429 pPde->n.u1Write = 1;
1430 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1431 break;
1432 }
1433 default:
1434 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1435 }
1436 }
1437 return rc;
1438}
1439
1440
1441/**
1442 * Gets the shadow page directory for the specified address, PAE.
1443 *
1444 * @returns Pointer to the shadow PD.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param GCPtr The address.
1447 * @param uGstPdpe Guest PDPT entry. Valid.
1448 * @param ppPD Receives address of page directory
1449 */
1450int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1451{
1452 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1453 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1454 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1455 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1456 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1457 PPGMPOOLPAGE pShwPage;
1458 int rc;
1459
1460 PGM_LOCK_ASSERT_OWNER(pVM);
1461
1462 /* Allocate page directory if not present. */
1463 if ( !pPdpe->n.u1Present
1464 && !(pPdpe->u & X86_PDPE_PG_MASK))
1465 {
1466 RTGCPTR64 GCPdPt;
1467 PGMPOOLKIND enmKind;
1468
1469 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1470 {
1471 /* AMD-V nested paging or real/protected mode without paging. */
1472 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1473 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1474 }
1475 else
1476 {
1477 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1478 {
1479 if (!(uGstPdpe & X86_PDPE_P))
1480 {
1481 /* PD not present; guest must reload CR3 to change it.
1482 * No need to monitor anything in this case.
1483 */
1484 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1485
1486 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1487 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1488 uGstPdpe |= X86_PDPE_P;
1489 }
1490 else
1491 {
1492 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1493 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1494 }
1495 }
1496 else
1497 {
1498 GCPdPt = CPUMGetGuestCR3(pVCpu);
1499 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1500 }
1501 }
1502
1503 /* Create a reference back to the PDPT by using the index in its shadow page. */
1504 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1505 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1506 &pShwPage);
1507 AssertRCReturn(rc, rc);
1508
1509 /* The PD was cached or created; hook it up now. */
1510 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1511 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1512 }
1513 else
1514 {
1515 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1516 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1517 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1518
1519 pgmPoolCacheUsed(pPool, pShwPage);
1520 }
1521 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1522 return VINF_SUCCESS;
1523}
1524
1525
1526/**
1527 * Gets the pointer to the shadow page directory entry for an address, PAE.
1528 *
1529 * @returns Pointer to the PDE.
1530 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1531 * @param GCPtr The address.
1532 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1533 */
1534DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1535{
1536 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1537 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1538 PVM pVM = pVCpu->CTX_SUFF(pVM);
1539
1540 PGM_LOCK_ASSERT_OWNER(pVM);
1541
1542 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1543 if (!pPdpt->a[iPdPt].n.u1Present)
1544 {
1545 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1546 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1547 }
1548 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1549
1550 /* Fetch the pgm pool shadow descriptor. */
1551 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1552 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1553
1554 *ppShwPde = pShwPde;
1555 return VINF_SUCCESS;
1556}
1557
1558
1559/**
1560 * Syncs the SHADOW page directory pointer for the specified address.
1561 *
1562 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1563 *
1564 * The caller is responsible for making sure the guest has a valid PD before
1565 * calling this function.
1566 *
1567 * @returns VBox status code.
1568 * @param pVCpu The cross context virtual CPU structure.
1569 * @param GCPtr The address.
1570 * @param uGstPml4e Guest PML4 entry (valid).
1571 * @param uGstPdpe Guest PDPT entry (valid).
1572 * @param ppPD Receives the address of the page directory.
1573 */
1574static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1575{
1576 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1577 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1578 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1579 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1580 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1581 PPGMPOOLPAGE pShwPage;
1582 int rc;
1583
1584 PGM_LOCK_ASSERT_OWNER(pVM);
1585
1586 /* Allocate page directory pointer table if not present. */
1587 if ( !pPml4e->n.u1Present
1588 && !(pPml4e->u & X86_PML4E_PG_MASK))
1589 {
1590 RTGCPTR64 GCPml4;
1591 PGMPOOLKIND enmKind;
1592
1593 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1594
1595 if (fNestedPagingOrNoGstPaging)
1596 {
1597 /* AMD-V nested paging or real/protected mode without paging */
1598 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1599 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1600 }
1601 else
1602 {
1603 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1604 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1605 }
1606
1607        /* Create a reference back to the PML4 by using the index in its shadow page. */
1608 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1609 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1610 &pShwPage);
1611 AssertRCReturn(rc, rc);
1612 }
1613 else
1614 {
1615 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1616 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1617
1618 pgmPoolCacheUsed(pPool, pShwPage);
1619 }
1620 /* The PDPT was cached or created; hook it up now. */
1621 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1622
1623 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1624 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1625 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1626
1627 /* Allocate page directory if not present. */
1628 if ( !pPdpe->n.u1Present
1629 && !(pPdpe->u & X86_PDPE_PG_MASK))
1630 {
1631 RTGCPTR64 GCPdPt;
1632 PGMPOOLKIND enmKind;
1633
1634 if (fNestedPagingOrNoGstPaging)
1635 {
1636 /* AMD-V nested paging or real/protected mode without paging */
1637 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1638 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1639 }
1640 else
1641 {
1642 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1643 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1644 }
1645
1646 /* Create a reference back to the PDPT by using the index in its shadow page. */
1647 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1648 pShwPage->idx, iPdPt, false /*fLockPage*/,
1649 &pShwPage);
1650 AssertRCReturn(rc, rc);
1651 }
1652 else
1653 {
1654 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1655 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1656
1657 pgmPoolCacheUsed(pPool, pShwPage);
1658 }
1659 /* The PD was cached or created; hook it up now. */
1660 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1661
1662 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1663 return VINF_SUCCESS;
1664}
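/*
 * Worked example (compile-guarded sketch): how an arbitrary long-mode address
 * decomposes into the PML4/PDPT/PD indices computed above.
 */
#if 0
AssertCompile(((UINT64_C(0x00007fffd0a42000) >> X86_PML4_SHIFT)   & X86_PML4_MASK)       == 0x0ff);
AssertCompile(((UINT64_C(0x00007fffd0a42000) >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_AMD64) == 0x1ff);
AssertCompile(((UINT64_C(0x00007fffd0a42000) >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK)     == 0x085);
#endif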
1665
1666
1667/**
1668 * Gets the SHADOW page directory pointer for the specified address (long mode).
1669 *
1670 * @returns VBox status code.
1671 * @param pVCpu The cross context virtual CPU structure.
1672 * @param GCPtr The address.
1673 * @param ppPml4e Receives the address of the page map level 4 entry.
1674 * @param ppPdpt Receives the address of the page directory pointer table.
1675 * @param ppPD Receives the address of the page directory.
1676 */
1677DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1678{
1679 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1680 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1681
1682 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1683
1684 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1685 if (ppPml4e)
1686 *ppPml4e = (PX86PML4E)pPml4e;
1687
1688 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1689
1690 if (!pPml4e->n.u1Present)
1691 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1692
1693 PVM pVM = pVCpu->CTX_SUFF(pVM);
1694 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1695 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1696 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1697
1698 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1699 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1700 if (!pPdpt->a[iPdPt].n.u1Present)
1701 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1702
1703 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1704 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1705
1706 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1707 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1708 return VINF_SUCCESS;
1709}
1710
1711
1712/**
1713 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1714 * backing pages in case the PDPT or PML4 entry is missing.
1715 *
1716 * @returns VBox status code.
1717 * @param pVCpu The cross context virtual CPU structure.
1718 * @param GCPtr The address.
1719 * @param ppPdpt Receives the address of the PDPT.
1720 * @param ppPD Receives the address of the page directory.
1721 */
1722static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1723{
1724 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1725 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1726 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1727 PEPTPML4 pPml4;
1728 PEPTPML4E pPml4e;
1729 PPGMPOOLPAGE pShwPage;
1730 int rc;
1731
1732 Assert(pVM->pgm.s.fNestedPaging);
1733 PGM_LOCK_ASSERT_OWNER(pVM);
1734
1735 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1736 Assert(pPml4);
1737
1738 /* Allocate page directory pointer table if not present. */
1739 pPml4e = &pPml4->a[iPml4];
1740 if ( !pPml4e->n.u1Present
1741 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1742 {
1743 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1744 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1745
1746 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1747 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1748 &pShwPage);
1749 AssertRCReturn(rc, rc);
1750 }
1751 else
1752 {
1753 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1754 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1755
1756 pgmPoolCacheUsed(pPool, pShwPage);
1757 }
1758
1759 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1760/** @todo r=bird: This is sub-optimal, gcc 10 generates a qword move of the address followed by
1761 * a byte write of the 0x7 flag value. These two writes should be combined, but for that
1762 * we need to add/find the EPT flag defines. */
1763/** @todo r=bird: use atomic writes here and maybe only update if really needed? */
1764 pPml4e->u = pShwPage->Core.Key;
1765 pPml4e->n.u1Present = 1;
1766 pPml4e->n.u1Write = 1;
1767 pPml4e->n.u1Execute = 1;
1768
1769 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1770 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1771 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1772
1773 if (ppPdpt)
1774 *ppPdpt = pPdpt;
1775
1776 /* Allocate page directory if not present. */
1777 if ( !pPdpe->n.u1Present
1778 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1779 {
1780 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1781 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1782 pShwPage->idx, iPdPt, false /*fLockPage*/,
1783 &pShwPage);
1784 AssertRCReturn(rc, rc);
1785 }
1786 else
1787 {
1788 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1789 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1790
1791 pgmPoolCacheUsed(pPool, pShwPage);
1792 }
1793 /* The PD was cached or created; hook it up now and fill with the default value. */
1794 pPdpe->u = pShwPage->Core.Key;
1795 pPdpe->n.u1Present = 1;
1796 pPdpe->n.u1Write = 1;
1797 pPdpe->n.u1Execute = 1;
1798
1799 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1800 return VINF_SUCCESS;
1801}
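/*
 * Usage sketch (compile-guarded): fetching the shadow EPT PDE for a nested
 * guest-physical address via the function above.  The helper name is
 * hypothetical; EPT_PD_SHIFT/EPT_PD_MASK select the PDE index bits.
 */
#if 0
static int pgmExampleGetEptPde(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, PEPTPDE *ppPde)
{
    PEPTPD pPD = NULL;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysNested, NULL /*ppPdpt*/, &pPD);
    if (RT_SUCCESS(rc))
        *ppPde = &pPD->a[(GCPhysNested >> EPT_PD_SHIFT) & EPT_PD_MASK];
    return rc;
}
#endif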
1802
1803
1804#ifdef IN_RING0
1805/**
1806 * Synchronizes a range of nested page table entries.
1807 *
1808 * The caller must own the PGM lock.
1809 *
1810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1811 * @param GCPhys Where to start.
1812 * @param cPages The number of pages whose entries should be synced.
1813 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1814 * host paging mode for AMD-V).
1815 */
1816int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1817{
1818 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1819
1820/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1821 int rc;
1822 switch (enmShwPagingMode)
1823 {
1824 case PGMMODE_32_BIT:
1825 {
1826 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1827 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1828 break;
1829 }
1830
1831 case PGMMODE_PAE:
1832 case PGMMODE_PAE_NX:
1833 {
1834 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1835 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1836 break;
1837 }
1838
1839 case PGMMODE_AMD64:
1840 case PGMMODE_AMD64_NX:
1841 {
1842 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1843 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1844 break;
1845 }
1846
1847 case PGMMODE_EPT:
1848 {
1849 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1850 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1851 break;
1852 }
1853
1854 default:
1855 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1856 }
1857 return rc;
1858}
1859#endif /* IN_RING0 */
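/*
 * Usage sketch (compile-guarded): a ring-0 caller taking the PGM lock around
 * pgmShwSyncNestedPageLocked() for a single faulting page.  The helper name
 * is hypothetical.
 */
#if 0
static int pgmExampleSyncOneNestedPage(PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, PGMMODE enmShwPagingMode)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);
    int rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
    pgmUnlock(pVM);
    return rc;
}
#endif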
1860
1861
1862/**
1863 * Gets effective Guest OS page information.
1864 *
1865 * When GCPtr is in a big page, the function will return as if it was a normal
1866 * 4KB page. Should distinguishing between big and normal pages become
1867 * necessary at a later point, a dedicated variant of this function can be
1868 * added for that purpose.
1869 *
1870 * @returns VBox status code.
1871 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1872 * @param GCPtr Guest Context virtual address of the page.
1873 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1874 * @param pGCPhys Where to store the GC physical address of the page.
1875 *                      This is page aligned.
1876 */
1877VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1878{
1879 VMCPU_ASSERT_EMT(pVCpu);
1880 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1881 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1882 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1883 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
1884}
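/*
 * Usage sketch (compile-guarded): querying the guest translation of an address
 * and testing one of the returned X86_PTE_* flags.  The helper name is
 * hypothetical.
 */
#if 0
static bool pgmExampleIsGstPageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    uint64_t fFlags  = 0;
    RTGCPHYS GCPhys  = NIL_RTGCPHYS;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
    return RT_SUCCESS(rc) && (fFlags & X86_PTE_RW); /* X86_PTE_* flags, even for big pages. */
}
#endif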
1885
1886
1887/**
1888 * Performs a guest page table walk.
1889 *
1890 * The guest should be in paged protect mode or long mode when making a call to
1891 * this function.
1892 *
1893 * @returns VBox status code.
1894 * @retval VINF_SUCCESS on success.
1895 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1896 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1897 *         not valid, except that enmType is set to PGMPTWALKGSTTYPE_INVALID.
1898 *
1899 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1900 * @param GCPtr The guest virtual address to walk by.
1901 * @param pWalk Where to return the walk result. This is valid for some
1902 * error codes as well.
1903 */
1904int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1905{
1906 VMCPU_ASSERT_EMT(pVCpu);
1907 switch (pVCpu->pgm.s.enmGuestMode)
1908 {
1909 case PGMMODE_32_BIT:
1910 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1911 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1912
1913 case PGMMODE_PAE:
1914 case PGMMODE_PAE_NX:
1915 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1916 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1917
1918 case PGMMODE_AMD64:
1919 case PGMMODE_AMD64_NX:
1920 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1921 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1922
1923 case PGMMODE_REAL:
1924 case PGMMODE_PROTECTED:
1925 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1926 return VERR_PGM_NOT_USED_IN_MODE;
1927
1928 case PGMMODE_NESTED_32BIT:
1929 case PGMMODE_NESTED_PAE:
1930 case PGMMODE_NESTED_AMD64:
1931 case PGMMODE_EPT:
1932 default:
1933 AssertFailed();
1934 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1935 return VERR_PGM_NOT_USED_IN_MODE;
1936 }
1937}
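/*
 * Usage sketch (compile-guarded): a single guest page table walk.  The common
 * core of the result union carries the translated address once the walk has
 * succeeded.  The helper name is hypothetical.
 */
#if 0
static int pgmExampleTranslateGstAddr(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    PGMPTWALKGST Walk;
    RT_ZERO(Walk);
    int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc))
        *pGCPhys = Walk.u.Core.GCPhys;
    return rc;
}
#endif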
1938
1939
1940/**
1941 * Tries to continue the previous walk.
1942 *
1943 * @note Requires the caller to hold the PGM lock from the first
1944 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1945 * we cannot use the pointers.
1946 *
1947 * @returns VBox status code.
1948 * @retval VINF_SUCCESS on success.
1949 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1950 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1951 *         not valid, except that enmType is set to PGMPTWALKGSTTYPE_INVALID.
1952 *
1953 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1954 * @param GCPtr The guest virtual address to walk by.
1955 * @param pWalk Pointer to the previous walk result and where to return
1956 * the result of this walk. This is valid for some error
1957 * codes as well.
1958 */
1959int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1960{
1961 /*
1962     * We can only handle successful walks.
1963 * We also limit ourselves to the next page.
1964 */
1965 if ( pWalk->u.Core.fSucceeded
1966 && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
1967 {
1968 Assert(pWalk->u.Core.uLevel == 0);
1969 if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1970 {
1971 /*
1972 * AMD64
1973 */
1974 if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
1975 {
1976 /*
1977 * We fall back to full walk if the PDE table changes, if any
1978 * reserved bits are set, or if the effective page access changes.
1979 */
1980 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1981 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1982 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1983 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1984
1985 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
1986 {
1987 if (pWalk->u.Amd64.pPte)
1988 {
1989 X86PTEPAE Pte;
1990 Pte.u = pWalk->u.Amd64.pPte[1].u;
1991 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
1992 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1993 {
1995 pWalk->u.Core.GCPtr = GCPtr;
1996 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1997 pWalk->u.Amd64.Pte.u = Pte.u;
1998 pWalk->u.Amd64.pPte++;
1999 return VINF_SUCCESS;
2000 }
2001 }
2002 }
2003 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
2004 {
2005 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2006 if (pWalk->u.Amd64.pPde)
2007 {
2008 X86PDEPAE Pde;
2009 Pde.u = pWalk->u.Amd64.pPde[1].u;
2010 if ( (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
2011 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2012 {
2013 /* Get the new PTE and check out the first entry. */
2014 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2015 &pWalk->u.Amd64.pPt);
2016 if (RT_SUCCESS(rc))
2017 {
2018 pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
2019 X86PTEPAE Pte;
2020 Pte.u = pWalk->u.Amd64.pPte->u;
2021 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
2022 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2023 {
2024 pWalk->u.Core.GCPtr = GCPtr;
2025 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2026 pWalk->u.Amd64.Pte.u = Pte.u;
2027 pWalk->u.Amd64.Pde.u = Pde.u;
2028 pWalk->u.Amd64.pPde++;
2029 return VINF_SUCCESS;
2030 }
2031 }
2032 }
2033 }
2034 }
2035 }
2036 else if (!pWalk->u.Core.fGigantPage)
2037 {
2038 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
2039 {
2040 pWalk->u.Core.GCPtr = GCPtr;
2041 pWalk->u.Core.GCPhys += PAGE_SIZE;
2042 return VINF_SUCCESS;
2043 }
2044 }
2045 else
2046 {
2047 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
2048 {
2049 pWalk->u.Core.GCPtr = GCPtr;
2050 pWalk->u.Core.GCPhys += PAGE_SIZE;
2051 return VINF_SUCCESS;
2052 }
2053 }
2054 }
2055 }
2056 /* Case we don't handle. Do full walk. */
2057 return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
2058}
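/*
 * Usage sketch (compile-guarded): walking a run of consecutive pages while
 * holding the PGM lock across the initial walk and all continuations, as the
 * @note above requires.  The helper name is hypothetical.
 */
#if 0
static int pgmExampleWalkRange(PVMCPUCC pVCpu, RTGCPTR GCPtrFirst, uint32_t cPages)
{
    PVMCC        pVM = pVCpu->CTX_SUFF(pVM);
    PGMPTWALKGST Walk;
    RT_ZERO(Walk);
    pgmLock(pVM);
    int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk);
    for (uint32_t iPage = 1; RT_SUCCESS(rc) && iPage < cPages; iPage++)
        rc = pgmGstPtWalkNext(pVCpu, GCPtrFirst + iPage * PAGE_SIZE, &Walk);
    pgmUnlock(pVM);
    return rc;
}
#endif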
2059
2060
2061/**
2062 * Checks if the page is present.
2063 *
2064 * @returns true if the page is present.
2065 * @returns false if the page is not present.
2066 * @param pVCpu The cross context virtual CPU structure.
2067 * @param GCPtr Address within the page.
2068 */
2069VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr)
2070{
2071 VMCPU_ASSERT_EMT(pVCpu);
2072 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
2073 return RT_SUCCESS(rc);
2074}
2075
2076
2077/**
2078 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2079 *
2080 * @returns VBox status code.
2081 * @param pVCpu The cross context virtual CPU structure.
2082 * @param GCPtr The address of the first page.
2083 * @param cb The size of the range in bytes.
2084 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2085 */
2086VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2087{
2088 VMCPU_ASSERT_EMT(pVCpu);
2089 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2090}
2091
2092
2093/**
2094 * Modifies page flags for a range of pages in the guest's tables.
2095 *
2096 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2097 *
2098 * @returns VBox status code.
2099 * @param pVCpu The cross context virtual CPU structure.
2100 * @param GCPtr Virtual address of the first page in the range.
2101 * @param cb Size (in bytes) of the range to apply the modification to.
2102 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2103 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2104 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2105 */
2106VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2107{
2108 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
2109 VMCPU_ASSERT_EMT(pVCpu);
2110
2111 /*
2112 * Validate input.
2113 */
2114 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2115 Assert(cb);
2116
2117 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2118
2119 /*
2120 * Adjust input.
2121 */
2122 cb += GCPtr & PAGE_OFFSET_MASK;
2123 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2124 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2125
2126 /*
2127 * Call worker.
2128 */
2129 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2130 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2131 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2132 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2133
2134 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
2135 return rc;
2136}
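/*
 * Usage sketch (compile-guarded): write protecting a single guest page with
 * PGMGstModifyPage().  It illustrates the 64-bit cast before ~'ing the AND
 * mask, per the warning in the documentation above.  The helper name is
 * hypothetical.
 */
#if 0
static int pgmExampleWriteProtectGstPage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    /* Set no flags, clear only X86_PTE_RW; cast before ~ so the mask stays 64-bit wide. */
    return PGMGstModifyPage(pVCpu, GCPtr, PAGE_SIZE, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif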
2137
2138
2139#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2140
2141/**
2142 * Performs the lazy mapping of the 32-bit guest PD.
2143 *
2144 * @returns VBox status code.
2145 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2146 * @param ppPd Where to return the pointer to the mapping. This is
2147 * always set.
2148 */
2149int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2150{
2151 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2152 pgmLock(pVM);
2153
2154 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2155
2156 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2157 PPGMPAGE pPage;
2158 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2159 if (RT_SUCCESS(rc))
2160 {
2161# ifdef VBOX_WITH_RAM_IN_KERNEL
2162 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2163 if (RT_SUCCESS(rc))
2164 {
2165# ifdef IN_RING3
2166 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2167 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2168# else
2169 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2170 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2171# endif
2172 pgmUnlock(pVM);
2173 return VINF_SUCCESS;
2174 }
2175# else
2176 RTHCPTR HCPtrGuestCR3;
2177 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
2178 if (RT_SUCCESS(rc))
2179 {
2180 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
2181# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2182 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
2183# endif
2184 *ppPd = (PX86PD)HCPtrGuestCR3;
2185
2186 pgmUnlock(pVM);
2187 return VINF_SUCCESS;
2188 }
2189# endif
2190 AssertRC(rc);
2191 }
2192 pgmUnlock(pVM);
2193
2194 *ppPd = NULL;
2195 return rc;
2196}
2197
2198
2199/**
2200 * Performs the lazy mapping of the PAE guest PDPT.
2201 *
2202 * @returns VBox status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2204 * @param ppPdpt Where to return the pointer to the mapping. This is
2205 * always set.
2206 */
2207int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2208{
2209 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2210 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2211 pgmLock(pVM);
2212
2213 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2214 PPGMPAGE pPage;
2215 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2216 if (RT_SUCCESS(rc))
2217 {
2218# ifdef VBOX_WITH_RAM_IN_KERNEL
2219 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2220 if (RT_SUCCESS(rc))
2221 {
2222# ifdef IN_RING3
2223 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2224 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2225# else
2226 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2227 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2228# endif
2229 pgmUnlock(pVM);
2230 return VINF_SUCCESS;
2231 }
2232# else
2233 RTHCPTR HCPtrGuestCR3;
2234 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
2235 if (RT_SUCCESS(rc))
2236 {
2237 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
2238# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2239 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
2240# endif
2241 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
2242
2243 pgmUnlock(pVM);
2244 return VINF_SUCCESS;
2245 }
2246# endif
2247 AssertRC(rc);
2248 }
2249
2250 pgmUnlock(pVM);
2251 *ppPdpt = NULL;
2252 return rc;
2253}
2254
2255
2256/**
2257 * Performs the lazy mapping / updating of a PAE guest PD.
2258 *
2259 * @returns VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2262 * @param iPdpt Which PD entry to map (0..3).
2263 * @param ppPd Where to return the pointer to the mapping. This is
2264 * always set.
2265 */
2266int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2267{
2268 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2269 pgmLock(pVM);
2270
2271 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2272 Assert(pGuestPDPT);
2273 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
2274 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2275 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2276
2277 PPGMPAGE pPage;
2278 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2279 if (RT_SUCCESS(rc))
2280 {
2281# ifdef VBOX_WITH_RAM_IN_KERNEL
2282 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2283 AssertRC(rc);
2284 if (RT_SUCCESS(rc))
2285 {
2286# ifdef IN_RING3
2287 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2288 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2289# else
2290 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2291 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2292# endif
2293 if (fChanged)
2294 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2295 pgmUnlock(pVM);
2296 return VINF_SUCCESS;
2297 }
2298# else
2299 RTHCPTR HCPtr = NIL_RTHCPTR;
2300# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2301 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
2302 AssertRC(rc);
2303# endif
2304 if (RT_SUCCESS(rc))
2305 {
2306 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
2307# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2308 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
2309# endif
2310 if (fChanged)
2311 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2312
2313 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
2314 pgmUnlock(pVM);
2315 return VINF_SUCCESS;
2316 }
2317# endif
2318 }
2319
2320    /* Invalid page or some failure; invalidate the entry. */
2321 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2322 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2323# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2324 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2325# endif
2326
2327 pgmUnlock(pVM);
2328 return rc;
2329}
2330
2331
2332/**
2333 * Performs the lazy mapping of the AMD64 guest PML4 table.
2334 *
2335 * @returns VBox status code.
2336 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2337 * @param ppPml4 Where to return the pointer to the mapping. This will
2338 * always be set.
2339 */
2340int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2341{
2342 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2343 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2344 pgmLock(pVM);
2345
2346 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2347 PPGMPAGE pPage;
2348 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2349 if (RT_SUCCESS(rc))
2350 {
2351# ifdef VBOX_WITH_RAM_IN_KERNEL
2352 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2353 if (RT_SUCCESS(rc))
2354 {
2355# ifdef IN_RING3
2356 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2357 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2358# else
2359 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2360 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2361# endif
2362 pgmUnlock(pVM);
2363 return VINF_SUCCESS;
2364 }
2365# else
2366 RTHCPTR HCPtrGuestCR3;
2367 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
2368 if (RT_SUCCESS(rc))
2369 {
2370 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
2371# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2372 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
2373# endif
2374 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
2375
2376 pgmUnlock(pVM);
2377 return VINF_SUCCESS;
2378 }
2379# endif
2380 }
2381
2382 pgmUnlock(pVM);
2383 *ppPml4 = NULL;
2384 return rc;
2385}
2386
2387#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2388
2389
2390/**
2391 * Gets the PAE PDPEs values cached by the CPU.
2392 *
2393 * @returns VBox status code.
2394 * @param pVCpu The cross context virtual CPU structure.
2395 * @param paPdpes Where to return the four PDPEs. The array
2396 * pointed to must have 4 entries.
2397 */
2398VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPUCC pVCpu, PX86PDPE paPdpes)
2399{
2400 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2401
2402 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
2403 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
2404 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
2405 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
2406 return VINF_SUCCESS;
2407}
2408
2409
2410/**
2411 * Sets the PAE PDPE values cached by the CPU.
2412 *
2413 * @remarks This must be called *AFTER* PGMUpdateCR3.
2414 *
2415 * @param pVCpu The cross context virtual CPU structure.
2416 * @param paPdpes The four PDPE values. The array pointed to must
2417 * have exactly 4 entries.
2418 *
2419 * @remarks No-long-jump zone!!!
2420 */
2421VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPdpes)
2422{
2423 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2424
2425 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
2426 {
2427 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
2428 {
2429 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
2430
2431 /* Force lazy remapping if it changed in any way. */
2432 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2433#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2434 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2435#endif
2436 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2437 }
2438 }
2439
2440 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
2441}
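/*
 * Ordering sketch (compile-guarded): per the @remarks above, the PDPE update
 * must come after PGMUpdateCR3().  The helper name is hypothetical; paPdpes
 * would come from the VMCS/VMCB in real code.
 */
#if 0
static int pgmExampleLoadPaePdpes(PVMCPUCC pVCpu, uint64_t cr3, PCX86PDPE paPdpes)
{
    int rc = PGMUpdateCR3(pVCpu, cr3);        /* First remap the CR3 content... */
    if (RT_SUCCESS(rc))
        PGMGstUpdatePaePdpes(pVCpu, paPdpes); /* ...then refresh the four cached PDPEs. */
    return rc;
}
#endif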
2442
2443
2444/**
2445 * Gets the current CR3 register value for the shadow memory context.
2446 * @returns CR3 value.
2447 * @param pVCpu The cross context virtual CPU structure.
2448 */
2449VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2450{
2451 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2452 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2453 return pPoolPage->Core.Key;
2454}
2455
2456
2457/**
2458 * Performs and schedules necessary updates following a CR3 load or reload.
2459 *
2460 * This will normally involve mapping the guest PD or nPDPT.
2461 *
2462 * @returns VBox status code.
2463 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2464 * safely be ignored and overridden since the FF will be set too then.
2465 * @param pVCpu The cross context virtual CPU structure.
2466 * @param cr3 The new cr3.
2467 * @param fGlobal Indicates whether this is a global flush or not.
2468 */
2469VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2470{
2471 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2472 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2473
2474 VMCPU_ASSERT_EMT(pVCpu);
2475
2476 /*
2477     * Always flag the necessary updates; this is required for hardware acceleration.
2478 */
2479 /** @todo optimize this, it shouldn't always be necessary. */
2480 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2481 if (fGlobal)
2482 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2483 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2484
2485 /*
2486 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2487 */
2488 int rc = VINF_SUCCESS;
2489 RTGCPHYS GCPhysCR3;
2490 switch (pVCpu->pgm.s.enmGuestMode)
2491 {
2492 case PGMMODE_PAE:
2493 case PGMMODE_PAE_NX:
2494 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2495 break;
2496 case PGMMODE_AMD64:
2497 case PGMMODE_AMD64_NX:
2498 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2499 break;
2500 default:
2501 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2502 break;
2503 }
2504 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2505
2506 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2507 if (GCPhysOldCR3 != GCPhysCR3)
2508 {
2509 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2510 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2511 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2512
2513 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2514 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2515 if (RT_LIKELY(rc == VINF_SUCCESS))
2516 {
2517 if (pgmMapAreMappingsFloating(pVM))
2518 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2519 }
2520 else
2521 {
2522 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2523 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2524 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2525 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2526 if (pgmMapAreMappingsFloating(pVM))
2527 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
2528 }
2529
2530 if (fGlobal)
2531 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2532 else
2533 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
2534 }
2535 else
2536 {
2537#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2538 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2539 if (pPool->cDirtyPages)
2540 {
2541 pgmLock(pVM);
2542 pgmPoolResetDirtyPages(pVM);
2543 pgmUnlock(pVM);
2544 }
2545#endif
2546 /*
2547 * Check if we have a pending update of the CR3 monitoring.
2548 */
2549 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2550 {
2551 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2552 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2553 }
2554 if (fGlobal)
2555 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2556 else
2557 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2558 }
2559
2560 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2561 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2562 return rc;
2563}
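/*
 * Usage sketch (compile-guarded): a MOV CR3 emulation path calling
 * PGMFlushTLB().  It mirrors the rule PGMSyncCR3 applies below: when CR4.PGE
 * is clear, every CR3 load is effectively a global flush.  The helper name is
 * hypothetical.
 */
#if 0
static int pgmExampleMovCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint64_t cr4)
{
    bool const fGlobal = !(cr4 & X86_CR4_PGE); /* No global pages -> all flushes are global. */
    return PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
}
#endif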
2564
2565
2566/**
2567 * Performs and schedules necessary updates following a CR3 load or reload when
2568 * using nested or extended paging.
2569 *
2570 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2571 * TLB and triggering a SyncCR3.
2572 *
2573 * This will normally involve mapping the guest PD or nPDPT.
2574 *
2575 * @returns VBox status code.
2576 * @retval VINF_SUCCESS.
2577 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2578 * paging modes). This can safely be ignored and overridden since the
2579 * FF will be set too then.
2580 * @param pVCpu The cross context virtual CPU structure.
2581 * @param cr3 The new cr3.
2582 */
2583VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2584{
2585 VMCPU_ASSERT_EMT(pVCpu);
2586 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2587
2588 /* We assume we're only called in nested paging mode. */
2589 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2590 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2591 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2592
2593 /*
2594 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2595 */
2596 int rc = VINF_SUCCESS;
2597 RTGCPHYS GCPhysCR3;
2598 switch (pVCpu->pgm.s.enmGuestMode)
2599 {
2600 case PGMMODE_PAE:
2601 case PGMMODE_PAE_NX:
2602 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2603 break;
2604 case PGMMODE_AMD64:
2605 case PGMMODE_AMD64_NX:
2606 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2607 break;
2608 default:
2609 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2610 break;
2611 }
2612 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2613
2614 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2615 {
2616 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2617 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2618 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2619
2620 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2621 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2622
2623 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2624 }
2625
2626 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2627 return rc;
2628}
2629
2630
2631/**
2632 * Synchronize the paging structures.
2633 *
2634 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2635 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL flags. Those two force action flags are set
2636 * in several places, most importantly whenever the CR3 is loaded.
2637 *
2638 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2639 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2640 * the VMM into guest context.
2641 * @param pVCpu The cross context virtual CPU structure.
2642 * @param cr0 Guest context CR0 register
2643 * @param cr3 Guest context CR3 register
2644 * @param cr4 Guest context CR4 register
2645 * @param fGlobal Including global page directories or not
2646 */
2647VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2648{
2649 int rc;
2650
2651 VMCPU_ASSERT_EMT(pVCpu);
2652
2653 /*
2654 * The pool may have pending stuff and even require a return to ring-3 to
2655 * clear the whole thing.
2656 */
2657 rc = pgmPoolSyncCR3(pVCpu);
2658 if (rc != VINF_SUCCESS)
2659 return rc;
2660
2661 /*
2662 * We might be called when we shouldn't.
2663 *
2664 * The mode switching will ensure that the PD is resynced after every mode
2665 * switch. So, if we find ourselves here when in protected or real mode
2666 * we can safely clear the FF and return immediately.
2667 */
2668 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2669 {
2670 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2671 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2672 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2673 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2674 return VINF_SUCCESS;
2675 }
2676
2677 /* If global pages are not supported, then all flushes are global. */
2678 if (!(cr4 & X86_CR4_PGE))
2679 fGlobal = true;
2680 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2681 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2682
2683 /*
2684 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2685 * This should be done before SyncCR3.
2686 */
2687 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2688 {
2689 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2690
2691 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2692 RTGCPHYS GCPhysCR3;
2693 switch (pVCpu->pgm.s.enmGuestMode)
2694 {
2695 case PGMMODE_PAE:
2696 case PGMMODE_PAE_NX:
2697 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2698 break;
2699 case PGMMODE_AMD64:
2700 case PGMMODE_AMD64_NX:
2701 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2702 break;
2703 default:
2704 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2705 break;
2706 }
2707 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2708
2709 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2710 {
2711 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2712 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2713 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2714 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2715 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2716 }
2717
2718 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2719 if ( rc == VINF_PGM_SYNC_CR3
2720 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2721 {
2722 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2723#ifdef IN_RING3
2724 rc = pgmPoolSyncCR3(pVCpu);
2725#else
2726 if (rc == VINF_PGM_SYNC_CR3)
2727 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2728 return VINF_PGM_SYNC_CR3;
2729#endif
2730 }
2731 AssertRCReturn(rc, rc);
2732 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2733 }
2734
2735 /*
2736 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2737 */
2738 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2739
2740 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2741 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2742 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2743 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2744
2745 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2746 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2747 if (rc == VINF_SUCCESS)
2748 {
2749 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2750 {
2751 /* Go back to ring 3 if a pgm pool sync is again pending. */
2752 return VINF_PGM_SYNC_CR3;
2753 }
2754
2755 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2756 {
2757 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2758 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2759 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2760 }
2761
2762 /*
2763 * Check if we have a pending update of the CR3 monitoring.
2764 */
2765 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2766 {
2767 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2768 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2769 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2770 }
2771 }
2772
2773 /*
2774 * Now flush the CR3 (guest context).
2775 */
2776 if (rc == VINF_SUCCESS)
2777 PGM_INVL_VCPU_TLBS(pVCpu);
2778 return rc;
2779}
2780
2781
2782/**
2783 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2784 *
2785 * @returns VBox status code, with the following informational code for
2786 * VM scheduling.
2787 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2788 * @retval VINF_PGM_CHANGE_MODE if we're in RC and the mode changes. This will
2789 * NOT be returned in ring-3 or ring-0.
2790 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2791 *
2792 * @param pVCpu The cross context virtual CPU structure.
2793 * @param cr0 The new cr0.
2794 * @param cr4 The new cr4.
2795 * @param efer The new extended feature enable register.
2796 */
2797VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2798{
2799 VMCPU_ASSERT_EMT(pVCpu);
2800
2801 /*
2802 * Calc the new guest mode.
2803 *
2804 * Note! We check PG before PE and without requiring PE because of the
2805 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2806 */
2807 PGMMODE enmGuestMode;
2808 if (cr0 & X86_CR0_PG)
2809 {
2810 if (!(cr4 & X86_CR4_PAE))
2811 {
2812 bool const fPse = !!(cr4 & X86_CR4_PSE);
2813 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2814 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2815 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2816 enmGuestMode = PGMMODE_32_BIT;
2817 }
2818 else if (!(efer & MSR_K6_EFER_LME))
2819 {
2820 if (!(efer & MSR_K6_EFER_NXE))
2821 enmGuestMode = PGMMODE_PAE;
2822 else
2823 enmGuestMode = PGMMODE_PAE_NX;
2824 }
2825 else
2826 {
2827 if (!(efer & MSR_K6_EFER_NXE))
2828 enmGuestMode = PGMMODE_AMD64;
2829 else
2830 enmGuestMode = PGMMODE_AMD64_NX;
2831 }
2832 }
2833 else if (!(cr0 & X86_CR0_PE))
2834 enmGuestMode = PGMMODE_REAL;
2835 else
2836 enmGuestMode = PGMMODE_PROTECTED;
2837
2838 /*
2839 * Did it change?
2840 */
2841 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2842 return VINF_SUCCESS;
2843
2844 /* Flush the TLB */
2845 PGM_INVL_VCPU_TLBS(pVCpu);
2846 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2847}
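/*
 * Informal summary of the guest mode calculation above:
 *
 *   CR0.PG  CR0.PE  CR4.PAE  EFER.LME  EFER.NXE  ->  enmGuestMode
 *     0       0        -        -         -          PGMMODE_REAL
 *     0       1        -        -         -          PGMMODE_PROTECTED
 *     1       -        0        -         -          PGMMODE_32_BIT
 *     1       -        1        0         0          PGMMODE_PAE
 *     1       -        1        0         1          PGMMODE_PAE_NX
 *     1       -        1        1         0          PGMMODE_AMD64
 *     1       -        1        1         1          PGMMODE_AMD64_NX
 */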
2848
2849
2850/**
2851 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2852 *
2853 * @returns PGM_TYPE_*.
2854 * @param pgmMode The mode value to convert.
2855 */
2856DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2857{
2858 switch (pgmMode)
2859 {
2860 case PGMMODE_REAL: return PGM_TYPE_REAL;
2861 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
2862 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
2863 case PGMMODE_PAE:
2864 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
2865 case PGMMODE_AMD64:
2866 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
2867 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
2868 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
2869 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
2870 case PGMMODE_EPT: return PGM_TYPE_EPT;
2871 case PGMMODE_NONE: return PGM_TYPE_NONE;
2872 default:
2873 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
2874 }
2875}
2876
2877
2878/**
2879 * Calculates the shadow paging mode.
2880 *
2881 * @returns The shadow paging mode.
2882 * @param pVM The cross context VM structure.
2883 * @param enmGuestMode The guest mode.
2884 * @param enmHostMode The host mode.
2885 * @param enmShadowMode The current shadow mode.
2886 */
2887static PGMMODE pgmCalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
2888{
2889 switch (enmGuestMode)
2890 {
2891 /*
2892 * When switching to real or protected mode we don't change
2893 * anything since it's likely that we'll switch back pretty soon.
2894 *
2895         * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID,
2896         * and this code is then supposed to determine which shadow paging
2897         * mode and switcher to use during init.
2898 */
2899 case PGMMODE_REAL:
2900 case PGMMODE_PROTECTED:
2901 if ( enmShadowMode != PGMMODE_INVALID
2902 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
2903 break; /* (no change) */
2904
2905 switch (enmHostMode)
2906 {
2907 case SUPPAGINGMODE_32_BIT:
2908 case SUPPAGINGMODE_32_BIT_GLOBAL:
2909 enmShadowMode = PGMMODE_32_BIT;
2910 break;
2911
2912 case SUPPAGINGMODE_PAE:
2913 case SUPPAGINGMODE_PAE_NX:
2914 case SUPPAGINGMODE_PAE_GLOBAL:
2915 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2916 enmShadowMode = PGMMODE_PAE;
2917 break;
2918
2919 case SUPPAGINGMODE_AMD64:
2920 case SUPPAGINGMODE_AMD64_GLOBAL:
2921 case SUPPAGINGMODE_AMD64_NX:
2922 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2923 enmShadowMode = PGMMODE_PAE;
2924 break;
2925
2926 default:
2927 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2928 }
2929 break;
2930
2931 case PGMMODE_32_BIT:
2932 switch (enmHostMode)
2933 {
2934 case SUPPAGINGMODE_32_BIT:
2935 case SUPPAGINGMODE_32_BIT_GLOBAL:
2936 enmShadowMode = PGMMODE_32_BIT;
2937 break;
2938
2939 case SUPPAGINGMODE_PAE:
2940 case SUPPAGINGMODE_PAE_NX:
2941 case SUPPAGINGMODE_PAE_GLOBAL:
2942 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2943 enmShadowMode = PGMMODE_PAE;
2944 break;
2945
2946 case SUPPAGINGMODE_AMD64:
2947 case SUPPAGINGMODE_AMD64_GLOBAL:
2948 case SUPPAGINGMODE_AMD64_NX:
2949 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2950 enmShadowMode = PGMMODE_PAE;
2951 break;
2952
2953 default:
2954 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2955 }
2956 break;
2957
2958 case PGMMODE_PAE:
2959 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
2960 switch (enmHostMode)
2961 {
2962 case SUPPAGINGMODE_32_BIT:
2963 case SUPPAGINGMODE_32_BIT_GLOBAL:
2964 enmShadowMode = PGMMODE_PAE;
2965 break;
2966
2967 case SUPPAGINGMODE_PAE:
2968 case SUPPAGINGMODE_PAE_NX:
2969 case SUPPAGINGMODE_PAE_GLOBAL:
2970 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2971 enmShadowMode = PGMMODE_PAE;
2972 break;
2973
2974 case SUPPAGINGMODE_AMD64:
2975 case SUPPAGINGMODE_AMD64_GLOBAL:
2976 case SUPPAGINGMODE_AMD64_NX:
2977 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2978 enmShadowMode = PGMMODE_PAE;
2979 break;
2980
2981 default:
2982 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2983 }
2984 break;
2985
2986 case PGMMODE_AMD64:
2987 case PGMMODE_AMD64_NX:
2988 switch (enmHostMode)
2989 {
2990 case SUPPAGINGMODE_32_BIT:
2991 case SUPPAGINGMODE_32_BIT_GLOBAL:
2992 enmShadowMode = PGMMODE_AMD64;
2993 break;
2994
2995 case SUPPAGINGMODE_PAE:
2996 case SUPPAGINGMODE_PAE_NX:
2997 case SUPPAGINGMODE_PAE_GLOBAL:
2998 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2999 enmShadowMode = PGMMODE_AMD64;
3000 break;
3001
3002 case SUPPAGINGMODE_AMD64:
3003 case SUPPAGINGMODE_AMD64_GLOBAL:
3004 case SUPPAGINGMODE_AMD64_NX:
3005 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3006 enmShadowMode = PGMMODE_AMD64;
3007 break;
3008
3009 default:
3010 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3011 }
3012 break;
3013
3014 default:
3015 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3016 }
3017
3018 /*
3019 * Override the shadow mode when NEM or nested paging is active.
3020 */
3021 if (VM_IS_NEM_ENABLED(pVM))
3022 {
3023 pVM->pgm.s.fNestedPaging = true;
3024 enmShadowMode = PGMMODE_NONE;
3025 }
3026 else
3027 {
3028 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3029 pVM->pgm.s.fNestedPaging = fNestedPaging;
3030 if (fNestedPaging)
3031 {
3032 if (HMIsVmxActive(pVM))
3033 enmShadowMode = PGMMODE_EPT;
3034 else
3035 {
3036 /* The nested SVM paging depends on the host one. */
3037 Assert(HMIsSvmActive(pVM));
3038 if ( enmGuestMode == PGMMODE_AMD64
3039 || enmGuestMode == PGMMODE_AMD64_NX)
3040 enmShadowMode = PGMMODE_NESTED_AMD64;
3041 else
3042 switch (pVM->pgm.s.enmHostMode)
3043 {
3044 case SUPPAGINGMODE_32_BIT:
3045 case SUPPAGINGMODE_32_BIT_GLOBAL:
3046 enmShadowMode = PGMMODE_NESTED_32BIT;
3047 break;
3048
3049 case SUPPAGINGMODE_PAE:
3050 case SUPPAGINGMODE_PAE_GLOBAL:
3051 case SUPPAGINGMODE_PAE_NX:
3052 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3053 enmShadowMode = PGMMODE_NESTED_PAE;
3054 break;
3055
3056 case SUPPAGINGMODE_AMD64:
3057 case SUPPAGINGMODE_AMD64_GLOBAL:
3058 case SUPPAGINGMODE_AMD64_NX:
3059 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3060 enmShadowMode = PGMMODE_NESTED_AMD64;
3061 break;
3062
3063 default:
3064 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3065 }
3066 }
3067 }
3068 }
3069
3070 return enmShadowMode;
3071}
3072
3073
3074/**
3075 * Performs the actual mode change.
3076 * This is called by PGMChangeMode and pgmR3InitPaging().
3077 *
3078 * @returns VBox status code. May suspend or power off the VM on error, but this
3079 * will trigger using FFs and not informational status codes.
3080 *
3081 * @param pVM The cross context VM structure.
3082 * @param pVCpu The cross context virtual CPU structure.
3083 * @param enmGuestMode The new guest mode. This is assumed to be different from
3084 * the current mode.
3085 */
3086VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3087{
3088 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3089 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3090
3091 /*
3092 * Calc the shadow mode and switcher.
3093 */
3094 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3095
3096 /*
3097 * Exit old mode(s).
3098 */
3099 /* shadow */
3100 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3101 {
3102 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3103 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3104 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3105 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3106 {
3107 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3108 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3109 }
3110 }
3111 else
3112 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3113
3114 /* guest */
3115 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3116 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3117 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3118 {
3119 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3120 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3121 }
3122 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3123
3124 /*
3125 * Change the paging mode data indexes.
3126 */
3127 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3128 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3129 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3130 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3131 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3132 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPDE, VERR_PGM_MODE_IPE);
3133 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3134 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3135#ifdef IN_RING3
3136 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3137#endif
3138
3139 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3140 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3141 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3142 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3143 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3144 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3145 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3146#ifdef IN_RING3
3147 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3148#endif
3149
3150 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3151 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3152 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3153 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3154 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3155 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3156 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3157 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3158 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3159 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3160#ifdef VBOX_STRICT
3161 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3162#endif
3163
3164 /*
3165 * Enter new shadow mode (if changed).
3166 */
3167 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3168 {
3169 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3170 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3171 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3172 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3173 }
3174
3175 /*
3176 * Always flag the necessary updates
3177 */
3178 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3179
3180 /*
3181 * Enter the new guest and shadow+guest modes.
3182 */
3183 /* Calc the new CR3 value. */
3184 RTGCPHYS GCPhysCR3;
3185 switch (enmGuestMode)
3186 {
3187 case PGMMODE_REAL:
3188 case PGMMODE_PROTECTED:
3189 GCPhysCR3 = NIL_RTGCPHYS;
3190 break;
3191
3192 case PGMMODE_32_BIT:
3193 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3194 break;
3195
3196 case PGMMODE_PAE_NX:
3197 case PGMMODE_PAE:
3198 if (!pVM->cpum.ro.GuestFeatures.fPae)
3199 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3200 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3201 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3202 break;
3203
3204#ifdef VBOX_WITH_64_BITS_GUESTS
3205 case PGMMODE_AMD64_NX:
3206 case PGMMODE_AMD64:
3207 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3208 break;
3209#endif
3210 default:
3211 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3212 }
3213
3214 /* Enter the new guest mode. */
3215 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3216 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3217 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3218
3219 /* Set the new guest CR3. */
3220 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3221
3222    /* Merge the status codes; errors take precedence (rc before rc2) and informational codes are dropped. */
3223 AssertRC(rc);
3224 AssertRC(rc2);
3225 if (RT_SUCCESS(rc))
3226 {
3227 rc = rc2;
3228 if (RT_SUCCESS(rc)) /* no informational status codes. */
3229 rc = VINF_SUCCESS;
3230 }
3231
3232 /*
3233 * Notify HM.
3234 */
3235 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3236 return rc;
3237}
3238
3239
3240/**
3241 * Called by CPUM or REM when CR0.WP changes to 1.
3242 *
3243 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3244 * @thread EMT
3245 */
3246VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3247{
3248 /*
3249 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3250 *
3251 * Use the counter to judge whether there might be pool pages with active
3252 * hacks in them. If there are, we will be running the risk of messing up
3253 * the guest by allowing it to write to read-only pages. Thus, we have to
3254 * clear the page pool ASAP if there is the slightest chance.
3255 */
3256 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3257 {
3258 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3259
3260 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3261 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3262 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3263 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3264 }
3265}
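
/* Illustrative caller sketch (hypothetical, not lifted from the actual CPUM
 * code): the notification only matters on a 0 -> 1 transition of CR0.WP, e.g.:
 *
 *     if (!(uOldCr0 & X86_CR0_WP) && (uNewCr0 & X86_CR0_WP))
 *         PGMCr0WpEnabled(pVCpu);
 */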
3266
3267
3268/**
3269 * Gets the current guest paging mode.
3270 *
3271 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3272 *
3273 * @returns The current paging mode.
3274 * @param pVCpu The cross context virtual CPU structure.
3275 */
3276VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3277{
3278 return pVCpu->pgm.s.enmGuestMode;
3279}
3280
3281
3282/**
3283 * Gets the current shadow paging mode.
3284 *
3285 * @returns The current paging mode.
3286 * @param pVCpu The cross context virtual CPU structure.
3287 */
3288VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3289{
3290 return pVCpu->pgm.s.enmShadowMode;
3291}
3292
3293
3294/**
3295 * Gets the current host paging mode.
3296 *
3297 * @returns The current paging mode.
3298 * @param pVM The cross context VM structure.
3299 */
3300VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3301{
3302 switch (pVM->pgm.s.enmHostMode)
3303 {
3304 case SUPPAGINGMODE_32_BIT:
3305 case SUPPAGINGMODE_32_BIT_GLOBAL:
3306 return PGMMODE_32_BIT;
3307
3308 case SUPPAGINGMODE_PAE:
3309 case SUPPAGINGMODE_PAE_GLOBAL:
3310 return PGMMODE_PAE;
3311
3312 case SUPPAGINGMODE_PAE_NX:
3313 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3314 return PGMMODE_PAE_NX;
3315
3316 case SUPPAGINGMODE_AMD64:
3317 case SUPPAGINGMODE_AMD64_GLOBAL:
3318 return PGMMODE_AMD64;
3319
3320 case SUPPAGINGMODE_AMD64_NX:
3321 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3322 return PGMMODE_AMD64_NX;
3323
3324 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3325 }
3326
3327 return PGMMODE_INVALID;
3328}
3329
3330
3331/**
3332 * Get mode name.
3333 *
3334 * @returns read-only name string.
3335 * @param enmMode The mode whose name is desired.
3336 */
3337VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3338{
3339 switch (enmMode)
3340 {
3341 case PGMMODE_REAL: return "Real";
3342 case PGMMODE_PROTECTED: return "Protected";
3343 case PGMMODE_32_BIT: return "32-bit";
3344 case PGMMODE_PAE: return "PAE";
3345 case PGMMODE_PAE_NX: return "PAE+NX";
3346 case PGMMODE_AMD64: return "AMD64";
3347 case PGMMODE_AMD64_NX: return "AMD64+NX";
3348 case PGMMODE_NESTED_32BIT: return "Nested-32";
3349 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3350 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3351 case PGMMODE_EPT: return "EPT";
3352 case PGMMODE_NONE: return "None";
3353 default: return "unknown mode value";
3354 }
3355}
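
/* Typical (illustrative) usage is in logging and error reporting, e.g.:
 *     Log(("Guest paging mode: %s\n", PGMGetModeName(PGMGetGuestMode(pVCpu))));
 */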
3356
3357
3358/**
3359 * Gets the physical address represented in the guest CR3 as PGM sees it.
3360 *
3361 * This is mainly for logging and debugging.
3362 *
3363 * @returns PGM's guest CR3 value.
3364 * @param pVCpu The cross context virtual CPU structure.
3365 */
3366VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3367{
3368 return pVCpu->pgm.s.GCPhysCR3;
3369}
3370
3371
3372
3373/**
3374 * Notification from CPUM that the EFER.NXE bit has changed.
3375 *
3376 * @param pVCpu The cross context virtual CPU structure of the CPU for
3377 * which EFER changed.
3378 * @param fNxe The new NXE state.
3379 */
3380VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3381{
3382/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3383 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3384
3385 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3386 if (fNxe)
3387 {
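        /* NXE is set: the NX bit becomes a valid guest attribute, so clear it
           from the must-be-zero (MBZ) masks and start shadowing it. */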
3388 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3389 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3390 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3391 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3392 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3393 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3394 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3395 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3396 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3397 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3398 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3399
3400 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3401 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3402 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3403 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3404 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3405 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3406 }
3407 else
3408 {
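        /* NXE is clear: the NX bit is reserved again, so it must be zero in
           guest entries and is no longer shadowed. */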
3409 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3410 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3411 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3412 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3413        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3414 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3415 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3416 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3417 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3418 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3419 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3420
3421 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3422 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3423 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3424 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3425 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3426 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3427 }
3428}
3429
3430
3431/**
3432 * Checks if any PGM pool pages are marked dirty (not monitored).
3433 *
3434 * @returns true if dirty pages are present, false otherwise.
3435 * @param pVM The cross context VM structure.
3436 */
3437VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3438{
3439 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3440}
3441
3442
3443/**
3444 * Checks if the calling EMT currently owns the PGM lock.
3445 *
3446 * @returns true if owner, false otherwise.
3447 * @param pVM The cross context VM structure.
3448 */
3449VMMDECL(bool) PGMIsLockOwner(PVM pVM)
3450{
3451 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
3452}
3453
3454
3455/**
3456 * Enables or disables large page usage.
3457 *
3458 * @returns VBox status code.
3459 * @param pVM The cross context VM structure.
3460 * @param fUseLargePages Whether to use large pages.
3461 */
3462VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3463{
3464 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3465
3466 pVM->fUseLargePages = fUseLargePages;
3467 return VINF_SUCCESS;
3468}
3469
3470
3471/**
3472 * Acquire the PGM lock.
3473 *
3474 * @returns VBox status code
3475 * @param pVM The cross context VM structure.
3476 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3477 */
3478#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
3479int pgmLockDebug(PVMCC pVM, RT_SRC_POS_DECL)
3480#else
3481int pgmLock(PVMCC pVM)
3482#endif
3483{
3484#if defined(VBOX_STRICT) && defined(IN_RING3)
3485 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3486#else
3487 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
3488#endif
3489#ifdef IN_RING0
3490 if (rc == VERR_SEM_BUSY)
3491 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
3492#endif
3493 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3494 return rc;
3495}
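
/* Usage sketch: callers bracket all access to shared PGM state with the lock
 * pair, just as pgmRZDynMapGCPageCommon further down does:
 *
 *     pgmLock(pVM);
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     ...
 *     pgmUnlock(pVM);
 */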
3496
3497
3498/**
3499 * Release the PGM lock.
3500 *
3501 * @returns VBox status code
3502 * @param pVM The cross context VM structure.
3503 */
3504void pgmUnlock(PVM pVM)
3505{
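    /* Save and clear the deprecated page lock count before leaving; if the
       critical section is still held (nested enter), restore the count since
       those page locks remain valid. */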
3506 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3507 pVM->pgm.s.cDeprecatedPageLocks = 0;
3508 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
3509 if (rc == VINF_SEM_NESTED)
3510 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3511}
3512
3513#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3514
3515/**
3516 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
3517 *
3518 * @returns VBox status code.
3519 * @param pVM The cross context VM structure.
3520 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3521 * @param GCPhys The guest physical address of the page to map. The
3522 * offset bits are not ignored.
3523 * @param ppv Where to return the address corresponding to @a GCPhys.
3524 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3525 */
3526int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
3527{
3528 pgmLock(pVM);
3529
3530 /*
3531     * Convert it to a writable page and pass it on to the dynamic mapper.
3532 */
3533 int rc;
3534 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3535 if (RT_LIKELY(pPage))
3536 {
3537 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3538 if (RT_SUCCESS(rc))
3539 {
3540 void *pv;
3541 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
3542 if (RT_SUCCESS(rc))
3543 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
3544 }
3545 else
3546 AssertRC(rc);
3547 }
3548 else
3549 {
3550 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
3551 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3552 }
3553
3554 pgmUnlock(pVM);
3555 return rc;
3556}
3557
3558#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
3559#if !defined(IN_R0) || defined(LOG_ENABLED)
3560
3561/** Format handler for PGMPAGE.
3562 * @copydoc FNRTSTRFORMATTYPE */
3563static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3564 const char *pszType, void const *pvValue,
3565 int cchWidth, int cchPrecision, unsigned fFlags,
3566 void *pvUser)
3567{
3568 size_t cch;
3569 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3570 if (RT_VALID_PTR(pPage))
3571 {
3572 char szTmp[64+80];
3573
3574 cch = 0;
3575
3576 /* The single char state stuff. */
3577 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3578 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3579
3580# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
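        /* With no precision all parts are included; a precision of N includes
           only part N, while a precision of N+10 includes parts 1..N. */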
3581 if (IS_PART_INCLUDED(5))
3582 {
3583 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3584 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3585 }
3586
3587 /* The type. */
3588 if (IS_PART_INCLUDED(4))
3589 {
3590 szTmp[cch++] = ':';
3591 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3592 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3593 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3594 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3595 }
3596
3597 /* The numbers. */
3598 if (IS_PART_INCLUDED(3))
3599 {
3600 szTmp[cch++] = ':';
3601 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3602 }
3603
3604 if (IS_PART_INCLUDED(2))
3605 {
3606 szTmp[cch++] = ':';
3607 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3608 }
3609
3610 if (IS_PART_INCLUDED(6))
3611 {
3612 szTmp[cch++] = ':';
3613 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3614 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3615 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3616 }
3617# undef IS_PART_INCLUDED
3618
3619 cch = pfnOutput(pvArgOutput, szTmp, cch);
3620 }
3621 else
3622 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3623 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3624 return cch;
3625}
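
/* A full-precision rendering thus looks roughly like (illustrative):
 *     "Aa:RAM:000123456000:0000042:-0000"
 * i.e. page state, handler state, type, HC physical address, page ID, and
 * reference tracking data. */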
3626
3627
3628/** Format handler for PGMRAMRANGE.
3629 * @copydoc FNRTSTRFORMATTYPE */
3630static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3631 const char *pszType, void const *pvValue,
3632 int cchWidth, int cchPrecision, unsigned fFlags,
3633 void *pvUser)
3634{
3635 size_t cch;
3636 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3637    if (RT_VALID_PTR(pRam))
3638 {
3639 char szTmp[80];
3640 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3641 cch = pfnOutput(pvArgOutput, szTmp, cch);
3642 }
3643 else
3644 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3645 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3646 return cch;
3647}
3648
3649/** Format type handlers to be registered/deregistered. */
3650static const struct
3651{
3652 char szType[24];
3653 PFNRTSTRFORMATTYPE pfnHandler;
3654} g_aPgmFormatTypes[] =
3655{
3656 { "pgmpage", pgmFormatTypeHandlerPage },
3657 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3658};
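
/* Once registered, the handlers above are invoked through the IPRT custom
 * format type syntax, e.g. (illustrative):
 *     Log(("Page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 */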
3659
3660#endif /* !IN_R0 || LOG_ENABLED */
3661
3662/**
3663 * Registers the global string format types.
3664 *
3665 * This should be called at module load time or in some other manner that
3666 * ensures that it's called exactly once.
3667 *
3668 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3669 */
3670VMMDECL(int) PGMRegisterStringFormatTypes(void)
3671{
3672#if !defined(IN_R0) || defined(LOG_ENABLED)
3673 int rc = VINF_SUCCESS;
3674 unsigned i;
3675 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3676 {
3677 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3678# ifdef IN_RING0
3679 if (rc == VERR_ALREADY_EXISTS)
3680 {
3681 /* in case of cleanup failure in ring-0 */
3682 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3683 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3684 }
3685# endif
3686 }
3687 if (RT_FAILURE(rc))
3688 while (i-- > 0)
3689 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3690
3691 return rc;
3692#else
3693 return VINF_SUCCESS;
3694#endif
3695}
3696
3697
3698/**
3699 * Deregisters the global string format types.
3700 *
3701 * This should be called at module unload time or in some other manner that
3702 * ensures that it's called exactly once.
3703 */
3704VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3705{
3706#if !defined(IN_R0) || defined(LOG_ENABLED)
3707 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3708 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3709#endif
3710}
3711
3712#ifdef VBOX_STRICT
3713
3714# ifndef PGM_WITHOUT_MAPPINGS
3715/**
3716 * Asserts that there are no mapping conflicts.
3717 *
3718 * @returns Number of conflicts.
3719 * @param pVM The cross context VM structure.
3720 */
3721VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
3722{
3723 unsigned cErrors = 0;
3724
3725    /* Only applies to raw mode -> 1 VCPU. */
3726 Assert(pVM->cCpus == 1);
3727 PVMCPU pVCpu = &VMCC_GET_CPU_0(pVM);
3728
3729 /*
3730 * Check for mapping conflicts.
3731 */
3732 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3733 pMapping;
3734 pMapping = pMapping->CTX_SUFF(pNext))
3735 {
3736 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
3737 for (RTGCPTR GCPtr = pMapping->GCPtr; GCPtr <= pMapping->GCPtrLast; GCPtr += PAGE_SIZE)
3738 {
3739 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
3740 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
3741 {
3742 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
3743 cErrors++;
3744 break;
3745 }
3746 }
3747 }
3748
3749 return cErrors;
3750}
3751# endif /* !PGM_WITHOUT_MAPPINGS */
3752
3753
3754/**
3755 * Asserts that everything related to the guest CR3 is correctly shadowed.
3756 *
3757 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3758 * and assert the correctness of the guest CR3 mapping before asserting that the
3759 * shadow page tables are in sync with the guest page tables.
3760 *
3761 * @returns Number of conflicts.
3762 * @param pVM The cross context VM structure.
3763 * @param pVCpu The cross context virtual CPU structure.
3764 * @param cr3 The current guest CR3 register value.
3765 * @param cr4 The current guest CR4 register value.
3766 */
3767VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3768{
3769 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3770
3771 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3772 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3773 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3774
3775 pgmLock(pVM);
3776 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3777 pgmUnlock(pVM);
3778
3779 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3780 return cErrors;
3781}
3782
3783#endif /* VBOX_STRICT */
3784