VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@58123

Last change on this file since 58123 was 58123, checked in by vboxsync, 9 years ago

VMM: Made @param pVCpu more uniform and to the point.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 94.2 KB
1/* $Id: PGMAll.cpp 58123 2015-10-08 18:09:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/sup.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/csam.h>
31#include <VBox/vmm/patm.h>
32#include <VBox/vmm/trpm.h>
33#ifdef VBOX_WITH_REM
34# include <VBox/vmm/rem.h>
35#endif
36#include <VBox/vmm/em.h>
37#include <VBox/vmm/hm.h>
38#include <VBox/vmm/hm_vmx.h>
39#include "PGMInternal.h"
40#include <VBox/vmm/vm.h>
41#include "PGMInline.h"
42#include <iprt/assert.h>
43#include <iprt/asm-amd64-x86.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*********************************************************************************************************************************
51* Structures and Typedefs *
52*********************************************************************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** Pointer to the VM. */
60 PVM pVM;
61 /** Pointer to the VMCPU. */
62 PVMCPU pVCpu;
63 /** The todo flags. */
64 RTUINT fTodo;
65 /** The CR4 register value. */
66 uint32_t cr4;
67} PGMHVUSTATE, *PPGMHVUSTATE;
68
69
70/*********************************************************************************************************************************
71* Internal Functions *
72*********************************************************************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75#ifndef IN_RC
76static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
77static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
78#endif
79
80
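/*
 * The blocks that follow stamp out the paging-mode specific code by repeatedly
 * including the shadow (PGMAllShw.h), guest (PGMAllGst.h) and combined
 * (PGMAllBth.h) templates with different PGM_SHW_TYPE / PGM_GST_TYPE values
 * and name mangling macros.  The resulting per-mode functions are reached at
 * runtime through the mode specific function pointers, see the PGM_SHW_PFN()
 * and PGM_BTH_PFN() invocations further down in this file.
 */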
81/*
82 * Shadow - 32-bit mode
83 */
84#define PGM_SHW_TYPE PGM_TYPE_32BIT
85#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
86#include "PGMAllShw.h"
87
88/* Guest - real mode */
89#define PGM_GST_TYPE PGM_TYPE_REAL
90#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
91#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
92#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
93#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
94#include "PGMGstDefs.h"
95#include "PGMAllGst.h"
96#include "PGMAllBth.h"
97#undef BTH_PGMPOOLKIND_PT_FOR_PT
98#undef BTH_PGMPOOLKIND_ROOT
99#undef PGM_BTH_NAME
100#undef PGM_GST_TYPE
101#undef PGM_GST_NAME
102
103/* Guest - protected mode */
104#define PGM_GST_TYPE PGM_TYPE_PROT
105#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
106#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
107#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
108#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
109#include "PGMGstDefs.h"
110#include "PGMAllGst.h"
111#include "PGMAllBth.h"
112#undef BTH_PGMPOOLKIND_PT_FOR_PT
113#undef BTH_PGMPOOLKIND_ROOT
114#undef PGM_BTH_NAME
115#undef PGM_GST_TYPE
116#undef PGM_GST_NAME
117
118/* Guest - 32-bit mode */
119#define PGM_GST_TYPE PGM_TYPE_32BIT
120#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
121#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
122#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
123#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
124#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
125#include "PGMGstDefs.h"
126#include "PGMAllGst.h"
127#include "PGMAllBth.h"
128#undef BTH_PGMPOOLKIND_PT_FOR_BIG
129#undef BTH_PGMPOOLKIND_PT_FOR_PT
130#undef BTH_PGMPOOLKIND_ROOT
131#undef PGM_BTH_NAME
132#undef PGM_GST_TYPE
133#undef PGM_GST_NAME
134
135#undef PGM_SHW_TYPE
136#undef PGM_SHW_NAME
137
138
139/*
140 * Shadow - PAE mode
141 */
142#define PGM_SHW_TYPE PGM_TYPE_PAE
143#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
144#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
145#include "PGMAllShw.h"
146
147/* Guest - real mode */
148#define PGM_GST_TYPE PGM_TYPE_REAL
149#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
150#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
151#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
152#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
153#include "PGMGstDefs.h"
154#include "PGMAllBth.h"
155#undef BTH_PGMPOOLKIND_PT_FOR_PT
156#undef BTH_PGMPOOLKIND_ROOT
157#undef PGM_BTH_NAME
158#undef PGM_GST_TYPE
159#undef PGM_GST_NAME
160
161/* Guest - protected mode */
162#define PGM_GST_TYPE PGM_TYPE_PROT
163#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
164#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
165#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
166#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
167#include "PGMGstDefs.h"
168#include "PGMAllBth.h"
169#undef BTH_PGMPOOLKIND_PT_FOR_PT
170#undef BTH_PGMPOOLKIND_ROOT
171#undef PGM_BTH_NAME
172#undef PGM_GST_TYPE
173#undef PGM_GST_NAME
174
175/* Guest - 32-bit mode */
176#define PGM_GST_TYPE PGM_TYPE_32BIT
177#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
178#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
179#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
180#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
181#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
182#include "PGMGstDefs.h"
183#include "PGMAllBth.h"
184#undef BTH_PGMPOOLKIND_PT_FOR_BIG
185#undef BTH_PGMPOOLKIND_PT_FOR_PT
186#undef BTH_PGMPOOLKIND_ROOT
187#undef PGM_BTH_NAME
188#undef PGM_GST_TYPE
189#undef PGM_GST_NAME
190
191
192/* Guest - PAE mode */
193#define PGM_GST_TYPE PGM_TYPE_PAE
194#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
195#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
196#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
197#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
198#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
199#include "PGMGstDefs.h"
200#include "PGMAllGst.h"
201#include "PGMAllBth.h"
202#undef BTH_PGMPOOLKIND_PT_FOR_BIG
203#undef BTH_PGMPOOLKIND_PT_FOR_PT
204#undef BTH_PGMPOOLKIND_ROOT
205#undef PGM_BTH_NAME
206#undef PGM_GST_TYPE
207#undef PGM_GST_NAME
208
209#undef PGM_SHW_TYPE
210#undef PGM_SHW_NAME
211
212
213#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
214/*
215 * Shadow - AMD64 mode
216 */
217# define PGM_SHW_TYPE PGM_TYPE_AMD64
218# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
219# include "PGMAllShw.h"
220
221/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
222# define PGM_GST_TYPE PGM_TYPE_PROT
223# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
224# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
225# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
226# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
227# include "PGMGstDefs.h"
228# include "PGMAllBth.h"
229# undef BTH_PGMPOOLKIND_PT_FOR_PT
230# undef BTH_PGMPOOLKIND_ROOT
231# undef PGM_BTH_NAME
232# undef PGM_GST_TYPE
233# undef PGM_GST_NAME
234
235# ifdef VBOX_WITH_64_BITS_GUESTS
236/* Guest - AMD64 mode */
237# define PGM_GST_TYPE PGM_TYPE_AMD64
238# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
239# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
240# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
241# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
242# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
243# include "PGMGstDefs.h"
244# include "PGMAllGst.h"
245# include "PGMAllBth.h"
246# undef BTH_PGMPOOLKIND_PT_FOR_BIG
247# undef BTH_PGMPOOLKIND_PT_FOR_PT
248# undef BTH_PGMPOOLKIND_ROOT
249# undef PGM_BTH_NAME
250# undef PGM_GST_TYPE
251# undef PGM_GST_NAME
252# endif /* VBOX_WITH_64_BITS_GUESTS */
253
254# undef PGM_SHW_TYPE
255# undef PGM_SHW_NAME
256
257
258/*
259 * Shadow - Nested paging mode
260 */
261# define PGM_SHW_TYPE PGM_TYPE_NESTED
262# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
263# include "PGMAllShw.h"
264
265/* Guest - real mode */
266# define PGM_GST_TYPE PGM_TYPE_REAL
267# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
268# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
269# include "PGMGstDefs.h"
270# include "PGMAllBth.h"
271# undef PGM_BTH_NAME
272# undef PGM_GST_TYPE
273# undef PGM_GST_NAME
274
275/* Guest - protected mode */
276# define PGM_GST_TYPE PGM_TYPE_PROT
277# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
278# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
279# include "PGMGstDefs.h"
280# include "PGMAllBth.h"
281# undef PGM_BTH_NAME
282# undef PGM_GST_TYPE
283# undef PGM_GST_NAME
284
285/* Guest - 32-bit mode */
286# define PGM_GST_TYPE PGM_TYPE_32BIT
287# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
288# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
289# include "PGMGstDefs.h"
290# include "PGMAllBth.h"
291# undef PGM_BTH_NAME
292# undef PGM_GST_TYPE
293# undef PGM_GST_NAME
294
295/* Guest - PAE mode */
296# define PGM_GST_TYPE PGM_TYPE_PAE
297# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
298# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
299# include "PGMGstDefs.h"
300# include "PGMAllBth.h"
301# undef PGM_BTH_NAME
302# undef PGM_GST_TYPE
303# undef PGM_GST_NAME
304
305# ifdef VBOX_WITH_64_BITS_GUESTS
306/* Guest - AMD64 mode */
307# define PGM_GST_TYPE PGM_TYPE_AMD64
308# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
309# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
310# include "PGMGstDefs.h"
311# include "PGMAllBth.h"
312# undef PGM_BTH_NAME
313# undef PGM_GST_TYPE
314# undef PGM_GST_NAME
315# endif /* VBOX_WITH_64_BITS_GUESTS */
316
317# undef PGM_SHW_TYPE
318# undef PGM_SHW_NAME
319
320
321/*
322 * Shadow - EPT
323 */
324# define PGM_SHW_TYPE PGM_TYPE_EPT
325# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
326# include "PGMAllShw.h"
327
328/* Guest - real mode */
329# define PGM_GST_TYPE PGM_TYPE_REAL
330# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
331# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
332# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
333# include "PGMGstDefs.h"
334# include "PGMAllBth.h"
335# undef BTH_PGMPOOLKIND_PT_FOR_PT
336# undef PGM_BTH_NAME
337# undef PGM_GST_TYPE
338# undef PGM_GST_NAME
339
340/* Guest - protected mode */
341# define PGM_GST_TYPE PGM_TYPE_PROT
342# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
343# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
344# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
345# include "PGMGstDefs.h"
346# include "PGMAllBth.h"
347# undef BTH_PGMPOOLKIND_PT_FOR_PT
348# undef PGM_BTH_NAME
349# undef PGM_GST_TYPE
350# undef PGM_GST_NAME
351
352/* Guest - 32-bit mode */
353# define PGM_GST_TYPE PGM_TYPE_32BIT
354# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
355# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
356# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
357# include "PGMGstDefs.h"
358# include "PGMAllBth.h"
359# undef BTH_PGMPOOLKIND_PT_FOR_PT
360# undef PGM_BTH_NAME
361# undef PGM_GST_TYPE
362# undef PGM_GST_NAME
363
364/* Guest - PAE mode */
365# define PGM_GST_TYPE PGM_TYPE_PAE
366# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
367# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
368# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
369# include "PGMGstDefs.h"
370# include "PGMAllBth.h"
371# undef BTH_PGMPOOLKIND_PT_FOR_PT
372# undef PGM_BTH_NAME
373# undef PGM_GST_TYPE
374# undef PGM_GST_NAME
375
376# ifdef VBOX_WITH_64_BITS_GUESTS
377/* Guest - AMD64 mode */
378# define PGM_GST_TYPE PGM_TYPE_AMD64
379# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
380# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
381# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
382# include "PGMGstDefs.h"
383# include "PGMAllBth.h"
384# undef BTH_PGMPOOLKIND_PT_FOR_PT
385# undef PGM_BTH_NAME
386# undef PGM_GST_TYPE
387# undef PGM_GST_NAME
388# endif /* VBOX_WITH_64_BITS_GUESTS */
389
390# undef PGM_SHW_TYPE
391# undef PGM_SHW_NAME
392
393#endif /* !IN_RC */
394
395
396#ifndef IN_RING3
397/**
398 * #PF Handler.
399 *
400 * @returns VBox status code (appropriate for trap handling and GC return).
401 * @param pVCpu The cross context virtual CPU structure.
402 * @param uErr The trap error code.
403 * @param pRegFrame Trap register frame.
404 * @param pvFault The fault address.
405 */
406VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
407{
408 PVM pVM = pVCpu->CTX_SUFF(pVM);
409
410 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
411 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
412 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
413
414
415#ifdef VBOX_WITH_STATISTICS
416 /*
417 * Error code stats.
418 */
419 if (uErr & X86_TRAP_PF_US)
420 {
421 if (!(uErr & X86_TRAP_PF_P))
422 {
423 if (uErr & X86_TRAP_PF_RW)
424 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
425 else
426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
427 }
428 else if (uErr & X86_TRAP_PF_RW)
429 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
430 else if (uErr & X86_TRAP_PF_RSVD)
431 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
432 else if (uErr & X86_TRAP_PF_ID)
433 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
434 else
435 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
436 }
437 else
438 { /* Supervisor */
439 if (!(uErr & X86_TRAP_PF_P))
440 {
441 if (uErr & X86_TRAP_PF_RW)
442 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
443 else
444 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
445 }
446 else if (uErr & X86_TRAP_PF_RW)
447 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
448 else if (uErr & X86_TRAP_PF_ID)
449 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
450 else if (uErr & X86_TRAP_PF_RSVD)
451 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
452 }
453#endif /* VBOX_WITH_STATISTICS */
454
455 /*
456 * Call the worker.
457 */
458 bool fLockTaken = false;
459 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
460 if (fLockTaken)
461 {
462 PGM_LOCK_ASSERT_OWNER(pVM);
463 pgmUnlock(pVM);
464 }
465 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
466
467 /*
468 * Return code tweaks.
469 */
470 if (rc != VINF_SUCCESS)
471 {
472 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
473 rc = VINF_SUCCESS;
474
475# ifdef IN_RING0
476 /* Note: hack alert for difficult to reproduce problem. */
477 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
478 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
479 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
480 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
481 {
482 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
483 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
484 rc = VINF_SUCCESS;
485 }
486# endif
487 }
488
489 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
490 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
491 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
492 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
493 return rc;
494}
495#endif /* !IN_RING3 */
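/*
 * Illustrative only (hypothetical caller, not taken from this file): a ring-0
 * or raw-mode #PF exit path would typically hand the fault to the handler
 * above roughly like this, with uErrCode and GCPtrFault standing in for
 * whatever the exit handler extracted:
 *
 * @code
 *     int rc = PGMTrap0eHandler(pVCpu, uErrCode, CPUMCTX2CORE(pCtx), GCPtrFault);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *     {
 *         // the fault belongs to the guest; reflect the #PF to it
 *     }
 * @endcode
 */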
496
497
498/**
499 * Prefetch a page
500 *
501 * Typically used to sync commonly used pages before entering raw mode
502 * after a CR3 reload.
503 *
504 * @returns VBox status code suitable for scheduling.
505 * @retval VINF_SUCCESS on success.
506 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
507 * @param pVCpu The cross context virtual CPU structure.
508 * @param GCPtrPage Page to invalidate.
509 */
510VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
511{
512 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
513 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
514 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
515 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
516 return rc;
517}
518
519
520/**
521 * Gets the mapping corresponding to the specified address (if any).
522 *
523 * @returns Pointer to the mapping.
524 * @returns NULL if no mapping was found.
525 *
526 * @param pVM The cross context VM structure.
527 * @param GCPtr The guest context pointer.
528 */
529PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
530{
531 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
532 while (pMapping)
533 {
534 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
535 break;
536 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
537 return pMapping;
538 pMapping = pMapping->CTX_SUFF(pNext);
539 }
540 return NULL;
541}
542
543
544/**
545 * Verifies a range of pages for read or write access
546 *
547 * Only checks the guest's page tables
548 *
549 * @returns VBox status code.
550 * @param pVCpu The cross context virtual CPU structure.
551 * @param Addr Guest virtual address to check
552 * @param cbSize Access size
553 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
554 * @remarks Currently not in use.
555 */
556VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
557{
558 /*
559 * Validate input.
560 */
561 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
562 {
563 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
564 return VERR_INVALID_PARAMETER;
565 }
566
567 uint64_t fPage;
568 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
569 if (RT_FAILURE(rc))
570 {
571 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
572 return VINF_EM_RAW_GUEST_TRAP;
573 }
574
575 /*
576 * Check if the access would cause a page fault
577 *
578 * Note that hypervisor page directories are not present in the guest's tables, so this check
579 * is sufficient.
580 */
581 bool fWrite = !!(fAccess & X86_PTE_RW);
582 bool fUser = !!(fAccess & X86_PTE_US);
583 if ( !(fPage & X86_PTE_P)
584 || (fWrite && !(fPage & X86_PTE_RW))
585 || (fUser && !(fPage & X86_PTE_US)) )
586 {
587 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
588 return VINF_EM_RAW_GUEST_TRAP;
589 }
590 if ( RT_SUCCESS(rc)
591 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
592 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
593 return rc;
594}
595
596
597/**
598 * Verifies a range of pages for read or write access
599 *
600 * Supports handling of pages marked for dirty bit tracking and CSAM
601 *
602 * @returns VBox status code.
603 * @param pVCpu The cross context virtual CPU structure.
604 * @param Addr Guest virtual address to check
605 * @param cbSize Access size
606 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
607 */
608VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
609{
610 PVM pVM = pVCpu->CTX_SUFF(pVM);
611
612 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
613
614 /*
615 * Get going.
616 */
617 uint64_t fPageGst;
618 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
619 if (RT_FAILURE(rc))
620 {
621 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
622 return VINF_EM_RAW_GUEST_TRAP;
623 }
624
625 /*
626 * Check if the access would cause a page fault
627 *
628 * Note that hypervisor page directories are not present in the guest's tables, so this check
629 * is sufficient.
630 */
631 const bool fWrite = !!(fAccess & X86_PTE_RW);
632 const bool fUser = !!(fAccess & X86_PTE_US);
633 if ( !(fPageGst & X86_PTE_P)
634 || (fWrite && !(fPageGst & X86_PTE_RW))
635 || (fUser && !(fPageGst & X86_PTE_US)) )
636 {
637 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
638 return VINF_EM_RAW_GUEST_TRAP;
639 }
640
641 if (!pVM->pgm.s.fNestedPaging)
642 {
643 /*
644 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
645 */
646 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
647 if ( rc == VERR_PAGE_NOT_PRESENT
648 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
649 {
650 /*
651 * Page is not present in our page tables.
652 * Try to sync it!
653 */
654 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
655 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
656 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
657 if (rc != VINF_SUCCESS)
658 return rc;
659 }
660 else
661 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
662 }
663
664#if 0 /* def VBOX_STRICT; triggers too often now */
665 /*
666 * This check is a bit paranoid, but useful.
667 */
668 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
669 uint64_t fPageShw;
670 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
671 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
672 || (fWrite && !(fPageShw & X86_PTE_RW))
673 || (fUser && !(fPageShw & X86_PTE_US)) )
674 {
675 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
676 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
677 return VINF_EM_RAW_GUEST_TRAP;
678 }
679#endif
680
681 if ( RT_SUCCESS(rc)
682 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
683 || Addr + cbSize < Addr))
684 {
685 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
686 for (;;)
687 {
688 Addr += PAGE_SIZE;
689 if (cbSize > PAGE_SIZE)
690 cbSize -= PAGE_SIZE;
691 else
692 cbSize = 1;
693 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
694 if (rc != VINF_SUCCESS)
695 break;
696 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
697 break;
698 }
699 }
700 return rc;
701}
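/*
 * Illustrative only: a caller validating a guest buffer before accessing it
 * might use the routine above as sketched below (GCPtrBuf and cbBuf are
 * hypothetical locals):
 *
 * @code
 *     int rc = PGMVerifyAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_US | X86_PTE_RW);
 *     if (rc != VINF_SUCCESS)
 *         return rc; // e.g. VINF_EM_RAW_GUEST_TRAP, let the guest take the fault
 * @endcode
 */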
702
703
704/**
705 * Emulation of the invlpg instruction (HC only actually).
706 *
707 * @returns Strict VBox status code, special care required.
708 * @retval VINF_PGM_SYNC_CR3 - handled.
709 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
710 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
711 *
712 * @param pVCpu The cross context virtual CPU structure.
713 * @param GCPtrPage Page to invalidate.
714 *
715 * @remark ASSUMES the page table entry or page directory is valid. Fairly
716 * safe, but there could be edge cases!
717 *
718 * @todo Flush page or page directory only if necessary!
719 * @todo VBOXSTRICTRC
720 */
721VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
722{
723 PVM pVM = pVCpu->CTX_SUFF(pVM);
724 int rc;
725 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
726
727#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
728 /*
729 * Notify the recompiler so it can record this instruction.
730 */
731 REMNotifyInvalidatePage(pVM, GCPtrPage);
732#endif /* !IN_RING3 */
733
734
735#ifdef IN_RC
736 /*
737 * Check for conflicts and pending CR3 monitoring updates.
738 */
739 if (pgmMapAreMappingsFloating(pVM))
740 {
741 if ( pgmGetMapping(pVM, GCPtrPage)
742 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
743 {
744 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
745 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
746 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
747 return VINF_PGM_SYNC_CR3;
748 }
749
750 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
751 {
752 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
753 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
754 return VINF_EM_RAW_EMULATE_INSTR;
755 }
756 }
757#endif /* IN_RC */
758
759 /*
760 * Call paging mode specific worker.
761 */
762 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
763 pgmLock(pVM);
764 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
765 pgmUnlock(pVM);
766 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
767
768#ifdef IN_RING3
769 /*
770 * Check if we have a pending update of the CR3 monitoring.
771 */
772 if ( RT_SUCCESS(rc)
773 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
774 {
775 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
776 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
777 }
778
779# ifdef VBOX_WITH_RAW_MODE
780 /*
781 * Inform CSAM about the flush
782 *
783 * Note: This is to check if monitored pages have been changed; when we implement
784 * callbacks for virtual handlers, this is no longer required.
785 */
786 CSAMR3FlushPage(pVM, GCPtrPage);
787# endif
788#endif /* IN_RING3 */
789
790 /* Ignore all irrelevant error codes. */
791 if ( rc == VERR_PAGE_NOT_PRESENT
792 || rc == VERR_PAGE_TABLE_NOT_PRESENT
793 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
794 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
795 rc = VINF_SUCCESS;
796
797 return rc;
798}
799
800
801/**
802 * Executes an instruction using the interpreter.
803 *
804 * @returns VBox status code (appropriate for trap handling and GC return).
805 * @param pVM The cross context VM structure.
806 * @param pVCpu The cross context virtual CPU structure.
807 * @param pRegFrame Register frame.
808 * @param pvFault Fault address.
809 */
810VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
811{
812 NOREF(pVM);
813 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
814 if (rc == VERR_EM_INTERPRETER)
815 rc = VINF_EM_RAW_EMULATE_INSTR;
816 if (rc != VINF_SUCCESS)
817 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
818 return rc;
819}
820
821
822/**
823 * Gets effective page information (from the VMM page directory).
824 *
825 * @returns VBox status.
826 * @param pVCpu The cross context virtual CPU structure.
827 * @param GCPtr Guest Context virtual address of the page.
828 * @param pfFlags Where to store the flags. These are X86_PTE_*.
829 * @param pHCPhys Where to store the HC physical address of the page.
830 * This is page aligned.
831 * @remark You should use PGMMapGetPage() for pages in a mapping.
832 */
833VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
834{
835 pgmLock(pVCpu->CTX_SUFF(pVM));
836 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
837 pgmUnlock(pVCpu->CTX_SUFF(pVM));
838 return rc;
839}
840
841
842/**
843 * Modify page flags for a range of pages in the shadow context.
844 *
845 * The existing flags are ANDed with the fMask and ORed with the fFlags.
846 *
847 * @returns VBox status code.
848 * @param pVCpu The cross context virtual CPU structure.
849 * @param GCPtr Virtual address of the first page in the range.
850 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
851 * @param fMask The AND mask - page flags X86_PTE_*.
852 * Be very CAREFUL when ~'ing constants which could be 32-bit!
853 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
854 * @remark You must use PGMMapModifyPage() for pages in a mapping.
855 */
856DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
857{
858 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
859 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
860
861 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
862
863 PVM pVM = pVCpu->CTX_SUFF(pVM);
864 pgmLock(pVM);
865 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
866 pgmUnlock(pVM);
867 return rc;
868}
869
870
871/**
872 * Changes the page flags for a single page in the shadow page tables so as to
873 * make it read-only.
874 *
875 * @returns VBox status code.
876 * @param pVCpu The cross context virtual CPU structure.
877 * @param GCPtr Virtual address of the first page in the range.
878 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
879 */
880VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
881{
882 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
883}
884
885
886/**
887 * Changes the page flags for a single page in the shadow page tables so as to
888 * make it writable.
889 *
890 * The caller must know with 101% certainty that the guest page tables map this
891 * as writable too. This function will deal with shared, zero and write monitored
892 * pages.
893 *
894 * @returns VBox status code.
895 * @param pVCpu The cross context virtual CPU structure.
896 * @param GCPtr Virtual address of the first page in the range.
898 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
899 */
900VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
901{
902 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
903}
904
905
906/**
907 * Changes the page flags for a single page in the shadow page tables so as to
908 * make it not present.
909 *
910 * @returns VBox status code.
911 * @param pVCpu The cross context virtual CPU structure.
912 * @param GCPtr Virtual address of the first page in the range.
913 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
914 */
915VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
916{
917 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
918}
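/*
 * Summary of the pdmShwModifyPage() combinations used by the three wrappers
 * above: read-only clears X86_PTE_RW (fFlags=0, fMask=~X86_PTE_RW), writable
 * sets it (fFlags=X86_PTE_RW, fMask=~0), and not-present clears all flags
 * including the present bit (fFlags=0, fMask=0).
 */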
919
920
921/**
922 * Changes the page flags for a single page in the shadow page tables so as to
923 * make it supervisor and writable.
924 *
925 * This is for dealing with CR0.WP=0 and read-only user pages.
926 *
927 * @returns VBox status code.
928 * @param pVCpu The cross context virtual CPU structure.
929 * @param GCPtr Virtual address of the first page in the range.
930 * @param fBigPage Whether or not this is a big page. If it is, we have to
931 * change the shadow PDE as well. If it isn't, the caller
932 * has checked that the shadow PDE doesn't need changing.
933 * We ASSUME 4KB pages backing the big page here!
934 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
935 */
936int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
937{
938 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
939 if (rc == VINF_SUCCESS && fBigPage)
940 {
941 /* this is a bit ugly... */
942 switch (pVCpu->pgm.s.enmShadowMode)
943 {
944 case PGMMODE_32_BIT:
945 {
946 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
947 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
948 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
949 pPde->n.u1Write = 1;
950 Log(("-> PDE=%#llx (32)\n", pPde->u));
951 break;
952 }
953 case PGMMODE_PAE:
954 case PGMMODE_PAE_NX:
955 {
956 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
957 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
958 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
959 pPde->n.u1Write = 1;
960 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
961 break;
962 }
963 default:
964 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
965 }
966 }
967 return rc;
968}
969
970
971/**
972 * Gets the shadow page directory for the specified address, PAE.
973 *
974 * @returns VBox status code.
975 * @param pVCpu The cross context virtual CPU structure.
976 * @param GCPtr The address.
977 * @param uGstPdpe Guest PDPT entry. Valid.
978 * @param ppPD Receives address of page directory
979 */
980int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
981{
982 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
983 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
984 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
985 PVM pVM = pVCpu->CTX_SUFF(pVM);
986 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
987 PPGMPOOLPAGE pShwPage;
988 int rc;
989
990 PGM_LOCK_ASSERT_OWNER(pVM);
991
992 /* Allocate page directory if not present. */
993 if ( !pPdpe->n.u1Present
994 && !(pPdpe->u & X86_PDPE_PG_MASK))
995 {
996 RTGCPTR64 GCPdPt;
997 PGMPOOLKIND enmKind;
998
999 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1000 {
1001 /* AMD-V nested paging or real/protected mode without paging. */
1002 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1003 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1004 }
1005 else
1006 {
1007 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1008 {
1009 if (!(uGstPdpe & X86_PDPE_P))
1010 {
1011 /* PD not present; guest must reload CR3 to change it.
1012 * No need to monitor anything in this case.
1013 */
1014 Assert(!HMIsEnabled(pVM));
1015
1016 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1017 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1018 uGstPdpe |= X86_PDPE_P;
1019 }
1020 else
1021 {
1022 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1023 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1024 }
1025 }
1026 else
1027 {
1028 GCPdPt = CPUMGetGuestCR3(pVCpu);
1029 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1030 }
1031 }
1032
1033 /* Create a reference back to the PDPT by using the index in its shadow page. */
1034 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1035 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1036 &pShwPage);
1037 AssertRCReturn(rc, rc);
1038
1039 /* The PD was cached or created; hook it up now. */
1040 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1041
1042# if defined(IN_RC)
1043 /*
1044 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
1045 * PDPT entry; the CPU fetches them only during cr3 load, so any
1046 * non-present PDPT will continue to cause page faults.
1047 */
1048 ASMReloadCR3();
1049# endif
1050 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1051 }
1052 else
1053 {
1054 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1055 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1056 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1057
1058 pgmPoolCacheUsed(pPool, pShwPage);
1059 }
1060 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1061 return VINF_SUCCESS;
1062}
1063
1064
1065/**
1066 * Gets the pgm pool page for the shadow page directory mapping the given address, PAE.
1067 *
1068 * @returns VBox status code.
1069 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1070 * @param GCPtr The address.
1071 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1072 */
1073DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1074{
1075 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1076 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1077 PVM pVM = pVCpu->CTX_SUFF(pVM);
1078
1079 PGM_LOCK_ASSERT_OWNER(pVM);
1080
1081 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1082 if (!pPdpt->a[iPdPt].n.u1Present)
1083 {
1084 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1085 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1086 }
1087 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1088
1089 /* Fetch the pgm pool shadow descriptor. */
1090 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1091 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1092
1093 *ppShwPde = pShwPde;
1094 return VINF_SUCCESS;
1095}
1096
1097#ifndef IN_RC
1098
1099/**
1100 * Syncs the SHADOW page directory pointer for the specified address.
1101 *
1102 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1103 *
1104 * The caller is responsible for making sure the guest has a valid PD before
1105 * calling this function.
1106 *
1107 * @returns VBox status.
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param GCPtr The address.
1110 * @param uGstPml4e Guest PML4 entry (valid).
1111 * @param uGstPdpe Guest PDPT entry (valid).
1112 * @param ppPD Receives address of page directory
1113 */
1114static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1115{
1116 PVM pVM = pVCpu->CTX_SUFF(pVM);
1117 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1118 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1119 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1120 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1121 PPGMPOOLPAGE pShwPage;
1122 int rc;
1123
1124 PGM_LOCK_ASSERT_OWNER(pVM);
1125
1126 /* Allocate page directory pointer table if not present. */
1127 if ( !pPml4e->n.u1Present
1128 && !(pPml4e->u & X86_PML4E_PG_MASK))
1129 {
1130 RTGCPTR64 GCPml4;
1131 PGMPOOLKIND enmKind;
1132
1133 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1134
1135 if (fNestedPagingOrNoGstPaging)
1136 {
1137 /* AMD-V nested paging or real/protected mode without paging */
1138 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1139 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1140 }
1141 else
1142 {
1143 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1144 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1145 }
1146
1147 /* Create a reference back to the PDPT by using the index in its shadow page. */
1148 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1149 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1150 &pShwPage);
1151 AssertRCReturn(rc, rc);
1152 }
1153 else
1154 {
1155 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1156 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1157
1158 pgmPoolCacheUsed(pPool, pShwPage);
1159 }
1160 /* The PDPT was cached or created; hook it up now. */
1161 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1162
1163 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1164 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1165 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1166
1167 /* Allocate page directory if not present. */
1168 if ( !pPdpe->n.u1Present
1169 && !(pPdpe->u & X86_PDPE_PG_MASK))
1170 {
1171 RTGCPTR64 GCPdPt;
1172 PGMPOOLKIND enmKind;
1173
1174 if (fNestedPagingOrNoGstPaging)
1175 {
1176 /* AMD-V nested paging or real/protected mode without paging */
1177 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1178 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1179 }
1180 else
1181 {
1182 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1183 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1184 }
1185
1186 /* Create a reference back to the PDPT by using the index in its shadow page. */
1187 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1188 pShwPage->idx, iPdPt, false /*fLockPage*/,
1189 &pShwPage);
1190 AssertRCReturn(rc, rc);
1191 }
1192 else
1193 {
1194 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1195 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1196
1197 pgmPoolCacheUsed(pPool, pShwPage);
1198 }
1199 /* The PD was cached or created; hook it up now. */
1200 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1201
1202 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1203 return VINF_SUCCESS;
1204}
1205
1206
1207/**
1208 * Gets the SHADOW page directory pointer for the specified address (long mode).
1209 *
1210 * @returns VBox status.
1211 * @param pVCpu The cross context virtual CPU structure.
1212 * @param GCPtr The address.
1213 * @param ppPml4e Receives the address of the PML4 entry. Optional.
 * @param ppPdpt Receives address of pdpt
1214 * @param ppPD Receives address of page directory
1215 */
1216DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1217{
1218 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1219 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1220
1221 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1222
1223 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1224 if (ppPml4e)
1225 *ppPml4e = (PX86PML4E)pPml4e;
1226
1227 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1228
1229 if (!pPml4e->n.u1Present)
1230 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1231
1232 PVM pVM = pVCpu->CTX_SUFF(pVM);
1233 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1234 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1235 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1236
1237 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1238 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1239 if (!pPdpt->a[iPdPt].n.u1Present)
1240 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1241
1242 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1243 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1244
1245 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1246 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1247 return VINF_SUCCESS;
1248}
1249
1250
1251/**
1252 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1253 * backing pages in case the PDPT or PML4 entry is missing.
1254 *
1255 * @returns VBox status.
1256 * @param pVCpu The cross context virtual CPU structure.
1257 * @param GCPtr The address.
1258 * @param ppPdpt Receives address of pdpt
1259 * @param ppPD Receives address of page directory
1260 */
1261static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1262{
1263 PVM pVM = pVCpu->CTX_SUFF(pVM);
1264 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1265 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1266 PEPTPML4 pPml4;
1267 PEPTPML4E pPml4e;
1268 PPGMPOOLPAGE pShwPage;
1269 int rc;
1270
1271 Assert(pVM->pgm.s.fNestedPaging);
1272 PGM_LOCK_ASSERT_OWNER(pVM);
1273
1274 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1275 Assert(pPml4);
1276
1277 /* Allocate page directory pointer table if not present. */
1278 pPml4e = &pPml4->a[iPml4];
1279 if ( !pPml4e->n.u1Present
1280 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1281 {
1282 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1283 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1284
1285 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1286 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1287 &pShwPage);
1288 AssertRCReturn(rc, rc);
1289 }
1290 else
1291 {
1292 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1293 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1294
1295 pgmPoolCacheUsed(pPool, pShwPage);
1296 }
1297 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1298 pPml4e->u = pShwPage->Core.Key;
1299 pPml4e->n.u1Present = 1;
1300 pPml4e->n.u1Write = 1;
1301 pPml4e->n.u1Execute = 1;
1302
1303 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1304 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1305 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1306
1307 if (ppPdpt)
1308 *ppPdpt = pPdpt;
1309
1310 /* Allocate page directory if not present. */
1311 if ( !pPdpe->n.u1Present
1312 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1313 {
1314 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1315 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1316 pShwPage->idx, iPdPt, false /*fLockPage*/,
1317 &pShwPage);
1318 AssertRCReturn(rc, rc);
1319 }
1320 else
1321 {
1322 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1323 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1324
1325 pgmPoolCacheUsed(pPool, pShwPage);
1326 }
1327 /* The PD was cached or created; hook it up now and fill with the default value. */
1328 pPdpe->u = pShwPage->Core.Key;
1329 pPdpe->n.u1Present = 1;
1330 pPdpe->n.u1Write = 1;
1331 pPdpe->n.u1Execute = 1;
1332
1333 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1334 return VINF_SUCCESS;
1335}
1336
1337#endif /* !IN_RC */
1338
1339#ifdef IN_RING0
1340/**
1341 * Synchronizes a range of nested page table entries.
1342 *
1343 * The caller must own the PGM lock.
1344 *
1345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1346 * @param GCPhysFault Where to start.
1347 * @param cPages How many pages whose entries should be synced.
1348 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1349 * host paging mode for AMD-V).
1350 */
1351int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1352{
1353 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1354
1355 int rc;
1356 switch (enmShwPagingMode)
1357 {
1358 case PGMMODE_32_BIT:
1359 {
1360 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1361 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1362 break;
1363 }
1364
1365 case PGMMODE_PAE:
1366 case PGMMODE_PAE_NX:
1367 {
1368 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1369 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1370 break;
1371 }
1372
1373 case PGMMODE_AMD64:
1374 case PGMMODE_AMD64_NX:
1375 {
1376 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1377 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1378 break;
1379 }
1380
1381 case PGMMODE_EPT:
1382 {
1383 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1384 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1385 break;
1386 }
1387
1388 default:
1389 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1390 }
1391 return rc;
1392}
1393#endif /* IN_RING0 */
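/*
 * Note on the routine above: nested and EPT shadow tables have no guest PDE
 * counterpart, so a dummy PDE (present, user, writable, accessed) is fed to
 * the protected-mode-guest SyncPage templates to drive the synchronization.
 */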
1394
1395
1396/**
1397 * Gets effective Guest OS page information.
1398 *
1399 * When GCPtr is in a big page, the function will return as if it was a normal
1400 * 4KB page. If the need for distinguishing between big and normal page becomes
1401 * necessary at a later point, a dedicated variant of this API can be added for that
1402 * purpose.
1403 *
1404 * @returns VBox status.
1405 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1406 * @param GCPtr Guest Context virtual address of the page.
1407 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1408 * @param pGCPhys Where to store the GC physical address of the page.
1409 * This is page aligned. The fact that the
1410 */
1411VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1412{
1413 VMCPU_ASSERT_EMT(pVCpu);
1414 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1415}
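/*
 * Illustrative only: translating a guest linear address and checking its
 * attributes with the API above (GCPtr and the locals are hypothetical):
 *
 * @code
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // the guest maps the page writable at GCPhys
 *     }
 * @endcode
 */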
1416
1417
1418/**
1419 * Performs a guest page table walk.
1420 *
1421 * The guest should be in paged protect mode or long mode when making a call to
1422 * this function.
1423 *
1424 * @returns VBox status code.
1425 * @retval VINF_SUCCESS on success.
1426 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1427 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1428 * not valid, except that enmType is set to PGMPTWALKGSTTYPE_INVALID.
1429 *
1430 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1431 * @param GCPtr The guest virtual address to walk by.
1432 * @param pWalk Where to return the walk result. This is valid on some
1433 * error codes as well.
1434 */
1435int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1436{
1437 VMCPU_ASSERT_EMT(pVCpu);
1438 switch (pVCpu->pgm.s.enmGuestMode)
1439 {
1440 case PGMMODE_32_BIT:
1441 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1442 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1443
1444 case PGMMODE_PAE:
1445 case PGMMODE_PAE_NX:
1446 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1447 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1448
1449#if !defined(IN_RC)
1450 case PGMMODE_AMD64:
1451 case PGMMODE_AMD64_NX:
1452 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1453 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1454#endif
1455
1456 case PGMMODE_REAL:
1457 case PGMMODE_PROTECTED:
1458 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1459 return VERR_PGM_NOT_USED_IN_MODE;
1460
1461#if defined(IN_RC)
1462 case PGMMODE_AMD64:
1463 case PGMMODE_AMD64_NX:
1464#endif
1465 case PGMMODE_NESTED:
1466 case PGMMODE_EPT:
1467 default:
1468 AssertFailed();
1469 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1470 return VERR_PGM_NOT_USED_IN_MODE;
1471 }
1472}
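/*
 * Illustrative only: a minimal sketch of walking the guest tables with the
 * helper above (assumes the guest is in paged protected or long mode):
 *
 * @code
 *     PGMPTWALKGST Walk;
 *     int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // Walk.enmType tells which member of Walk.u holds the mode
 *         // specific walk result
 *     }
 * @endcode
 */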
1473
1474
1475/**
1476 * Checks if the page is present.
1477 *
1478 * @returns true if the page is present.
1479 * @returns false if the page is not present.
1480 * @param pVCpu The cross context virtual CPU structure.
1481 * @param GCPtr Address within the page.
1482 */
1483VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1484{
1485 VMCPU_ASSERT_EMT(pVCpu);
1486 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1487 return RT_SUCCESS(rc);
1488}
1489
1490
1491/**
1492 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1493 *
1494 * @returns VBox status.
1495 * @param pVCpu The cross context virtual CPU structure.
1496 * @param GCPtr The address of the first page.
1497 * @param cb The size of the range in bytes.
1498 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1499 */
1500VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1501{
1502 VMCPU_ASSERT_EMT(pVCpu);
1503 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1504}
1505
1506
1507/**
1508 * Modify page flags for a range of pages in the guest's tables
1509 *
1510 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1511 *
1512 * @returns VBox status code.
1513 * @param pVCpu The cross context virtual CPU structure.
1514 * @param GCPtr Virtual address of the first page in the range.
1515 * @param cb Size (in bytes) of the range to apply the modification to.
1516 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1517 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1518 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1519 */
1520VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1521{
1522 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1523 VMCPU_ASSERT_EMT(pVCpu);
1524
1525 /*
1526 * Validate input.
1527 */
1528 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1529 Assert(cb);
1530
1531 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1532
1533 /*
1534 * Adjust input.
1535 */
1536 cb += GCPtr & PAGE_OFFSET_MASK;
1537 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1538 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1539
1540 /*
1541 * Call worker.
1542 */
1543 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1544
1545 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1546 return rc;
1547}
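/*
 * Illustrative only: clearing the writable bit on a guest range with the API
 * above; note the 64-bit cast on the inverted mask, as warned in the
 * parameter description:
 *
 * @code
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 * @endcode
 */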
1548
1549
1550#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1551
1552/**
1553 * Performs the lazy mapping of the 32-bit guest PD.
1554 *
1555 * @returns VBox status code.
1556 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1557 * @param ppPd Where to return the pointer to the mapping. This is
1558 * always set.
1559 */
1560int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1561{
1562 PVM pVM = pVCpu->CTX_SUFF(pVM);
1563 pgmLock(pVM);
1564
1565 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1566
1567 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1568 PPGMPAGE pPage;
1569 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1570 if (RT_SUCCESS(rc))
1571 {
1572 RTHCPTR HCPtrGuestCR3;
1573 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1574 if (RT_SUCCESS(rc))
1575 {
1576 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1577# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1578 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1579# endif
1580 *ppPd = (PX86PD)HCPtrGuestCR3;
1581
1582 pgmUnlock(pVM);
1583 return VINF_SUCCESS;
1584 }
1585
1586 AssertRC(rc);
1587 }
1588 pgmUnlock(pVM);
1589
1590 *ppPd = NULL;
1591 return rc;
1592}
1593
1594
1595/**
1596 * Performs the lazy mapping of the PAE guest PDPT.
1597 *
1598 * @returns VBox status code.
1599 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1600 * @param ppPdpt Where to return the pointer to the mapping. This is
1601 * always set.
1602 */
1603int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1604{
1605 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1606 PVM pVM = pVCpu->CTX_SUFF(pVM);
1607 pgmLock(pVM);
1608
1609 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1610 PPGMPAGE pPage;
1611 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1612 if (RT_SUCCESS(rc))
1613 {
1614 RTHCPTR HCPtrGuestCR3;
1615 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1616 if (RT_SUCCESS(rc))
1617 {
1618 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1619# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1620 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1621# endif
1622 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1623
1624 pgmUnlock(pVM);
1625 return VINF_SUCCESS;
1626 }
1627
1628 AssertRC(rc);
1629 }
1630
1631 pgmUnlock(pVM);
1632 *ppPdpt = NULL;
1633 return rc;
1634}
1635
1636
1637/**
1638 * Performs the lazy mapping / updating of a PAE guest PD.
1639 *
1641 * @returns VBox status code.
1642 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1643 * @param iPdpt Which PD entry to map (0..3).
1644 * @param ppPd Where to return the pointer to the mapping. This is
1645 * always set.
1646 */
1647int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1648{
1649 PVM pVM = pVCpu->CTX_SUFF(pVM);
1650 pgmLock(pVM);
1651
1652 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1653 Assert(pGuestPDPT);
1654 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1655 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1656 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1657
1658 PPGMPAGE pPage;
1659 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1660 if (RT_SUCCESS(rc))
1661 {
1662 RTRCPTR RCPtr = NIL_RTRCPTR;
1663 RTHCPTR HCPtr = NIL_RTHCPTR;
1664#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1665 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1666 AssertRC(rc);
1667#endif
1668 if (RT_SUCCESS(rc) && fChanged)
1669 {
1670 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1671 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1672 }
1673 if (RT_SUCCESS(rc))
1674 {
1675 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1676# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1677 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1678# endif
1679 if (fChanged)
1680 {
1681 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1682 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1683 }
1684
1685 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1686 pgmUnlock(pVM);
1687 return VINF_SUCCESS;
1688 }
1689 }
1690
1691 /* Invalid page or some failure, invalidate the entry. */
1692 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1693 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1694# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1695 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1696# endif
1697 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1698
1699 pgmUnlock(pVM);
1700 return rc;
1701}
1702
1703#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1704#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1705/**
1706 * Performs the lazy mapping of the AMD64 guest PML4 table.
1707 *
1708 * @returns VBox status code.
1709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1710 * @param ppPml4 Where to return the pointer to the mapping. This will
1711 * always be set.
1712 */
1713int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1714{
1715 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1716 PVM pVM = pVCpu->CTX_SUFF(pVM);
1717 pgmLock(pVM);
1718
1719 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1720 PPGMPAGE pPage;
1721 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1722 if (RT_SUCCESS(rc))
1723 {
1724 RTHCPTR HCPtrGuestCR3;
1725 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1726 if (RT_SUCCESS(rc))
1727 {
1728 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1729# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1730 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1731# endif
1732 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1733
1734 pgmUnlock(pVM);
1735 return VINF_SUCCESS;
1736 }
1737 }
1738
1739 pgmUnlock(pVM);
1740 *ppPml4 = NULL;
1741 return rc;
1742}
1743#endif
1744
1745
1746/**
1747 * Gets the PAE PDPE values cached by the CPU.
1748 *
1749 * @returns VBox status code.
1750 * @param pVCpu The cross context virtual CPU structure.
1751 * @param paPdpes Where to return the four PDPEs. The array
1752 * pointed to must have 4 entries.
1753 */
1754VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1755{
1756 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1757
1758 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1759 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1760 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1761 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1762 return VINF_SUCCESS;
1763}
1764
1765
1766/**
1767 * Sets the PAE PDPE values cached by the CPU.
1768 *
1769 * @remarks This must be called *AFTER* PGMUpdateCR3.
1770 *
1771 * @param pVCpu The cross context virtual CPU structure.
1772 * @param paPdpes The four PDPE values. The array pointed to must
1773 * have exactly 4 entries.
1774 *
1775 * @remarks No-long-jump zone!!!
1776 */
1777VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1778{
1779 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1780
1781 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1782 {
1783 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1784 {
1785 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1786
1787 /* Force lazy remapping if it changed in any way. */
1788 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1789# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1790 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1791# endif
1792 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1793 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1794 }
1795 }
1796
1797 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1798}
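
/*
 * Illustrative sketch (hypothetical, not part of the original file): per the
 * remark on PGMGstUpdatePaePdpes above, a caller must refresh the CR3 mapping
 * first and only then push the four PDPEs.  Reading the PDPEs straight from the
 * guest page at CR3 via PGMPhysSimpleReadGCPhys is an assumption made for this
 * example; the real HM code may source them from the VMCS instead.
 */
#if 0 /* example only */
static int exampleRefreshPaePdpesUnderEpt(PVM pVM, PVMCPU pVCpu, uint64_t uGuestCr3)
{
    int rc = PGMUpdateCR3(pVCpu, uGuestCr3);    /* 1. remap the CR3 content first */
    AssertRCReturn(rc, rc);

    X86PDPE aPdpes[4];
    rc = PGMPhysSimpleReadGCPhys(pVM, &aPdpes[0],
                                 (RTGCPHYS)(uGuestCr3 & X86_CR3_PAE_PAGE_MASK), sizeof(aPdpes));
    AssertRCReturn(rc, rc);

    PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);    /* 2. cache them + invalidate stale PD maps */
    return VINF_SUCCESS;
}
#endif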
1799
1800
1801/**
1802 * Gets the current CR3 register value for the shadow memory context.
1803 * @returns CR3 value.
1804 * @param pVCpu The cross context virtual CPU structure.
1805 */
1806VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1807{
1808 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1809 AssertPtrReturn(pPoolPage, 0);
1810 return pPoolPage->Core.Key;
1811}
1812
1813
1814/**
1815 * Gets the current CR3 register value for the nested memory context.
1816 * @returns CR3 value.
1817 * @param pVCpu The cross context virtual CPU structure.
1818 */
1819VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1820{
1821 NOREF(enmShadowMode);
1822 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1823 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1824}
1825
1826
1827/**
1828 * Gets the current CR3 register value for the HC intermediate memory context.
1829 * @returns CR3 value.
1830 * @param pVM The cross context VM structure.
1831 */
1832VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1833{
1834 switch (pVM->pgm.s.enmHostMode)
1835 {
1836 case SUPPAGINGMODE_32_BIT:
1837 case SUPPAGINGMODE_32_BIT_GLOBAL:
1838 return pVM->pgm.s.HCPhysInterPD;
1839
1840 case SUPPAGINGMODE_PAE:
1841 case SUPPAGINGMODE_PAE_GLOBAL:
1842 case SUPPAGINGMODE_PAE_NX:
1843 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1844 return pVM->pgm.s.HCPhysInterPaePDPT;
1845
1846 case SUPPAGINGMODE_AMD64:
1847 case SUPPAGINGMODE_AMD64_GLOBAL:
1848 case SUPPAGINGMODE_AMD64_NX:
1849 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1850 return pVM->pgm.s.HCPhysInterPaePDPT;
1851
1852 default:
1853 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1854 return NIL_RTHCPHYS;
1855 }
1856}
1857
1858
1859/**
1860 * Gets the current CR3 register value for the RC intermediate memory context.
1861 * @returns CR3 value.
1862 * @param pVM The cross context VM structure.
1863 * @param pVCpu The cross context virtual CPU structure.
1864 */
1865VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1866{
1867 switch (pVCpu->pgm.s.enmShadowMode)
1868 {
1869 case PGMMODE_32_BIT:
1870 return pVM->pgm.s.HCPhysInterPD;
1871
1872 case PGMMODE_PAE:
1873 case PGMMODE_PAE_NX:
1874 return pVM->pgm.s.HCPhysInterPaePDPT;
1875
1876 case PGMMODE_AMD64:
1877 case PGMMODE_AMD64_NX:
1878 return pVM->pgm.s.HCPhysInterPaePML4;
1879
1880 case PGMMODE_EPT:
1881 case PGMMODE_NESTED:
1882 return 0; /* not relevant */
1883
1884 default:
1885 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1886 return NIL_RTHCPHYS;
1887 }
1888}
1889
1890
1891/**
1892 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1893 * @returns CR3 value.
1894 * @param pVM The cross context VM structure.
1895 */
1896VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1897{
1898 return pVM->pgm.s.HCPhysInterPD;
1899}
1900
1901
1902/**
1903 * Gets the CR3 register value for the PAE intermediate memory context.
1904 * @returns CR3 value.
1905 * @param pVM The cross context VM structure.
1906 */
1907VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1908{
1909 return pVM->pgm.s.HCPhysInterPaePDPT;
1910}
1911
1912
1913/**
1914 * Gets the CR3 register value for the AMD64 intermediate memory context.
1915 * @returns CR3 value.
1916 * @param pVM The cross context VM structure.
1917 */
1918VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1919{
1920 return pVM->pgm.s.HCPhysInterPaePML4;
1921}
1922
1923
1924/**
1925 * Performs and schedules necessary updates following a CR3 load or reload.
1926 *
1927 * This will normally involve mapping the guest PD or nPDPT.
1928 *
1929 * @returns VBox status code.
1930 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1931 * safely be ignored and overridden since the FF will be set too then.
1932 * @param pVCpu The cross context virtual CPU structure.
1933 * @param cr3 The new cr3.
1934 * @param fGlobal Indicates whether this is a global flush or not.
1935 */
1936VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1937{
1938 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1939 PVM pVM = pVCpu->CTX_SUFF(pVM);
1940
1941 VMCPU_ASSERT_EMT(pVCpu);
1942
1943 /*
1944 * Always flag the necessary updates; this is necessary for hardware acceleration
1945 */
1946 /** @todo optimize this, it shouldn't always be necessary. */
1947 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1948 if (fGlobal)
1949 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1950 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1951
1952 /*
1953 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1954 */
1955 int rc = VINF_SUCCESS;
1956 RTGCPHYS GCPhysCR3;
1957 switch (pVCpu->pgm.s.enmGuestMode)
1958 {
1959 case PGMMODE_PAE:
1960 case PGMMODE_PAE_NX:
1961 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1962 break;
1963 case PGMMODE_AMD64:
1964 case PGMMODE_AMD64_NX:
1965 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1966 break;
1967 default:
1968 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1969 break;
1970 }
1971 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1972
1973 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1974 {
1975 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1976 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1977 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1978 if (RT_LIKELY(rc == VINF_SUCCESS))
1979 {
1980 if (pgmMapAreMappingsFloating(pVM))
1981 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1982 }
1983 else
1984 {
1985 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1986 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1987 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1988 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1989 if (pgmMapAreMappingsFloating(pVM))
1990 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1991 }
1992
1993 if (fGlobal)
1994 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1995 else
1996 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1997 }
1998 else
1999 {
2000# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2001 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2002 if (pPool->cDirtyPages)
2003 {
2004 pgmLock(pVM);
2005 pgmPoolResetDirtyPages(pVM);
2006 pgmUnlock(pVM);
2007 }
2008# endif
2009 /*
2010 * Check if we have a pending update of the CR3 monitoring.
2011 */
2012 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2013 {
2014 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2015 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2016 }
2017 if (fGlobal)
2018 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2019 else
2020 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2021 }
2022
2023 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2024 return rc;
2025}
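
/*
 * Illustrative sketch (hypothetical, not part of the original file): a guest
 * MOV CR3 emulation path would typically call PGMFlushTLB with fGlobal derived
 * from CR4.PGE and simply let a VINF_PGM_SYNC_CR3 status ride, since the
 * corresponding force-action flag is set as well and gets processed before
 * resuming the guest.  The helper name is made up for the example.
 */
#if 0 /* example only */
static int exampleEmulateMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3)
{
    /* Without CR4.PGE all TLB flushes are effectively global. */
    bool const fGlobal = !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE);
    int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
    return VINF_SUCCESS; /* VINF_PGM_SYNC_CR3 can safely be ignored here. */
}
#endif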
2026
2027
2028/**
2029 * Performs and schedules necessary updates following a CR3 load or reload when
2030 * using nested or extended paging.
2031 *
2032 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2033 * TLB and triggering a SyncCR3.
2034 *
2035 * This will normally involve mapping the guest PD or nPDPT.
2036 *
2037 * @returns VBox status code.
2038 * @retval VINF_SUCCESS.
2039 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2040 * paging modes). This can safely be ignored and overridden since the
2041 * FF will be set too then.
2042 * @param pVCpu The cross context virtual CPU structure.
2043 * @param cr3 The new cr3.
2044 */
2045VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2046{
2047 VMCPU_ASSERT_EMT(pVCpu);
2048 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2049
2050 /* We assume we're only called in nested paging mode. */
2051 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2052 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2053 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2054
2055 /*
2056 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2057 */
2058 int rc = VINF_SUCCESS;
2059 RTGCPHYS GCPhysCR3;
2060 switch (pVCpu->pgm.s.enmGuestMode)
2061 {
2062 case PGMMODE_PAE:
2063 case PGMMODE_PAE_NX:
2064 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2065 break;
2066 case PGMMODE_AMD64:
2067 case PGMMODE_AMD64_NX:
2068 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2069 break;
2070 default:
2071 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2072 break;
2073 }
2074 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2075
2076 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2077 {
2078 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2079 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2080 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2081 }
2082
2083 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2084 return rc;
2085}
2086
2087
2088/**
2089 * Synchronize the paging structures.
2090 *
2091 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2092 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
2093 * in several places, most importantly whenever the CR3 is loaded.
2094 *
2095 * @returns VBox status code.
2096 * @param pVCpu The cross context virtual CPU structure.
2097 * @param cr0 Guest context CR0 register
2098 * @param cr3 Guest context CR3 register
2099 * @param cr4 Guest context CR4 register
2100 * @param fGlobal Including global page directories or not
2101 */
2102VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2103{
2104 int rc;
2105
2106 VMCPU_ASSERT_EMT(pVCpu);
2107
2108 /*
2109 * The pool may have pending work and may even require a return to ring-3 to
2110 * clear the whole thing.
2111 */
2112 rc = pgmPoolSyncCR3(pVCpu);
2113 if (rc != VINF_SUCCESS)
2114 return rc;
2115
2116 /*
2117 * We might be called when we shouldn't.
2118 *
2119 * The mode switching will ensure that the PD is resynced after every mode
2120 * switch. So, if we find ourselves here when in protected or real mode
2121 * we can safely clear the FF and return immediately.
2122 */
2123 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2124 {
2125 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2126 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2127 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2129 return VINF_SUCCESS;
2130 }
2131
2132 /* If global pages are not enabled (CR4.PGE clear), then all flushes are global. */
2133 if (!(cr4 & X86_CR4_PGE))
2134 fGlobal = true;
2135 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2136 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2137
2138 /*
2139 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2140 * This should be done before SyncCR3.
2141 */
2142 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2143 {
2144 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2145
2146 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2147 RTGCPHYS GCPhysCR3;
2148 switch (pVCpu->pgm.s.enmGuestMode)
2149 {
2150 case PGMMODE_PAE:
2151 case PGMMODE_PAE_NX:
2152 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2153 break;
2154 case PGMMODE_AMD64:
2155 case PGMMODE_AMD64_NX:
2156 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2157 break;
2158 default:
2159 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2160 break;
2161 }
2162 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2163
2164 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2165 {
2166 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2167 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2168 }
2169
2170 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2171 if ( rc == VINF_PGM_SYNC_CR3
2172 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2173 {
2174 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2175#ifdef IN_RING3
2176 rc = pgmPoolSyncCR3(pVCpu);
2177#else
2178 if (rc == VINF_PGM_SYNC_CR3)
2179 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2180 return VINF_PGM_SYNC_CR3;
2181#endif
2182 }
2183 AssertRCReturn(rc, rc);
2184 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2185 }
2186
2187 /*
2188 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2189 */
2190 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2191 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2192 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2193 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2194 if (rc == VINF_SUCCESS)
2195 {
2196 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2197 {
2198 /* Go back to ring 3 if a pgm pool sync is again pending. */
2199 return VINF_PGM_SYNC_CR3;
2200 }
2201
2202 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2203 {
2204 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2205 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2206 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2207 }
2208
2209 /*
2210 * Check if we have a pending update of the CR3 monitoring.
2211 */
2212 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2213 {
2214 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2215 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2216 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2217 }
2218 }
2219
2220 /*
2221 * Now flush the CR3 (guest context).
2222 */
2223 if (rc == VINF_SUCCESS)
2224 PGM_INVL_VCPU_TLBS(pVCpu);
2225 return rc;
2226}
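
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * force-action flags named above are typically consumed in the ring-3
 * forced-action processing, feeding the current guest control registers back
 * into PGMSyncCR3 roughly like this.  The helper name is made up for the example.
 */
#if 0 /* example only */
static int exampleProcessSyncCr3ForceFlags(PVMCPU pVCpu)
{
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        bool const fGlobal = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
                            CPUMGetGuestCR4(pVCpu), fGlobal);
        if (rc != VINF_SUCCESS)
            return rc; /* VINF_PGM_SYNC_CR3 means another round trip is needed. */
    }
    return VINF_SUCCESS;
}
#endif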
2227
2228
2229/**
2230 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2231 *
2232 * @returns VBox status code, with the following informational code for
2233 * VM scheduling.
2234 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2235 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2236 * (I.e. not in R3.)
2237 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2238 *
2239 * @param pVCpu The cross context virtual CPU structure.
2240 * @param cr0 The new cr0.
2241 * @param cr4 The new cr4.
2242 * @param efer The new extended feature enable register.
2243 */
2244VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2245{
2246 PGMMODE enmGuestMode;
2247
2248 VMCPU_ASSERT_EMT(pVCpu);
2249
2250 /*
2251 * Calc the new guest mode.
2252 */
2253 if (!(cr0 & X86_CR0_PE))
2254 enmGuestMode = PGMMODE_REAL;
2255 else if (!(cr0 & X86_CR0_PG))
2256 enmGuestMode = PGMMODE_PROTECTED;
2257 else if (!(cr4 & X86_CR4_PAE))
2258 {
2259 bool const fPse = !!(cr4 & X86_CR4_PSE);
2260 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2261 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2262 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2263 enmGuestMode = PGMMODE_32_BIT;
2264 }
2265 else if (!(efer & MSR_K6_EFER_LME))
2266 {
2267 if (!(efer & MSR_K6_EFER_NXE))
2268 enmGuestMode = PGMMODE_PAE;
2269 else
2270 enmGuestMode = PGMMODE_PAE_NX;
2271 }
2272 else
2273 {
2274 if (!(efer & MSR_K6_EFER_NXE))
2275 enmGuestMode = PGMMODE_AMD64;
2276 else
2277 enmGuestMode = PGMMODE_AMD64_NX;
2278 }
2279
2280 /*
2281 * Did it change?
2282 */
2283 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2284 return VINF_SUCCESS;
2285
2286 /* Flush the TLB */
2287 PGM_INVL_VCPU_TLBS(pVCpu);
2288
2289#ifdef IN_RING3
2290 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2291#else
2292 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2293 return VINF_PGM_CHANGE_MODE;
2294#endif
2295}
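
/*
 * Illustrative sketch (hypothetical, not part of the original file): a CR0/CR4
 * or EFER write emulation path would call PGMChangeMode with the new values
 * and, outside ring-3, forward VINF_PGM_CHANGE_MODE to its caller so the actual
 * mode switch can be performed by PGMR3ChangeMode on the EMT in ring-3.  The
 * helper name is made up for the example.
 */
#if 0 /* example only */
static int exampleAfterControlRegisterWrite(PVMCPU pVCpu, uint64_t uNewCr0, uint64_t uNewCr4, uint64_t uNewEfer)
{
    int rc = PGMChangeMode(pVCpu, uNewCr0, uNewCr4, uNewEfer);
# ifndef IN_RING3
    if (rc == VINF_PGM_CHANGE_MODE)
        return rc;      /* bubble up; ring-3 performs the switch */
# endif
    AssertRC(rc);
    return rc;
}
#endif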
2296
2297
2298/**
2299 * Called by CPUM or REM when CR0.WP changes to 1.
2300 *
2301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2302 * @thread EMT
2303 */
2304VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
2305{
2306 /*
2307 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
2308 *
2309 * Use the counter to judge whether there might be pool pages with active
2310 * hacks in them. If there are, we will be running the risk of messing up
2311 * the guest by allowing it to write to read-only pages. Thus, we have to
2312 * clear the page pool ASAP if there is the slightest chance.
2313 */
2314 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
2315 {
2316 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
2317
2318 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
2319 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
2320 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2321 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2322 }
2323}
2324
2325
2326/**
2327 * Gets the current guest paging mode.
2328 *
2329 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2330 *
2331 * @returns The current paging mode.
2332 * @param pVCpu The cross context virtual CPU structure.
2333 */
2334VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2335{
2336 return pVCpu->pgm.s.enmGuestMode;
2337}
2338
2339
2340/**
2341 * Gets the current shadow paging mode.
2342 *
2343 * @returns The current paging mode.
2344 * @param pVCpu The cross context virtual CPU structure.
2345 */
2346VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2347{
2348 return pVCpu->pgm.s.enmShadowMode;
2349}
2350
2351
2352/**
2353 * Gets the current host paging mode.
2354 *
2355 * @returns The current paging mode.
2356 * @param pVM The cross context VM structure.
2357 */
2358VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2359{
2360 switch (pVM->pgm.s.enmHostMode)
2361 {
2362 case SUPPAGINGMODE_32_BIT:
2363 case SUPPAGINGMODE_32_BIT_GLOBAL:
2364 return PGMMODE_32_BIT;
2365
2366 case SUPPAGINGMODE_PAE:
2367 case SUPPAGINGMODE_PAE_GLOBAL:
2368 return PGMMODE_PAE;
2369
2370 case SUPPAGINGMODE_PAE_NX:
2371 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2372 return PGMMODE_PAE_NX;
2373
2374 case SUPPAGINGMODE_AMD64:
2375 case SUPPAGINGMODE_AMD64_GLOBAL:
2376 return PGMMODE_AMD64;
2377
2378 case SUPPAGINGMODE_AMD64_NX:
2379 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2380 return PGMMODE_AMD64_NX;
2381
2382 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2383 }
2384
2385 return PGMMODE_INVALID;
2386}
2387
2388
2389/**
2390 * Get mode name.
2391 *
2392 * @returns read-only name string.
2393 * @param enmMode The mode which name is desired.
2394 */
2395VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2396{
2397 switch (enmMode)
2398 {
2399 case PGMMODE_REAL: return "Real";
2400 case PGMMODE_PROTECTED: return "Protected";
2401 case PGMMODE_32_BIT: return "32-bit";
2402 case PGMMODE_PAE: return "PAE";
2403 case PGMMODE_PAE_NX: return "PAE+NX";
2404 case PGMMODE_AMD64: return "AMD64";
2405 case PGMMODE_AMD64_NX: return "AMD64+NX";
2406 case PGMMODE_NESTED: return "Nested";
2407 case PGMMODE_EPT: return "EPT";
2408 default: return "unknown mode value";
2409 }
2410}
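
/*
 * Illustrative sketch (hypothetical, not part of the original file): the mode
 * query and name helpers above combine naturally for diagnostics, e.g. when
 * logging the current guest and shadow paging modes.
 */
#if 0 /* example only */
static void exampleLogPagingModes(PVMCPU pVCpu)
{
    Log(("Guest paging mode: %s, shadow paging mode: %s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu))));
}
#endif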
2411
2412
2413
2414/**
2415 * Notification from CPUM that the EFER.NXE bit has changed.
2416 *
2417 * @param pVCpu The cross context virtual CPU structure of the CPU for
2418 * which EFER changed.
2419 * @param fNxe The new NXE state.
2420 */
2421VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2422{
2423/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2424 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2425
2426 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2427 if (fNxe)
2428 {
2429 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2430 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2431 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2432 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2433 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2434 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2435 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2436 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2437 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2438 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2439 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2440
2441 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2442 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2443 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2444 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2445 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2446 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2447 }
2448 else
2449 {
2450 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2451 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2452 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2453 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2454 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2455 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2456 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2457 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2458 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2459 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2460 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2461
2462 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2463 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2464 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2465 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2466 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2467 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2468 }
2469}
2470
2471
2472/**
2473 * Check if any pgm pool pages are marked dirty (i.e. currently not write monitored).
2474 *
2475 * @returns true if dirty pages are present, false otherwise.
2476 * @param pVM The cross context VM structure.
2477 */
2478VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2479{
2480 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2481}
2482
2483
2484/**
2485 * Check if the calling thread currently owns the PGM lock.
2486 *
2487 * @returns bool owner/not owner
2488 * @param pVM The cross context VM structure.
2489 */
2490VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2491{
2492 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2493}
2494
2495
2496/**
2497 * Enable or disable large page usage.
2498 *
2499 * @returns VBox status code.
2500 * @param pVM The cross context VM structure.
2501 * @param fUseLargePages Whether to use large pages.
2502 */
2503VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2504{
2505 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2506
2507 pVM->fUseLargePages = fUseLargePages;
2508 return VINF_SUCCESS;
2509}
2510
2511
2512/**
2513 * Acquire the PGM lock.
2514 *
2515 * @returns VBox status code
2516 * @param pVM The cross context VM structure.
2517 */
2518#if defined(VBOX_STRICT) && defined(IN_RING3)
2519int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
2520#else
2521int pgmLock(PVM pVM)
2522#endif
2523{
2524#if defined(VBOX_STRICT) && defined(IN_RING3)
2525 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
2526#else
2527 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2528#endif
2529#if defined(IN_RC) || defined(IN_RING0)
2530 if (rc == VERR_SEM_BUSY)
2531 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2532#endif
2533 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2534 return rc;
2535}
2536
2537
2538/**
2539 * Release the PGM lock.
2540 *
2542 * @param pVM The cross context VM structure.
2543 */
2544void pgmUnlock(PVM pVM)
2545{
2546 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2547 pVM->pgm.s.cDeprecatedPageLocks = 0;
2548 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2549 if (rc == VINF_SEM_NESTED)
2550 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2551}
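
/*
 * Illustrative sketch (hypothetical, not part of the original file): the usual
 * internal pattern is a short pgmLock/pgmUnlock bracket around any access to
 * shared PGM state, exactly as the lazy-map workers earlier in this file do.
 */
#if 0 /* example only */
static void exampleTouchSharedPgmState(PVM pVM)
{
    pgmLock(pVM);                   /* the return code is asserted inside pgmLock */
    Assert(PGMIsLockOwner(pVM));    /* the calling thread now owns the critical section */
    /* ... read or update pVM->pgm.s state here ... */
    pgmUnlock(pVM);
}
#endif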
2552
2553#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2554
2555/**
2556 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2557 *
2558 * @returns VBox status code.
2559 * @param pVM The cross context VM structure.
2560 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2561 * @param GCPhys The guest physical address of the page to map. The
2562 * offset bits are not ignored.
2563 * @param ppv Where to return the address corresponding to @a GCPhys.
2564 */
2565int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2566{
2567 pgmLock(pVM);
2568
2569 /*
2570 * Convert it to a writable page and pass it on to the dynamic mapper.
2571 */
2572 int rc;
2573 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2574 if (RT_LIKELY(pPage))
2575 {
2576 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2577 if (RT_SUCCESS(rc))
2578 {
2579 void *pv;
2580 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2581 if (RT_SUCCESS(rc))
2582 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2583 }
2584 else
2585 AssertRC(rc);
2586 }
2587 else
2588 {
2589 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2590 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2591 }
2592
2593 pgmUnlock(pVM);
2594 return rc;
2595}
2596
2597#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2598#if !defined(IN_R0) || defined(LOG_ENABLED)
2599
2600/** Format handler for PGMPAGE.
2601 * @copydoc FNRTSTRFORMATTYPE */
2602static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2603 const char *pszType, void const *pvValue,
2604 int cchWidth, int cchPrecision, unsigned fFlags,
2605 void *pvUser)
2606{
2607 size_t cch;
2608 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2609 if (RT_VALID_PTR(pPage))
2610 {
2611 char szTmp[64+80];
2612
2613 cch = 0;
2614
2615 /* The single char state stuff. */
2616 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2617 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2618
2619#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2620 if (IS_PART_INCLUDED(5))
2621 {
2622 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2623 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2624 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2625 }
2626
2627 /* The type. */
2628 if (IS_PART_INCLUDED(4))
2629 {
2630 szTmp[cch++] = ':';
2631 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2632 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2633 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2634 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2635 }
2636
2637 /* The numbers. */
2638 if (IS_PART_INCLUDED(3))
2639 {
2640 szTmp[cch++] = ':';
2641 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2642 }
2643
2644 if (IS_PART_INCLUDED(2))
2645 {
2646 szTmp[cch++] = ':';
2647 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2648 }
2649
2650 if (IS_PART_INCLUDED(6))
2651 {
2652 szTmp[cch++] = ':';
2653 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2654 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2655 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2656 }
2657#undef IS_PART_INCLUDED
2658
2659 cch = pfnOutput(pvArgOutput, szTmp, cch);
2660 }
2661 else
2662 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
2663 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2664 return cch;
2665}
2666
2667
2668/** Format handler for PGMRAMRANGE.
2669 * @copydoc FNRTSTRFORMATTYPE */
2670static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2671 const char *pszType, void const *pvValue,
2672 int cchWidth, int cchPrecision, unsigned fFlags,
2673 void *pvUser)
2674{
2675 size_t cch;
2676 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2677 if (VALID_PTR(pRam))
2678 {
2679 char szTmp[80];
2680 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2681 cch = pfnOutput(pvArgOutput, szTmp, cch);
2682 }
2683 else
2684 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
2685 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2686 return cch;
2687}
2688
2689/** Format type handlers to be registered/deregistered. */
2690static const struct
2691{
2692 char szType[24];
2693 PFNRTSTRFORMATTYPE pfnHandler;
2694} g_aPgmFormatTypes[] =
2695{
2696 { "pgmpage", pgmFormatTypeHandlerPage },
2697 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2698};
2699
2700#endif /* !IN_R0 || LOG_ENABLED */
2701
2702/**
2703 * Registers the global string format types.
2704 *
2705 * This should be called at module load time or in some other manner that ensures
2706 * that it's called exactly one time.
2707 *
2708 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2709 */
2710VMMDECL(int) PGMRegisterStringFormatTypes(void)
2711{
2712#if !defined(IN_R0) || defined(LOG_ENABLED)
2713 int rc = VINF_SUCCESS;
2714 unsigned i;
2715 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2716 {
2717 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2718# ifdef IN_RING0
2719 if (rc == VERR_ALREADY_EXISTS)
2720 {
2721 /* in case of cleanup failure in ring-0 */
2722 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2723 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2724 }
2725# endif
2726 }
2727 if (RT_FAILURE(rc))
2728 while (i-- > 0)
2729 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2730
2731 return rc;
2732#else
2733 return VINF_SUCCESS;
2734#endif
2735}
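
/*
 * Illustrative sketch (hypothetical, not part of the original file): once the
 * custom types are registered, PGMPAGE and PGMRAMRANGE pointers can be logged
 * via IPRT's %R[type] extension format, which is what the handlers above exist
 * for.
 */
#if 0 /* example only */
static void exampleLogPageAndRange(PCPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("page=%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}
#endif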
2736
2737
2738/**
2739 * Deregisters the global string format types.
2740 *
2741 * This should be called at module unload time or in some other manner that
2742 * ensures that it's called exactly one time.
2743 */
2744VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2745{
2746#if !defined(IN_R0) || defined(LOG_ENABLED)
2747 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2748 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2749#endif
2750}
2751
2752#ifdef VBOX_STRICT
2753
2754/**
2755 * Asserts that there are no mapping conflicts.
2756 *
2757 * @returns Number of conflicts.
2758 * @param pVM The cross context VM structure.
2759 */
2760VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2761{
2762 unsigned cErrors = 0;
2763
2764 /* Only applies to raw mode -> 1 VCPU */
2765 Assert(pVM->cCpus == 1);
2766 PVMCPU pVCpu = &pVM->aCpus[0];
2767
2768 /*
2769 * Check for mapping conflicts.
2770 */
2771 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2772 pMapping;
2773 pMapping = pMapping->CTX_SUFF(pNext))
2774 {
2775 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2776 for (RTGCPTR GCPtr = pMapping->GCPtr;
2777 GCPtr <= pMapping->GCPtrLast;
2778 GCPtr += PAGE_SIZE)
2779 {
2780 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2781 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2782 {
2783 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2784 cErrors++;
2785 break;
2786 }
2787 }
2788 }
2789
2790 return cErrors;
2791}
2792
2793
2794/**
2795 * Asserts that everything related to the guest CR3 is correctly shadowed.
2796 *
2797 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2798 * and assert the correctness of the guest CR3 mapping before asserting that the
2799 * shadow page tables are in sync with the guest page tables.
2800 *
2801 * @returns Number of conflicts.
2802 * @param pVM The cross context VM structure.
2803 * @param pVCpu The cross context virtual CPU structure.
2804 * @param cr3 The current guest CR3 register value.
2805 * @param cr4 The current guest CR4 register value.
2806 */
2807VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2808{
2809 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2810 pgmLock(pVM);
2811 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2812 pgmUnlock(pVM);
2813 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2814 return cErrors;
2815}
2816
2817#endif /* VBOX_STRICT */