VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@37529

Last change on this file since 37529 was 37452, checked in by vboxsync, 14 years ago

IOM,PDMCritSect: Extended PDMCritSectEnter to handle rcBusy=VINF_SUCCESS as a request to call ring-3 to acquire a busy lock. Implemented device level locking in the MMIO code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 84.8 KB
1/* $Id: PGMAll.cpp 37452 2011-06-14 18:13:48Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/sup.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/csam.h>
30#include <VBox/vmm/patm.h>
31#include <VBox/vmm/trpm.h>
32#include <VBox/vmm/rem.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hwaccm.h>
35#include <VBox/vmm/hwacc_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vm.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
52 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
53 */
54typedef struct PGMHVUSTATE
55{
56 /** The VM handle. */
57 PVM pVM;
58 /** The VMCPU handle. */
59 PVMCPU pVCpu;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
72#ifndef IN_RC
73static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
74static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
75#endif
76
77
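/*
 * Note: PGMAllShw.h, PGMAllGst.h and PGMAllBth.h are code templates. Each
 * inclusion below, parameterized by the PGM_SHW_TYPE / PGM_GST_TYPE values
 * and the PGM_*_NAME() name mangling macros defined just before it,
 * instantiates the shadow, guest and both-mode workers for one paging mode
 * combination. The instantiated workers are reached at runtime through
 * dispatch macros such as PGM_SHW_PFN() and PGM_BTH_PFN() used further down
 * in this file.
 */
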
78/*
79 * Shadow - 32-bit mode
80 */
81#define PGM_SHW_TYPE PGM_TYPE_32BIT
82#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
83#include "PGMAllShw.h"
84
85/* Guest - real mode */
86#define PGM_GST_TYPE PGM_TYPE_REAL
87#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
88#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
89#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
90#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
91#include "PGMGstDefs.h"
92#include "PGMAllGst.h"
93#include "PGMAllBth.h"
94#undef BTH_PGMPOOLKIND_PT_FOR_PT
95#undef BTH_PGMPOOLKIND_ROOT
96#undef PGM_BTH_NAME
97#undef PGM_GST_TYPE
98#undef PGM_GST_NAME
99
100/* Guest - protected mode */
101#define PGM_GST_TYPE PGM_TYPE_PROT
102#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
103#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
104#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
105#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
106#include "PGMGstDefs.h"
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMGstDefs.h"
123#include "PGMAllGst.h"
124#include "PGMAllBth.h"
125#undef BTH_PGMPOOLKIND_PT_FOR_BIG
126#undef BTH_PGMPOOLKIND_PT_FOR_PT
127#undef BTH_PGMPOOLKIND_ROOT
128#undef PGM_BTH_NAME
129#undef PGM_GST_TYPE
130#undef PGM_GST_NAME
131
132#undef PGM_SHW_TYPE
133#undef PGM_SHW_NAME
134
135
136/*
137 * Shadow - PAE mode
138 */
139#define PGM_SHW_TYPE PGM_TYPE_PAE
140#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
141#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
142#include "PGMAllShw.h"
143
144/* Guest - real mode */
145#define PGM_GST_TYPE PGM_TYPE_REAL
146#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
147#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
148#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
149#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
150#include "PGMGstDefs.h"
151#include "PGMAllBth.h"
152#undef BTH_PGMPOOLKIND_PT_FOR_PT
153#undef BTH_PGMPOOLKIND_ROOT
154#undef PGM_BTH_NAME
155#undef PGM_GST_TYPE
156#undef PGM_GST_NAME
157
158/* Guest - protected mode */
159#define PGM_GST_TYPE PGM_TYPE_PROT
160#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
161#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
162#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
163#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
164#include "PGMGstDefs.h"
165#include "PGMAllBth.h"
166#undef BTH_PGMPOOLKIND_PT_FOR_PT
167#undef BTH_PGMPOOLKIND_ROOT
168#undef PGM_BTH_NAME
169#undef PGM_GST_TYPE
170#undef PGM_GST_NAME
171
172/* Guest - 32-bit mode */
173#define PGM_GST_TYPE PGM_TYPE_32BIT
174#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
175#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
176#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
177#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
178#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
179#include "PGMGstDefs.h"
180#include "PGMAllBth.h"
181#undef BTH_PGMPOOLKIND_PT_FOR_BIG
182#undef BTH_PGMPOOLKIND_PT_FOR_PT
183#undef BTH_PGMPOOLKIND_ROOT
184#undef PGM_BTH_NAME
185#undef PGM_GST_TYPE
186#undef PGM_GST_NAME
187
188
189/* Guest - PAE mode */
190#define PGM_GST_TYPE PGM_TYPE_PAE
191#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
192#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
193#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
194#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
195#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
196#include "PGMGstDefs.h"
197#include "PGMAllGst.h"
198#include "PGMAllBth.h"
199#undef BTH_PGMPOOLKIND_PT_FOR_BIG
200#undef BTH_PGMPOOLKIND_PT_FOR_PT
201#undef BTH_PGMPOOLKIND_ROOT
202#undef PGM_BTH_NAME
203#undef PGM_GST_TYPE
204#undef PGM_GST_NAME
205
206#undef PGM_SHW_TYPE
207#undef PGM_SHW_NAME
208
209
210#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
211/*
212 * Shadow - AMD64 mode
213 */
214# define PGM_SHW_TYPE PGM_TYPE_AMD64
215# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
216# include "PGMAllShw.h"
217
218/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
219# define PGM_GST_TYPE PGM_TYPE_PROT
220# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
221# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
222# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
223# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
224# include "PGMGstDefs.h"
225# include "PGMAllBth.h"
226# undef BTH_PGMPOOLKIND_PT_FOR_PT
227# undef BTH_PGMPOOLKIND_ROOT
228# undef PGM_BTH_NAME
229# undef PGM_GST_TYPE
230# undef PGM_GST_NAME
231
232# ifdef VBOX_WITH_64_BITS_GUESTS
233/* Guest - AMD64 mode */
234# define PGM_GST_TYPE PGM_TYPE_AMD64
235# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
236# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
237# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
238# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
239# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
240# include "PGMGstDefs.h"
241# include "PGMAllGst.h"
242# include "PGMAllBth.h"
243# undef BTH_PGMPOOLKIND_PT_FOR_BIG
244# undef BTH_PGMPOOLKIND_PT_FOR_PT
245# undef BTH_PGMPOOLKIND_ROOT
246# undef PGM_BTH_NAME
247# undef PGM_GST_TYPE
248# undef PGM_GST_NAME
249# endif /* VBOX_WITH_64_BITS_GUESTS */
250
251# undef PGM_SHW_TYPE
252# undef PGM_SHW_NAME
253
254
255/*
256 * Shadow - Nested paging mode
257 */
258# define PGM_SHW_TYPE PGM_TYPE_NESTED
259# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
260# include "PGMAllShw.h"
261
262/* Guest - real mode */
263# define PGM_GST_TYPE PGM_TYPE_REAL
264# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
265# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
266# include "PGMGstDefs.h"
267# include "PGMAllBth.h"
268# undef PGM_BTH_NAME
269# undef PGM_GST_TYPE
270# undef PGM_GST_NAME
271
272/* Guest - protected mode */
273# define PGM_GST_TYPE PGM_TYPE_PROT
274# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
275# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
276# include "PGMGstDefs.h"
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - 32-bit mode */
283# define PGM_GST_TYPE PGM_TYPE_32BIT
284# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
286# include "PGMGstDefs.h"
287# include "PGMAllBth.h"
288# undef PGM_BTH_NAME
289# undef PGM_GST_TYPE
290# undef PGM_GST_NAME
291
292/* Guest - PAE mode */
293# define PGM_GST_TYPE PGM_TYPE_PAE
294# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
296# include "PGMGstDefs.h"
297# include "PGMAllBth.h"
298# undef PGM_BTH_NAME
299# undef PGM_GST_TYPE
300# undef PGM_GST_NAME
301
302# ifdef VBOX_WITH_64_BITS_GUESTS
303/* Guest - AMD64 mode */
304# define PGM_GST_TYPE PGM_TYPE_AMD64
305# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
306# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
307# include "PGMGstDefs.h"
308# include "PGMAllBth.h"
309# undef PGM_BTH_NAME
310# undef PGM_GST_TYPE
311# undef PGM_GST_NAME
312# endif /* VBOX_WITH_64_BITS_GUESTS */
313
314# undef PGM_SHW_TYPE
315# undef PGM_SHW_NAME
316
317
318/*
319 * Shadow - EPT
320 */
321# define PGM_SHW_TYPE PGM_TYPE_EPT
322# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
323# include "PGMAllShw.h"
324
325/* Guest - real mode */
326# define PGM_GST_TYPE PGM_TYPE_REAL
327# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
328# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
329# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330# include "PGMGstDefs.h"
331# include "PGMAllBth.h"
332# undef BTH_PGMPOOLKIND_PT_FOR_PT
333# undef PGM_BTH_NAME
334# undef PGM_GST_TYPE
335# undef PGM_GST_NAME
336
337/* Guest - protected mode */
338# define PGM_GST_TYPE PGM_TYPE_PROT
339# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
340# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
341# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
342# include "PGMGstDefs.h"
343# include "PGMAllBth.h"
344# undef BTH_PGMPOOLKIND_PT_FOR_PT
345# undef PGM_BTH_NAME
346# undef PGM_GST_TYPE
347# undef PGM_GST_NAME
348
349/* Guest - 32-bit mode */
350# define PGM_GST_TYPE PGM_TYPE_32BIT
351# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
352# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
353# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef BTH_PGMPOOLKIND_PT_FOR_PT
357# undef PGM_BTH_NAME
358# undef PGM_GST_TYPE
359# undef PGM_GST_NAME
360
361/* Guest - PAE mode */
362# define PGM_GST_TYPE PGM_TYPE_PAE
363# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
365# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
366# include "PGMGstDefs.h"
367# include "PGMAllBth.h"
368# undef BTH_PGMPOOLKIND_PT_FOR_PT
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372
373# ifdef VBOX_WITH_64_BITS_GUESTS
374/* Guest - AMD64 mode */
375# define PGM_GST_TYPE PGM_TYPE_AMD64
376# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
377# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
378# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
379# include "PGMGstDefs.h"
380# include "PGMAllBth.h"
381# undef BTH_PGMPOOLKIND_PT_FOR_PT
382# undef PGM_BTH_NAME
383# undef PGM_GST_TYPE
384# undef PGM_GST_NAME
385# endif /* VBOX_WITH_64_BITS_GUESTS */
386
387# undef PGM_SHW_TYPE
388# undef PGM_SHW_NAME
389
390#endif /* !IN_RC */
391
392
393#ifndef IN_RING3
394/**
395 * #PF Handler.
396 *
397 * @returns VBox status code (appropriate for trap handling and GC return).
398 * @param pVCpu VMCPU handle.
399 * @param uErr The trap error code.
400 * @param pRegFrame Trap register frame.
401 * @param pvFault The fault address.
402 */
403VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
404{
405 PVM pVM = pVCpu->CTX_SUFF(pVM);
406
407 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
408 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
409 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
410
411
412#ifdef VBOX_WITH_STATISTICS
413 /*
414 * Error code stats.
415 */
416 if (uErr & X86_TRAP_PF_US)
417 {
418 if (!(uErr & X86_TRAP_PF_P))
419 {
420 if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
422 else
423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
424 }
425 else if (uErr & X86_TRAP_PF_RW)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
427 else if (uErr & X86_TRAP_PF_RSVD)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
429 else if (uErr & X86_TRAP_PF_ID)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
431 else
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
433 }
434 else
435 { /* Supervisor */
436 if (!(uErr & X86_TRAP_PF_P))
437 {
438 if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
440 else
441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
442 }
443 else if (uErr & X86_TRAP_PF_RW)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
445 else if (uErr & X86_TRAP_PF_ID)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
447 else if (uErr & X86_TRAP_PF_RSVD)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
449 }
450#endif /* VBOX_WITH_STATISTICS */
451
452 /*
453 * Call the worker.
454 */
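 /* Note: the mode specific worker may return with the PGM lock still held
 * (fLockTaken); it is released here so the lock never leaks back to the
 * caller. */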
455 bool fLockTaken = false;
456 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
457 if (fLockTaken)
458 {
459 PGM_LOCK_ASSERT_OWNER(pVM);
460 pgmUnlock(pVM);
461 }
462 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
463
464 /*
465 * Return code tweaks.
466 */
467 if (rc != VINF_SUCCESS)
468 {
469 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
470 rc = VINF_SUCCESS;
471
472# ifdef IN_RING0
473 /* Note: hack alert for difficult to reproduce problem. */
474 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
475 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
476 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
477 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
478 {
479 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
480 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
481 rc = VINF_SUCCESS;
482 }
483# endif
484 }
485
486 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
487 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
488 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
489 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
490 return rc;
491}
492#endif /* !IN_RING3 */
493
494
495/**
496 * Prefetch a page
497 *
498 * Typically used to sync commonly used pages before entering raw mode
499 * after a CR3 reload.
500 *
501 * @returns VBox status code suitable for scheduling.
502 * @retval VINF_SUCCESS on success.
503 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
504 * @param pVCpu VMCPU handle.
505 * @param GCPtrPage Page to invalidate.
506 */
507VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
508{
509 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
510 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
511 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
512 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
513 return rc;
514}
515
516
517/**
518 * Gets the mapping corresponding to the specified address (if any).
519 *
520 * @returns Pointer to the mapping.
521 * @returns NULL if not found.
522 *
523 * @param pVM The virtual machine.
524 * @param GCPtr The guest context pointer.
525 */
526PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
527{
528 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
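 /* The mapping list is kept sorted by address, so the walk can stop as soon
 * as an entry starting above GCPtr is encountered. */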
529 while (pMapping)
530 {
531 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
532 break;
533 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
534 return pMapping;
535 pMapping = pMapping->CTX_SUFF(pNext);
536 }
537 return NULL;
538}
539
540
541/**
542 * Verifies a range of pages for read or write access
543 *
544 * Only checks the guest's page tables
545 *
546 * @returns VBox status code.
547 * @param pVCpu VMCPU handle.
548 * @param Addr Guest virtual address to check
549 * @param cbSize Access size
550 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
551 * @remarks Currently not in use.
552 */
553VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
554{
555 /*
556 * Validate input.
557 */
558 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
559 {
560 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
561 return VERR_INVALID_PARAMETER;
562 }
563
564 uint64_t fPage;
565 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
566 if (RT_FAILURE(rc))
567 {
568 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
569 return VINF_EM_RAW_GUEST_TRAP;
570 }
571
572 /*
573 * Check if the access would cause a page fault
574 *
575 * Note that hypervisor page directories are not present in the guest's tables, so this check
576 * is sufficient.
577 */
578 bool fWrite = !!(fAccess & X86_PTE_RW);
579 bool fUser = !!(fAccess & X86_PTE_US);
580 if ( !(fPage & X86_PTE_P)
581 || (fWrite && !(fPage & X86_PTE_RW))
582 || (fUser && !(fPage & X86_PTE_US)) )
583 {
584 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
585 return VINF_EM_RAW_GUEST_TRAP;
586 }
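 /* If the range crosses a page boundary, validate the remaining pages one
 * page at a time. */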
587 if ( RT_SUCCESS(rc)
588 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1))
589 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
590 return rc;
591}
592
593
594/**
595 * Verifies a range of pages for read or write access
596 *
597 * Supports handling of pages marked for dirty bit tracking and CSAM
598 *
599 * @returns VBox status code.
600 * @param pVCpu VMCPU handle.
601 * @param Addr Guest virtual address to check
602 * @param cbSize Access size
603 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
604 */
605VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
606{
607 PVM pVM = pVCpu->CTX_SUFF(pVM);
608
609 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
610
611 /*
612 * Get going.
613 */
614 uint64_t fPageGst;
615 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
616 if (RT_FAILURE(rc))
617 {
618 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
619 return VINF_EM_RAW_GUEST_TRAP;
620 }
621
622 /*
623 * Check if the access would cause a page fault
624 *
625 * Note that hypervisor page directories are not present in the guest's tables, so this check
626 * is sufficient.
627 */
628 const bool fWrite = !!(fAccess & X86_PTE_RW);
629 const bool fUser = !!(fAccess & X86_PTE_US);
630 if ( !(fPageGst & X86_PTE_P)
631 || (fWrite && !(fPageGst & X86_PTE_RW))
632 || (fUser && !(fPageGst & X86_PTE_US)) )
633 {
634 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
635 return VINF_EM_RAW_GUEST_TRAP;
636 }
637
638 if (!pVM->pgm.s.fNestedPaging)
639 {
640 /*
641 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
642 */
643 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
644 if ( rc == VERR_PAGE_NOT_PRESENT
645 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
646 {
647 /*
648 * Page is not present in our page tables.
649 * Try to sync it!
650 */
651 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
652 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
653 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
654 if (rc != VINF_SUCCESS)
655 return rc;
656 }
657 else
658 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
659 }
660
661#if 0 /* def VBOX_STRICT; triggers too often now */
662 /*
663 * This check is a bit paranoid, but useful.
664 */
665 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
666 uint64_t fPageShw;
667 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
668 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
669 || (fWrite && !(fPageShw & X86_PTE_RW))
670 || (fUser && !(fPageShw & X86_PTE_US)) )
671 {
672 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
673 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
674 return VINF_EM_RAW_GUEST_TRAP;
675 }
676#endif
677
678 if ( RT_SUCCESS(rc)
679 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
680 || Addr + cbSize < Addr))
681 {
682 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
683 for (;;)
684 {
685 Addr += PAGE_SIZE;
686 if (cbSize > PAGE_SIZE)
687 cbSize -= PAGE_SIZE;
688 else
689 cbSize = 1;
690 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
691 if (rc != VINF_SUCCESS)
692 break;
693 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
694 break;
695 }
696 }
697 return rc;
698}
699
700
701/**
702 * Emulation of the invlpg instruction (HC only actually).
703 *
704 * @returns Strict VBox status code, special care required.
705 * @retval VINF_PGM_SYNC_CR3 - handled.
706 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
707 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
708 *
709 * @param pVCpu VMCPU handle.
710 * @param GCPtrPage Page to invalidate.
711 *
712 * @remark ASSUMES the page table entry or page directory is valid. Fairly
713 * safe, but there could be edge cases!
714 *
715 * @todo Flush page or page directory only if necessary!
716 * @todo VBOXSTRICTRC
717 */
718VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
719{
720 PVM pVM = pVCpu->CTX_SUFF(pVM);
721 int rc;
722 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
723
724#ifndef IN_RING3
725 /*
726 * Notify the recompiler so it can record this instruction.
727 */
728 REMNotifyInvalidatePage(pVM, GCPtrPage);
729#endif /* !IN_RING3 */
730
731
732#ifdef IN_RC
733 /*
734 * Check for conflicts and pending CR3 monitoring updates.
735 */
736 if (pgmMapAreMappingsFloating(pVM))
737 {
738 if ( pgmGetMapping(pVM, GCPtrPage)
739 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
740 {
741 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
742 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
743 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
744 return VINF_PGM_SYNC_CR3;
745 }
746
747 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
748 {
749 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
750 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
751 return VINF_EM_RAW_EMULATE_INSTR;
752 }
753 }
754#endif /* IN_RC */
755
756 /*
757 * Call paging mode specific worker.
758 */
759 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
760 pgmLock(pVM);
761 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
762 pgmUnlock(pVM);
763 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
764
765#ifdef IN_RING3
766 /*
767 * Check if we have a pending update of the CR3 monitoring.
768 */
769 if ( RT_SUCCESS(rc)
770 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
771 {
772 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
773 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
774 }
775
776 /*
777 * Inform CSAM about the flush
778 *
779 * Note: This is to check if monitored pages have been changed; when we implement
780 * callbacks for virtual handlers, this is no longer required.
781 */
782 CSAMR3FlushPage(pVM, GCPtrPage);
783#endif /* IN_RING3 */
784
785 /* Ignore all irrelevant error codes. */
786 if ( rc == VERR_PAGE_NOT_PRESENT
787 || rc == VERR_PAGE_TABLE_NOT_PRESENT
788 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
789 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
790 rc = VINF_SUCCESS;
791
792 return rc;
793}
794
795
796/**
797 * Executes an instruction using the interpreter.
798 *
799 * @returns VBox status code (appropriate for trap handling and GC return).
800 * @param pVM VM handle.
801 * @param pVCpu VMCPU handle.
802 * @param pRegFrame Register frame.
803 * @param pvFault Fault address.
804 */
805VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
806{
807 uint32_t cb;
808 VBOXSTRICTRC rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
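 /* VERR_EM_INTERPRETER means the interpreter cannot handle this
 * instruction; have the caller fall back to full emulation instead. */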
809 if (rc == VERR_EM_INTERPRETER)
810 rc = VINF_EM_RAW_EMULATE_INSTR;
811 if (rc != VINF_SUCCESS)
812 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
813 return rc;
814}
815
816
817/**
818 * Gets effective page information (from the VMM page directory).
819 *
820 * @returns VBox status.
821 * @param pVCpu VMCPU handle.
822 * @param GCPtr Guest Context virtual address of the page.
823 * @param pfFlags Where to store the flags. These are X86_PTE_*.
824 * @param pHCPhys Where to store the HC physical address of the page.
825 * This is page aligned.
826 * @remark You should use PGMMapGetPage() for pages in a mapping.
827 */
828VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
829{
830 pgmLock(pVCpu->CTX_SUFF(pVM));
831 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
832 pgmUnlock(pVCpu->CTX_SUFF(pVM));
833 return rc;
834}
835
836
837/**
838 * Modify page flags for a range of pages in the shadow context.
839 *
840 * The existing flags are ANDed with the fMask and ORed with the fFlags.
841 *
842 * @returns VBox status code.
843 * @param pVCpu VMCPU handle.
844 * @param GCPtr Virtual address of the first page in the range.
845 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
846 * @param fMask The AND mask - page flags X86_PTE_*.
847 * Be very CAREFUL when ~'ing constants which could be 32-bit!
848 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
849 * @remark You must use PGMMapModifyPage() for pages in a mapping.
850 */
851DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
852{
853 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
854 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
855
856 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
857
858 PVM pVM = pVCpu->CTX_SUFF(pVM);
859 pgmLock(pVM);
860 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
861 pgmUnlock(pVM);
862 return rc;
863}
864
865
866/**
867 * Changing the page flags for a single page in the shadow page tables so as to
868 * make it read-only.
869 *
870 * @returns VBox status code.
871 * @param pVCpu VMCPU handle.
872 * @param GCPtr Virtual address of the first page in the range.
873 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
874 */
875VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
876{
877 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
878}
879
880
881/**
882 * Changing the page flags for a single page in the shadow page tables so as to
883 * make it writable.
884 *
885 * The caller must know with 101% certainty that the guest page tables map this
886 * as writable too. This function will deal with shared, zero and write
887 * monitored pages.
888 *
889 * @returns VBox status code.
890 * @param pVCpu VMCPU handle.
891 * @param GCPtr Virtual address of the first page in the range.
893 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
894 */
895VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
896{
897 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
898}
899
900
901/**
902 * Changing the page flags for a single page in the shadow page tables so as to
903 * make it not present.
904 *
905 * @returns VBox status code.
906 * @param pVCpu VMCPU handle.
907 * @param GCPtr Virtual address of the first page in the range.
908 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
909 */
910VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
911{
912 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
913}
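
/*
 * Summary (illustrative) of how the three wrappers above use the
 * fFlags (OR) / fMask (AND) semantics of pdmShwModifyPage:
 *
 *      read-only:    fFlags = 0,            fMask = ~X86_PTE_RW
 *      writable:     fFlags = X86_PTE_RW,   fMask = ~0
 *      not present:  fFlags = 0,            fMask = 0   (clears X86_PTE_P as well)
 */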
914
915
916/**
917 * Gets the shadow page directory for the specified address, PAE.
918 *
919 * @returns VBox status code.
920 * @param pVCpu The VMCPU handle.
921 * @param GCPtr The address.
922 * @param uGstPdpe Guest PDPT entry. Valid.
923 * @param ppPD Receives address of page directory
924 */
925int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
926{
927 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
928 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
929 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
930 PVM pVM = pVCpu->CTX_SUFF(pVM);
931 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
932 PPGMPOOLPAGE pShwPage;
933 int rc;
934
935 PGM_LOCK_ASSERT_OWNER(pVM);
936
937 /* Allocate page directory if not present. */
938 if ( !pPdpe->n.u1Present
939 && !(pPdpe->u & X86_PDPE_PG_MASK))
940 {
941 RTGCPTR64 GCPdPt;
942 PGMPOOLKIND enmKind;
943
944 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
945 {
946 /* AMD-V nested paging or real/protected mode without paging. */
947 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
948 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
949 }
950 else
951 {
952 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
953 {
954 if (!(uGstPdpe & X86_PDPE_P))
955 {
956 /* PD not present; guest must reload CR3 to change it.
957 * No need to monitor anything in this case.
958 */
959 Assert(!HWACCMIsEnabled(pVM));
960
961 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
962 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
963 uGstPdpe |= X86_PDPE_P;
964 }
965 else
966 {
967 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
968 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
969 }
970 }
971 else
972 {
973 GCPdPt = CPUMGetGuestCR3(pVCpu);
974 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
975 }
976 }
977
978 /* Create a reference back to the PDPT by using the index in its shadow page. */
979 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
980 AssertRCReturn(rc, rc);
981
982 /* The PD was cached or created; hook it up now. */
983 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
984
985# if defined(IN_RC)
986 /*
987 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
988 * PDPT entry; the CPU fetches them only during cr3 load, so any
989 * non-present PDPT will continue to cause page faults.
990 */
991 ASMReloadCR3();
992# endif
993 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
994 }
995 else
996 {
997 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
998 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
999 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1000
1001 pgmPoolCacheUsed(pPool, pShwPage);
1002 }
1003 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1004 return VINF_SUCCESS;
1005}
1006
1007
1008/**
1009 * Gets the pointer to the shadow page directory entry for an address, PAE.
1010 *
1011 * @returns VBox status code.
1012 * @param pVCpu The current CPU.
1013 * @param GCPtr The address.
1014 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1015 */
1016DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1017{
1018 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1019 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1020 PVM pVM = pVCpu->CTX_SUFF(pVM);
1021
1022 PGM_LOCK_ASSERT_OWNER(pVM);
1023
1024 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1025 if (!pPdpt->a[iPdPt].n.u1Present)
1026 {
1027 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1028 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1029 }
1030 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1031
1032 /* Fetch the pgm pool shadow descriptor. */
1033 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1034 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1035
1036 *ppShwPde = pShwPde;
1037 return VINF_SUCCESS;
1038}
1039
1040#ifndef IN_RC
1041
1042/**
1043 * Syncs the SHADOW page directory pointer for the specified address.
1044 *
1045 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1046 *
1047 * The caller is responsible for making sure the guest has a valid PD before
1048 * calling this function.
1049 *
1050 * @returns VBox status.
1051 * @param pVCpu VMCPU handle.
1052 * @param GCPtr The address.
1053 * @param uGstPml4e Guest PML4 entry (valid).
1054 * @param uGstPdpe Guest PDPT entry (valid).
1055 * @param ppPD Receives address of page directory
1056 */
1057static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1058{
1059 PPGMCPU pPGM = &pVCpu->pgm.s;
1060 PVM pVM = pVCpu->CTX_SUFF(pVM);
1061 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1062 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1063 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1064 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1065 PPGMPOOLPAGE pShwPage;
1066 int rc;
1067
1068 PGM_LOCK_ASSERT_OWNER(pVM);
1069
1070 /* Allocate page directory pointer table if not present. */
1071 if ( !pPml4e->n.u1Present
1072 && !(pPml4e->u & X86_PML4E_PG_MASK))
1073 {
1074 RTGCPTR64 GCPml4;
1075 PGMPOOLKIND enmKind;
1076
1077 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1078
1079 if (fNestedPagingOrNoGstPaging)
1080 {
1081 /* AMD-V nested paging or real/protected mode without paging */
1082 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1083 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1084 }
1085 else
1086 {
1087 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1088 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1089 }
1090
1091 /* Create a reference back to the PDPT by using the index in its shadow page. */
1092 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1093 AssertRCReturn(rc, rc);
1094 }
1095 else
1096 {
1097 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1098 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1099
1100 pgmPoolCacheUsed(pPool, pShwPage);
1101 }
1102 /* The PDPT was cached or created; hook it up now. */
1103 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1104
1105 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1106 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1107 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1108
1109 /* Allocate page directory if not present. */
1110 if ( !pPdpe->n.u1Present
1111 && !(pPdpe->u & X86_PDPE_PG_MASK))
1112 {
1113 RTGCPTR64 GCPdPt;
1114 PGMPOOLKIND enmKind;
1115
1116 if (fNestedPagingOrNoGstPaging)
1117 {
1118 /* AMD-V nested paging or real/protected mode without paging */
1119 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1120 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1121 }
1122 else
1123 {
1124 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1125 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1126 }
1127
1128 /* Create a reference back to the PDPT by using the index in its shadow page. */
1129 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1130 AssertRCReturn(rc, rc);
1131 }
1132 else
1133 {
1134 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1135 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1136
1137 pgmPoolCacheUsed(pPool, pShwPage);
1138 }
1139 /* The PD was cached or created; hook it up now. */
1140 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1141
1142 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1143 return VINF_SUCCESS;
1144}
1145
1146
1147/**
1148 * Gets the SHADOW page directory pointer for the specified address (long mode).
1149 *
1150 * @returns VBox status.
1151 * @param pVCpu VMCPU handle.
1152 * @param GCPtr The address.
 * @param ppPml4e Receives address of the PML4 entry. Optional, may be NULL.
1153 * @param ppPdpt Receives address of pdpt
1154 * @param ppPD Receives address of page directory
1155 */
1156DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1157{
1158 PPGMCPU pPGM = &pVCpu->pgm.s;
1159 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1160 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1161
1162 PGM_LOCK_ASSERT_OWNER(PGMCPU2VM(pPGM));
1163
1164 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1165 if (ppPml4e)
1166 *ppPml4e = (PX86PML4E)pPml4e;
1167
1168 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1169
1170 if (!pPml4e->n.u1Present)
1171 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1172
1173 PVM pVM = pVCpu->CTX_SUFF(pVM);
1174 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1175 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1176 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1177
1178 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1179 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1180 if (!pPdpt->a[iPdPt].n.u1Present)
1181 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1182
1183 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1184 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1185
1186 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1187 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1188 return VINF_SUCCESS;
1189}
1190
1191
1192/**
1193 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1194 * backing pages in case the PDPT or PML4 entry is missing.
1195 *
1196 * @returns VBox status.
1197 * @param pVCpu VMCPU handle.
1198 * @param GCPtr The address.
1199 * @param ppPdpt Receives address of pdpt
1200 * @param ppPD Receives address of page directory
1201 */
1202static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1203{
1204 PVM pVM = pVCpu->CTX_SUFF(pVM);
1205 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1206 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1207 PEPTPML4 pPml4;
1208 PEPTPML4E pPml4e;
1209 PPGMPOOLPAGE pShwPage;
1210 int rc;
1211
1212 Assert(pVM->pgm.s.fNestedPaging);
1213 PGM_LOCK_ASSERT_OWNER(pVM);
1214
1215 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1216 Assert(pPml4);
1217
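 /* Note: EPT entries have no dedicated present bit; an entry counts as
 * present when any of its read/write/execute bits are set, which is why
 * all three are set when hooking up the PML4 and PDPT entries below. */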
1218 /* Allocate page directory pointer table if not present. */
1219 pPml4e = &pPml4->a[iPml4];
1220 if ( !pPml4e->n.u1Present
1221 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1222 {
1223 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1224 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1225
1226 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1227 AssertRCReturn(rc, rc);
1228 }
1229 else
1230 {
1231 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1232 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1233
1234 pgmPoolCacheUsed(pPool, pShwPage);
1235 }
1236 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1237 pPml4e->u = pShwPage->Core.Key;
1238 pPml4e->n.u1Present = 1;
1239 pPml4e->n.u1Write = 1;
1240 pPml4e->n.u1Execute = 1;
1241
1242 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1243 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1244 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1245
1246 if (ppPdpt)
1247 *ppPdpt = pPdpt;
1248
1249 /* Allocate page directory if not present. */
1250 if ( !pPdpe->n.u1Present
1251 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1252 {
1253 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1254
1255 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1256 AssertRCReturn(rc, rc);
1257 }
1258 else
1259 {
1260 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1261 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1262
1263 pgmPoolCacheUsed(pPool, pShwPage);
1264 }
1265 /* The PD was cached or created; hook it up now and fill with the default value. */
1266 pPdpe->u = pShwPage->Core.Key;
1267 pPdpe->n.u1Present = 1;
1268 pPdpe->n.u1Write = 1;
1269 pPdpe->n.u1Execute = 1;
1270
1271 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1272 return VINF_SUCCESS;
1273}
1274
1275#endif /* !IN_RC */
1276
1277#ifdef IN_RING0
1278/**
1279 * Synchronizes a range of nested page table entries.
1280 *
1281 * The caller must own the PGM lock.
1282 *
1283 * @param pVCpu The current CPU.
1284 * @param GCPhysFault Where to start.
1285 * @param cPages The number of pages whose entries should be synced.
1286 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1287 * host paging mode for AMD-V).
1288 */
1289int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1290{
1291 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1292
1293 int rc;
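 /* Whatever the shadow paging mode, nested paging shadows the guest
 * physical address space, so the protected-mode (no guest paging)
 * templates can be reused below with a dummy PDE. */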
1294 switch (enmShwPagingMode)
1295 {
1296 case PGMMODE_32_BIT:
1297 {
1298 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1299 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1300 break;
1301 }
1302
1303 case PGMMODE_PAE:
1304 case PGMMODE_PAE_NX:
1305 {
1306 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1307 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1308 break;
1309 }
1310
1311 case PGMMODE_AMD64:
1312 case PGMMODE_AMD64_NX:
1313 {
1314 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1315 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1316 break;
1317 }
1318
1319 case PGMMODE_EPT:
1320 {
1321 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1322 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1323 break;
1324 }
1325
1326 default:
1327 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_INTERNAL_ERROR_5);
1328 }
1329 return rc;
1330}
1331#endif /* IN_RING0 */
1332
1333
1334/**
1335 * Gets effective Guest OS page information.
1336 *
1337 * When GCPtr is in a big page, the function will return as if it was a normal
1338 * 4KB page. If the need for distinguishing between big and normal pages becomes
1339 * necessary at a later point, a dedicated variant of PGMGstGetPage() will be
1340 * created for that purpose.
1341 *
1342 * @returns VBox status.
1343 * @param pVCpu The current CPU.
1344 * @param GCPtr Guest Context virtual address of the page.
1345 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1346 * @param pGCPhys Where to store the GC physical address of the page.
1347 * This is page aligned.
1348 */
1349VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1350{
1351 VMCPU_ASSERT_EMT(pVCpu);
1352 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1353}
1354
1355
1356/**
1357 * Checks if the page is present.
1358 *
1359 * @returns true if the page is present.
1360 * @returns false if the page is not present.
1361 * @param pVCpu VMCPU handle.
1362 * @param GCPtr Address within the page.
1363 */
1364VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1365{
1366 VMCPU_ASSERT_EMT(pVCpu);
1367 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1368 return RT_SUCCESS(rc);
1369}
1370
1371
1372/**
1373 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1374 *
1375 * @returns VBox status.
1376 * @param pVCpu VMCPU handle.
1377 * @param GCPtr The address of the first page.
1378 * @param cb The size of the range in bytes.
1379 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1380 */
1381VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1382{
1383 VMCPU_ASSERT_EMT(pVCpu);
1384 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1385}
1386
1387
1388/**
1389 * Modify page flags for a range of pages in the guest's tables
1390 *
1391 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1392 *
1393 * @returns VBox status code.
1394 * @param pVCpu VMCPU handle.
1395 * @param GCPtr Virtual address of the first page in the range.
1396 * @param cb Size (in bytes) of the range to apply the modification to.
1397 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1398 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1399 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1400 */
1401VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1402{
1403 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1404 VMCPU_ASSERT_EMT(pVCpu);
1405
1406 /*
1407 * Validate input.
1408 */
1409 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1410 Assert(cb);
1411
1412 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1413
1414 /*
1415 * Adjust input.
1416 */
1417 cb += GCPtr & PAGE_OFFSET_MASK;
1418 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1419 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1420
1421 /*
1422 * Call worker.
1423 */
1424 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1425
1426 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1427 return rc;
1428}
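
/*
 * Example (illustrative): clearing the writable bit on a range while keeping
 * all other flags, using the fFlags/fMask semantics documented above:
 *
 *      rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 */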
1429
1430
1431#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1432
1433/**
1434 * Performs the lazy mapping of the 32-bit guest PD.
1435 *
1436 * @returns VBox status code.
1437 * @param pVCpu The current CPU.
1438 * @param ppPd Where to return the pointer to the mapping. This is
1439 * always set.
1440 */
1441int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1442{
1443 PVM pVM = pVCpu->CTX_SUFF(pVM);
1444 pgmLock(pVM);
1445
1446 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1447
1448 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1449 PPGMPAGE pPage;
1450 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1451 if (RT_SUCCESS(rc))
1452 {
1453 RTHCPTR HCPtrGuestCR3;
1454 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1455 if (RT_SUCCESS(rc))
1456 {
1457 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1458# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1459 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1460# endif
1461 *ppPd = (PX86PD)HCPtrGuestCR3;
1462
1463 pgmUnlock(pVM);
1464 return VINF_SUCCESS;
1465 }
1466
1467 AssertRC(rc);
1468 }
1469 pgmUnlock(pVM);
1470
1471 *ppPd = NULL;
1472 return rc;
1473}
1474
1475
1476/**
1477 * Performs the lazy mapping of the PAE guest PDPT.
1478 *
1479 * @returns VBox status code.
1480 * @param pVCpu The current CPU.
1481 * @param ppPdpt Where to return the pointer to the mapping. This is
1482 * always set.
1483 */
1484int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1485{
1486 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1487 PVM pVM = pVCpu->CTX_SUFF(pVM);
1488 pgmLock(pVM);
1489
1490 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1491 PPGMPAGE pPage;
1492 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1493 if (RT_SUCCESS(rc))
1494 {
1495 RTHCPTR HCPtrGuestCR3;
1496 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1497 if (RT_SUCCESS(rc))
1498 {
1499 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1500# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1501 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1502# endif
1503 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1504
1505 pgmUnlock(pVM);
1506 return VINF_SUCCESS;
1507 }
1508
1509 AssertRC(rc);
1510 }
1511
1512 pgmUnlock(pVM);
1513 *ppPdpt = NULL;
1514 return rc;
1515}
1516
1517
1518/**
1519 * Performs the lazy mapping / updating of a PAE guest PD.
1520 *
1521 * @returns Pointer to the mapping.
1522 * @returns VBox status code.
1523 * @param pVCpu The current CPU.
1524 * @param iPdpt Which PD entry to map (0..3).
1525 * @param ppPd Where to return the pointer to the mapping. This is
1526 * always set.
1527 */
1528int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1529{
1530 PVM pVM = pVCpu->CTX_SUFF(pVM);
1531 pgmLock(pVM);
1532
1533 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1534 Assert(pGuestPDPT);
1535 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1536 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1537 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1538
1539 PPGMPAGE pPage;
1540 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1541 if (RT_SUCCESS(rc))
1542 {
1543 RTRCPTR RCPtr = NIL_RTRCPTR;
1544 RTHCPTR HCPtr = NIL_RTHCPTR;
1545#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1546 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1547 AssertRC(rc);
1548#endif
1549 if (RT_SUCCESS(rc) && fChanged)
1550 {
1551 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1552 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1553 }
1554 if (RT_SUCCESS(rc))
1555 {
1556 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1557# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1558 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1559# endif
1560 if (fChanged)
1561 {
1562 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1563 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1564 }
1565
1566 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1567 pgmUnlock(pVM);
1568 return VINF_SUCCESS;
1569 }
1570 }
1571
1572 /* Invalid page or some failure, invalidate the entry. */
1573 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1574 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1575# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1576 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1577# endif
1578 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1579
1580 pgmUnlock(pVM);
1581 return rc;
1582}
1583
1584#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1585#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1586/**
1587 * Performs the lazy mapping of the AMD64 guest PML4 table.
1588 *
1589 * @returns VBox status code.
1590 * @param pVCpu The current CPU.
1591 * @param ppPml4 Where to return the pointer to the mapping. This will
1592 * always be set.
1593 */
1594int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1595{
1596 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1597 PVM pVM = pVCpu->CTX_SUFF(pVM);
1598 pgmLock(pVM);
1599
1600 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1601 PPGMPAGE pPage;
1602 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1603 if (RT_SUCCESS(rc))
1604 {
1605 RTHCPTR HCPtrGuestCR3;
1606 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1607 if (RT_SUCCESS(rc))
1608 {
1609 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1610# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1611 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1612# endif
1613 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1614
1615 pgmUnlock(pVM);
1616 return VINF_SUCCESS;
1617 }
1618 }
1619
1620 pgmUnlock(pVM);
1621 *ppPml4 = NULL;
1622 return rc;
1623}
1624#endif
1625
1626/**
1627 * Gets the specified page directory pointer table entry.
1628 *
1629 * @returns VBox status code.
1630 * @param pVCpu VMCPU handle.
1631 * @param iPdpt PDPT index.
 * @param pPdpe Where to store the PDP entry.
1632 */
1633VMMDECL(int) PGMGstQueryPaePDPtr(PVMCPU pVCpu, unsigned iPdpt, PX86PDPE pPdpe)
1634{
1635 Assert(iPdpt <= 3);
1636 PX86PDPT pPdpt;
1637 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pPdpt);
1638 if (RT_SUCCESS(rc))
1639 *pPdpe = pPdpt->a[iPdpt & 3];
1640 return rc;
1641}
1642
1643
1644/**
1645 * Gets the current CR3 register value for the shadow memory context.
1646 * @returns CR3 value.
1647 * @param pVCpu VMCPU handle.
1648 */
1649VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1650{
1651 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1652 AssertPtrReturn(pPoolPage, 0);
1653 return pPoolPage->Core.Key;
1654}
1655
1656
1657/**
1658 * Gets the current CR3 register value for the nested memory context.
1659 * @returns CR3 value.
1660 * @param pVCpu VMCPU handle.
 * @param enmShadowMode The shadow paging mode.
1661 */
1662VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1663{
1664 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1665 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1666}
1667
1668
1669/**
1670 * Gets the current CR3 register value for the HC intermediate memory context.
1671 * @returns CR3 value.
1672 * @param pVM The VM handle.
1673 */
1674VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1675{
1676 switch (pVM->pgm.s.enmHostMode)
1677 {
1678 case SUPPAGINGMODE_32_BIT:
1679 case SUPPAGINGMODE_32_BIT_GLOBAL:
1680 return pVM->pgm.s.HCPhysInterPD;
1681
1682 case SUPPAGINGMODE_PAE:
1683 case SUPPAGINGMODE_PAE_GLOBAL:
1684 case SUPPAGINGMODE_PAE_NX:
1685 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1686 return pVM->pgm.s.HCPhysInterPaePDPT;
1687
1688 case SUPPAGINGMODE_AMD64:
1689 case SUPPAGINGMODE_AMD64_GLOBAL:
1690 case SUPPAGINGMODE_AMD64_NX:
1691 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1692 return pVM->pgm.s.HCPhysInterPaePML4;
1693
1694 default:
1695 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1696 return ~0;
1697 }
1698}
1699
1700
1701/**
1702 * Gets the current CR3 register value for the RC intermediate memory context.
1703 * @returns CR3 value.
1704 * @param pVM The VM handle.
1705 * @param pVCpu VMCPU handle.
1706 */
1707VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1708{
1709 switch (pVCpu->pgm.s.enmShadowMode)
1710 {
1711 case PGMMODE_32_BIT:
1712 return pVM->pgm.s.HCPhysInterPD;
1713
1714 case PGMMODE_PAE:
1715 case PGMMODE_PAE_NX:
1716 return pVM->pgm.s.HCPhysInterPaePDPT;
1717
1718 case PGMMODE_AMD64:
1719 case PGMMODE_AMD64_NX:
1720 return pVM->pgm.s.HCPhysInterPaePML4;
1721
1722 case PGMMODE_EPT:
1723 case PGMMODE_NESTED:
1724 return 0; /* not relevant */
1725
1726 default:
1727 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1728 return ~0;
1729 }
1730}
1731
1732
1733/**
1734 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1735 * @returns CR3 value.
1736 * @param pVM The VM handle.
1737 */
1738VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1739{
1740 return pVM->pgm.s.HCPhysInterPD;
1741}
1742
1743
1744/**
1745 * Gets the CR3 register value for the PAE intermediate memory context.
1746 * @returns CR3 value.
1747 * @param pVM The VM handle.
1748 */
1749VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1750{
1751 return pVM->pgm.s.HCPhysInterPaePDPT;
1752}
1753
1754
1755/**
1756 * Gets the CR3 register value for the AMD64 intermediate memory context.
1757 * @returns CR3 value.
1758 * @param pVM The VM handle.
1759 */
1760VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1761{
1762 return pVM->pgm.s.HCPhysInterPaePML4;
1763}
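
/*
 * Hedged sketch tying the intermediate-context getters together: switcher
 * code is expected to pick the CR3 matching the shadow mode, roughly like
 * the hypothetical helper below (not part of this file).
 *
 *     static RTHCPHYS pgmSampleGetInterCR3(PVM pVM, PGMMODE enmShw)
 *     {
 *         switch (enmShw)
 *         {
 *             case PGMMODE_32_BIT:  return PGMGetInter32BitCR3(pVM);
 *             case PGMMODE_PAE:
 *             case PGMMODE_PAE_NX:  return PGMGetInterPaeCR3(pVM);
 *             default:              return PGMGetInterAmd64CR3(pVM);
 *         }
 *     }
 */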
1764
1765
1766/**
1767 * Performs and schedules necessary updates following a CR3 load or reload.
1768 *
1769 * This will normally involve mapping the guest PD or nPDPT.
1770 *
1771 * @returns VBox status code.
1772 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1773 * safely be ignored and overridden since the FF will be set too then.
1774 * @param pVCpu VMCPU handle.
1775 * @param cr3 The new cr3.
1776 * @param fGlobal Indicates whether this is a global flush or not.
1777 */
1778VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1779{
1780 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1781 PVM pVM = pVCpu->CTX_SUFF(pVM);
1782
1783 VMCPU_ASSERT_EMT(pVCpu);
1784
1785 /*
1786 * Always flag the necessary updates; this is required for hardware acceleration
1787 */
1788 /** @todo optimize this, it shouldn't always be necessary. */
1789 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1790 if (fGlobal)
1791 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1792 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1793
1794 /*
1795 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1796 */
1797 int rc = VINF_SUCCESS;
1798 RTGCPHYS GCPhysCR3;
1799 switch (pVCpu->pgm.s.enmGuestMode)
1800 {
1801 case PGMMODE_PAE:
1802 case PGMMODE_PAE_NX:
1803 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1804 break;
1805 case PGMMODE_AMD64:
1806 case PGMMODE_AMD64_NX:
1807 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1808 break;
1809 default:
1810 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1811 break;
1812 }
1813
1814 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1815 {
1816 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1817 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1818 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1819 if (RT_LIKELY(rc == VINF_SUCCESS))
1820 {
1821 if (pgmMapAreMappingsFloating(pVM))
1822 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1823 }
1824 else
1825 {
1826 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1827 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1828 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1829 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1830 if (pgmMapAreMappingsFloating(pVM))
1831 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1832 }
1833
1834 if (fGlobal)
1835 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1836 else
1837 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1838 }
1839 else
1840 {
1841# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1842 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1843 if (pPool->cDirtyPages)
1844 {
1845 pgmLock(pVM);
1846 pgmPoolResetDirtyPages(pVM);
1847 pgmUnlock(pVM);
1848 }
1849# endif
1850 /*
1851 * Check if we have a pending update of the CR3 monitoring.
1852 */
1853 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1854 {
1855 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1856 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1857 }
1858 if (fGlobal)
1859 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1860 else
1861 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
1862 }
1863
1864 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1865 return rc;
1866}
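
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a MOV CR3
 * emulation path reloading CR3. A MOV CR3 only flushes global entries when
 * CR4.PGE is clear, hence the fGlobal expression; uNewCr3 and cr4 are assumed
 * caller locals.
 *
 *     int rc2 = PGMFlushTLB(pVCpu, uNewCr3, !(cr4 & X86_CR4_PGE));
 *     if (rc2 == VINF_PGM_SYNC_CR3)
 *         rc2 = VINF_SUCCESS;   // safe: VMCPU_FF_PGM_SYNC_CR3 is set and will
 *                               // be serviced before resuming guest code
 *     AssertRC(rc2);
 */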
1867
1868
1869/**
1870 * Performs and schedules necessary updates following a CR3 load or reload when
1871 * using nested or extended paging.
1872 *
1873 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1874 * TLB and triggering a SyncCR3.
1875 *
1876 * This will normally involve mapping the guest PD or nPDPT.
1877 *
1878 * @returns VBox status code.
1879 * @retval VINF_SUCCESS.
1880 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync when not in
1881 * nested paging mode. This can safely be ignored and overridden since
1882 * the FF will be set too then.
1883 * @param pVCpu VMCPU handle.
1884 * @param cr3 The new cr3.
1885 */
1886VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1887{
1888 PVM pVM = pVCpu->CTX_SUFF(pVM);
1889
1890 VMCPU_ASSERT_EMT(pVCpu);
1891 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1892
1893 /* We assume we're only called in nested paging mode. */
1894 Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1895 Assert(pVM->pgm.s.fMappingsDisabled);
1896 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1897
1898 /*
1899 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1900 */
1901 int rc = VINF_SUCCESS;
1902 RTGCPHYS GCPhysCR3;
1903 switch (pVCpu->pgm.s.enmGuestMode)
1904 {
1905 case PGMMODE_PAE:
1906 case PGMMODE_PAE_NX:
1907 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1908 break;
1909 case PGMMODE_AMD64:
1910 case PGMMODE_AMD64_NX:
1911 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1912 break;
1913 default:
1914 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1915 break;
1916 }
1917 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1918 {
1919 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1920 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1921 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1922 }
1923 return rc;
1924}
1925
1926
1927/**
1928 * Synchronize the paging structures.
1929 *
1930 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1931 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL flags. Those two force action flags are set
1932 * in several places, most importantly whenever the CR3 is loaded.
1933 *
1934 * @returns VBox status code.
1935 * @param pVCpu VMCPU handle.
1936 * @param cr0 Guest context CR0 register
1937 * @param cr3 Guest context CR3 register
1938 * @param cr4 Guest context CR4 register
1939 * @param fGlobal Including global page directories or not
1940 */
1941VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1942{
1943 PVM pVM = pVCpu->CTX_SUFF(pVM);
1944 int rc;
1945
1946 VMCPU_ASSERT_EMT(pVCpu);
1947
1948 /*
1949 * The pool may have pending stuff and even require a return to ring-3 to
1950 * clear the whole thing.
1951 */
1952 rc = pgmPoolSyncCR3(pVCpu);
1953 if (rc != VINF_SUCCESS)
1954 return rc;
1955
1956 /*
1957 * We might be called when we shouldn't.
1958 *
1959 * The mode switching will ensure that the PD is resynced
1960 * after every mode switch. So, if we find ourselves here
1961 * when in protected or real mode we can safely disable the
1962 * FF and return immediately.
1963 */
1964 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1965 {
1966 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1967 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1968 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1969 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1970 return VINF_SUCCESS;
1971 }
1972
1973 /* If global pages are not supported, then all flushes are global. */
1974 if (!(cr4 & X86_CR4_PGE))
1975 fGlobal = true;
1976 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1977 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1978
1979 /*
1980 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1981 * This should be done before SyncCR3.
1982 */
1983 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1984 {
1985 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1986
1987 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1988 RTGCPHYS GCPhysCR3;
1989 switch (pVCpu->pgm.s.enmGuestMode)
1990 {
1991 case PGMMODE_PAE:
1992 case PGMMODE_PAE_NX:
1993 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1994 break;
1995 case PGMMODE_AMD64:
1996 case PGMMODE_AMD64_NX:
1997 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1998 break;
1999 default:
2000 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2001 break;
2002 }
2003
2004 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2005 {
2006 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2007 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2008 }
2009
2010 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2011 if ( rc == VINF_PGM_SYNC_CR3
2012 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2013 {
2014 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2015#ifdef IN_RING3
2016 rc = pgmPoolSyncCR3(pVCpu);
2017#else
2018 if (rc == VINF_PGM_SYNC_CR3)
2019 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2020 return VINF_PGM_SYNC_CR3;
2021#endif
2022 }
2023 AssertRCReturn(rc, rc);
2024 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
2025 }
2026
2027 /*
2028 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2029 */
2030 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2031 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2032 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2033 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2034 if (rc == VINF_SUCCESS)
2035 {
2036 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2037 {
2038 /* Go back to ring 3 if a pgm pool sync is again pending. */
2039 return VINF_PGM_SYNC_CR3;
2040 }
2041
2042 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2043 {
2044 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2045 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2046 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2047 }
2048
2049 /*
2050 * Check if we have a pending update of the CR3 monitoring.
2051 */
2052 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2053 {
2054 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2055 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
2056 }
2057 }
2058
2059 /*
2060 * Now flush the CR3 (guest context).
2061 */
2062 if (rc == VINF_SUCCESS)
2063 PGM_INVL_VCPU_TLBS(pVCpu);
2064 return rc;
2065}
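
/*
 * Hedged sketch of how the force action flags tie in (hypothetical EM-style
 * caller, not part of this file):
 *
 *     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc2 = PGMSyncCR3(pVCpu, cr0, cr3, cr4,
 *                              VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 *         if (rc2 == VINF_PGM_SYNC_CR3)
 *             return rc2;   // return to ring-3 so the pool can be cleared
 *     }
 */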
2066
2067
2068/**
2069 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2070 *
2071 * @returns VBox status code, with the following informational code for
2072 * VM scheduling.
2073 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2074 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2075 * (I.e. not in R3.)
2076 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2077 *
2078 * @param pVCpu VMCPU handle.
2079 * @param cr0 The new cr0.
2080 * @param cr4 The new cr4.
2081 * @param efer The new extended feature enable register.
2082 */
2083VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2084{
2085 PVM pVM = pVCpu->CTX_SUFF(pVM);
2086 PGMMODE enmGuestMode;
2087
2088 VMCPU_ASSERT_EMT(pVCpu);
2089
2090 /*
2091 * Calc the new guest mode.
2092 */
2093 if (!(cr0 & X86_CR0_PE))
2094 enmGuestMode = PGMMODE_REAL;
2095 else if (!(cr0 & X86_CR0_PG))
2096 enmGuestMode = PGMMODE_PROTECTED;
2097 else if (!(cr4 & X86_CR4_PAE))
2098 {
2099 bool const fPse = !!(cr4 & X86_CR4_PSE);
2100 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2101 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2102 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2103 enmGuestMode = PGMMODE_32_BIT;
2104 }
2105 else if (!(efer & MSR_K6_EFER_LME))
2106 {
2107 if (!(efer & MSR_K6_EFER_NXE))
2108 enmGuestMode = PGMMODE_PAE;
2109 else
2110 enmGuestMode = PGMMODE_PAE_NX;
2111 }
2112 else
2113 {
2114 if (!(efer & MSR_K6_EFER_NXE))
2115 enmGuestMode = PGMMODE_AMD64;
2116 else
2117 enmGuestMode = PGMMODE_AMD64_NX;
2118 }
2119
2120 /*
2121 * Did it change?
2122 */
2123 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2124 return VINF_SUCCESS;
2125
2126 /* Flush the TLB */
2127 PGM_INVL_VCPU_TLBS(pVCpu);
2128
2129#ifdef IN_RING3
2130 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
2131#else
2132 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2133 return VINF_PGM_CHANGE_MODE;
2134#endif
2135}
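
/*
 * Illustrative sketch (hypothetical caller): after emulating a write to CR0,
 * CR4 or EFER, the mode is recalculated like so; in RC/R0 the
 * VINF_PGM_CHANGE_MODE status is passed up so ring-3 can do the switch.
 *
 *     int rc2 = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu),
 *                             CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
 *     if (rc2 != VINF_SUCCESS)
 *         return rc2;
 */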
2136
2137
2138/**
2139 * Gets the current guest paging mode.
2140 *
2141 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2142 *
2143 * @returns The current paging mode.
2144 * @param pVCpu VMCPU handle.
2145 */
2146VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2147{
2148 return pVCpu->pgm.s.enmGuestMode;
2149}
2150
2151
2152/**
2153 * Gets the current shadow paging mode.
2154 *
2155 * @returns The current paging mode.
2156 * @param pVCpu VMCPU handle.
2157 */
2158VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2159{
2160 return pVCpu->pgm.s.enmShadowMode;
2161}
2162
2163
2164/**
2165 * Gets the current host paging mode.
2166 *
2167 * @returns The current paging mode.
2168 * @param pVM The VM handle.
2169 */
2170VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2171{
2172 switch (pVM->pgm.s.enmHostMode)
2173 {
2174 case SUPPAGINGMODE_32_BIT:
2175 case SUPPAGINGMODE_32_BIT_GLOBAL:
2176 return PGMMODE_32_BIT;
2177
2178 case SUPPAGINGMODE_PAE:
2179 case SUPPAGINGMODE_PAE_GLOBAL:
2180 return PGMMODE_PAE;
2181
2182 case SUPPAGINGMODE_PAE_NX:
2183 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2184 return PGMMODE_PAE_NX;
2185
2186 case SUPPAGINGMODE_AMD64:
2187 case SUPPAGINGMODE_AMD64_GLOBAL:
2188 return PGMMODE_AMD64;
2189
2190 case SUPPAGINGMODE_AMD64_NX:
2191 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2192 return PGMMODE_AMD64_NX;
2193
2194 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2195 }
2196
2197 return PGMMODE_INVALID;
2198}
2199
2200
2201/**
2202 * Get mode name.
2203 *
2204 * @returns read-only name string.
2205 * @param enmMode The mode which name is desired.
2206 */
2207VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2208{
2209 switch (enmMode)
2210 {
2211 case PGMMODE_REAL: return "Real";
2212 case PGMMODE_PROTECTED: return "Protected";
2213 case PGMMODE_32_BIT: return "32-bit";
2214 case PGMMODE_PAE: return "PAE";
2215 case PGMMODE_PAE_NX: return "PAE+NX";
2216 case PGMMODE_AMD64: return "AMD64";
2217 case PGMMODE_AMD64_NX: return "AMD64+NX";
2218 case PGMMODE_NESTED: return "Nested";
2219 case PGMMODE_EPT: return "EPT";
2220 default: return "unknown mode value";
2221 }
2222}
2223
2224
2225
2226/**
2227 * Notification from CPUM that the EFER.NXE bit has changed.
2228 *
2229 * @param pVCpu The virtual CPU for which EFER changed.
2230 * @param fNxe The new NXE state.
2231 */
2232VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2233{
2234/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2235 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2236
2237 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2238 if (fNxe)
2239 {
2240 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2241 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2242 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2243 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2244 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2245 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2246 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2247 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2248 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2249 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2250 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2251
2252 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2253 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2254 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2255 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2256 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2257 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2258 }
2259 else
2260 {
2261 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2262 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2263 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2264 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2265 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2266 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2267 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2268 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2269 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2270 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2271 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2272
2273 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2274 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2275 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2276 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2277 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2278 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2279 }
2280}
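
/*
 * Hedged sketch of how the MBZ masks adjusted above are consumed: the guest
 * page table walkers AND the raw entry with the relevant mask, and any set
 * bit means the guest used a reserved bit (uGstPte is a hypothetical local).
 *
 *     if (uGstPte & pVCpu->pgm.s.fGstPaeMbzPteMask)
 *     {
 *         // reserved bit set -> must be reported as a reserved-bit #PF
 *     }
 */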
2281
2282
2283/**
2284 * Check if any pgm pool pages are marked dirty (not write monitored).
2285 *
2286 * @returns true if there are dirty pages, false if not.
2287 * @param pVM The VM to operate on.
2288 */
2289VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2290{
2291 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2292}
2293
2294
2295/**
2296 * Check if this VCPU currently owns the PGM lock.
2297 *
2298 * @returns bool owner/not owner
2299 * @param pVM The VM to operate on.
2300 */
2301VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2302{
2303 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2304}
2305
2306
2307/**
2308 * Enable or disable large page usage
2309 *
2310 * @returns VBox status code.
2311 * @param pVM The VM to operate on.
2312 * @param fUseLargePages Use/not use large pages
2313 */
2314VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2315{
2316 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2317
2318 pVM->fUseLargePages = fUseLargePages;
2319 return VINF_SUCCESS;
2320}
2321
2322
2323/**
2324 * Acquire the PGM lock.
2325 *
2326 * @returns VBox status code
2327 * @param pVM The VM to operate on.
2328 */
2329int pgmLock(PVM pVM)
2330{
2331 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2332#if defined(IN_RC) || defined(IN_RING0)
2333 if (rc == VERR_SEM_BUSY)
2334 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2335#endif
2336 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2337 return rc;
2338}
2339
2340
2341/**
2342 * Release the PGM lock.
2343 *
2344 * @returns VBox status code
2345 * @param pVM The VM to operate on.
2346 */
2347void pgmUnlock(PVM pVM)
2348{
2349 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2350}
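
/*
 * Typical usage pattern (sketch): the PGM lock is taken around any pool or
 * physical page state access, mirroring what the code in this file does:
 *
 *     pgmLock(pVM);
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     if (pPage)
 *     {
 *         // ... inspect or modify the page state ...
 *     }
 *     pgmUnlock(pVM);
 */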
2351
2352#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2353
2354/**
2355 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2356 *
2357 * @returns VBox status code.
2358 * @param pVM The VM handle.
2359 * @param pVCpu The current CPU.
2360 * @param GCPhys The guest physical address of the page to map. The
2361 * offset bits are not ignored.
2362 * @param ppv Where to return the address corresponding to @a GCPhys.
2363 */
2364int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2365{
2366 pgmLock(pVM);
2367
2368 /*
2369 * Convert it to a writable page and pass it on to the dynamic mapper.
2370 */
2371 int rc;
2372 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2373 if (RT_LIKELY(pPage))
2374 {
2375 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2376 if (RT_SUCCESS(rc))
2377 {
2378 void *pv;
2379 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2380 if (RT_SUCCESS(rc))
2381 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2382 }
2383 else
2384 AssertRC(rc);
2385 }
2386 else
2387 {
2388 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2389 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2390 }
2391
2392 pgmUnlock(pVM);
2393 return rc;
2394}
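
/*
 * Usage sketch (hypothetical caller): the mapping is only valid until the
 * next dynamic mapping operation, so consume it immediately. The
 * RTLOG_COMMA_SRC_POS macro supplies the trailing source position arguments.
 *
 *     void    *pv;
 *     uint64_t u64;
 *     int rc2 = pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, &pv RTLOG_COMMA_SRC_POS);
 *     if (RT_SUCCESS(rc2))
 *         u64 = *(uint64_t *)pv;
 */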
2395
2396#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2397#if !defined(IN_R0) || defined(LOG_ENABLED)
2398
2399/** Format handler for PGMPAGE.
2400 * @copydoc FNRTSTRFORMATTYPE */
2401static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2402 const char *pszType, void const *pvValue,
2403 int cchWidth, int cchPrecision, unsigned fFlags,
2404 void *pvUser)
2405{
2406 size_t cch;
2407 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2408 if (VALID_PTR(pPage))
2409 {
2410 char szTmp[64+80];
2411
2412 cch = 0;
2413
2414 /* The single char state stuff. */
2415 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2416 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2417
2418#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2419 if (IS_PART_INCLUDED(5))
2420 {
2421 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2422 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2423 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2424 }
2425
2426 /* The type. */
2427 if (IS_PART_INCLUDED(4))
2428 {
2429 szTmp[cch++] = ':';
2430 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2431 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2432 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2433 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2434 }
2435
2436 /* The numbers. */
2437 if (IS_PART_INCLUDED(3))
2438 {
2439 szTmp[cch++] = ':';
2440 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2441 }
2442
2443 if (IS_PART_INCLUDED(2))
2444 {
2445 szTmp[cch++] = ':';
2446 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2447 }
2448
2449 if (IS_PART_INCLUDED(6))
2450 {
2451 szTmp[cch++] = ':';
2452 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2453 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2454 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2455 }
2456#undef IS_PART_INCLUDED
2457
2458 cch = pfnOutput(pvArgOutput, szTmp, cch);
2459 }
2460 else
2461 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2462 return cch;
2463}
2464
2465
2466/** Format handler for PGMRAMRANGE.
2467 * @copydoc FNRTSTRFORMATTYPE */
2468static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2469 const char *pszType, void const *pvValue,
2470 int cchWidth, int cchPrecision, unsigned fFlags,
2471 void *pvUser)
2472{
2473 size_t cch;
2474 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2475 if (VALID_PTR(pRam))
2476 {
2477 char szTmp[80];
2478 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2479 cch = pfnOutput(pvArgOutput, szTmp, cch);
2480 }
2481 else
2482 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2483 return cch;
2484}
2485
2486/** Format type handlers to be registered/deregistered. */
2487static const struct
2488{
2489 char szType[24];
2490 PFNRTSTRFORMATTYPE pfnHandler;
2491} g_aPgmFormatTypes[] =
2492{
2493 { "pgmpage", pgmFormatTypeHandlerPage },
2494 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2495};
2496
2497#endif /* !IN_R0 || LOG_ENABLED */
2498
2499/**
2500 * Registers the global string format types.
2501 *
2502 * This should be called at module load time or in some other manner that
2503 * ensures that it's called exactly once.
2504 *
2505 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2506 */
2507VMMDECL(int) PGMRegisterStringFormatTypes(void)
2508{
2509#if !defined(IN_R0) || defined(LOG_ENABLED)
2510 int rc = VINF_SUCCESS;
2511 unsigned i;
2512 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2513 {
2514 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2515# ifdef IN_RING0
2516 if (rc == VERR_ALREADY_EXISTS)
2517 {
2518 /* in case of cleanup failure in ring-0 */
2519 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2520 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2521 }
2522# endif
2523 }
2524 if (RT_FAILURE(rc))
2525 while (i-- > 0)
2526 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2527
2528 return rc;
2529#else
2530 return VINF_SUCCESS;
2531#endif
2532}
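
/*
 * Once registered, the types are consumed through IPRT's %R[type] format
 * extension, e.g. (sketch):
 *
 *     Log(("%R[pgmpage] inside %R[pgmramrange]\n", pPage, pRam));
 */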
2533
2534
2535/**
2536 * Deregisters the global string format types.
2537 *
2538 * This should be called at module unload time or in some other manner that
2539 * ensures that it's called exactly once.
2540 */
2541VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2542{
2543#if !defined(IN_R0) || defined(LOG_ENABLED)
2544 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2545 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2546#endif
2547}
2548
2549#ifdef VBOX_STRICT
2550
2551/**
2552 * Asserts that there are no mapping conflicts.
2553 *
2554 * @returns Number of conflicts.
2555 * @param pVM The VM Handle.
2556 */
2557VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2558{
2559 unsigned cErrors = 0;
2560
2561 /* Only applies to raw mode -> 1 VCPU */
2562 Assert(pVM->cCpus == 1);
2563 PVMCPU pVCpu = &pVM->aCpus[0];
2564
2565 /*
2566 * Check for mapping conflicts.
2567 */
2568 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2569 pMapping;
2570 pMapping = pMapping->CTX_SUFF(pNext))
2571 {
2572 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2573 for (RTGCPTR GCPtr = pMapping->GCPtr;
2574 GCPtr <= pMapping->GCPtrLast;
2575 GCPtr += PAGE_SIZE)
2576 {
2577 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2578 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2579 {
2580 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2581 cErrors++;
2582 break;
2583 }
2584 }
2585 }
2586
2587 return cErrors;
2588}
2589
2590
2591/**
2592 * Asserts that everything related to the guest CR3 is correctly shadowed.
2593 *
2594 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2595 * and assert the correctness of the guest CR3 mapping before asserting that the
2596 * shadow page tables are in sync with the guest page tables.
2597 *
2598 * @returns Number of conflicts.
2599 * @param pVM The VM Handle.
2600 * @param pVCpu VMCPU handle.
2601 * @param cr3 The current guest CR3 register value.
2602 * @param cr4 The current guest CR4 register value.
2603 */
2604VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2605{
2606 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2607 pgmLock(pVM);
2608 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2609 pgmUnlock(pVM);
2610 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2611 return cErrors;
2612}
2613
2614#endif /* VBOX_STRICT */