VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@11146

Last change on this file since 11146 was 10824, checked in by vboxsync, 16 years ago

Naming

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 54.5 KB
/* $Id: PGMAll.cpp 10824 2008-07-23 09:05:42Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/trpm.h>
#include <VBox/rem.h>
#include <VBox/em.h>
#include <VBox/hwaccm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE,  *PPGMHVUSTATE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_32BIT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_PAE
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_AMD64
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME

/*
 * Shadow - Nested paging mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_NESTED
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_NESTED(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifdef PGM_WITH_EPT
/*
 * Shadow - EPT
 */
#define PGM_SHW_TYPE                PGM_TYPE_EPT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_EPT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#endif /* PGM_WITH_EPT */

#endif /* !IN_GC */

/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", (uint32_t)uErr, pvFault, pRegFrame->rip));
    STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
    STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
    return rc;
}

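/*
 * Editorial sketch (not part of the original file): how a raw-mode trap
 * dispatcher might feed a guest #PF into PGMTrap0eHandler. The surrounding
 * function and the TRPM calls are illustrative assumptions about the caller;
 * only the PGMTrap0eHandler signature above is taken from this file.
 */
#if 0
static int pgmExampleDispatchPageFault(PVM pVM, PCPUMCTXCORE pRegFrame)
{
    RTGCUINT uErr    = TRPMGetErrorCode(pVM);             /* #PF pushes an error code. */
    RTGCPTR  pvFault = (RTGCPTR)TRPMGetFaultAddress(pVM); /* CR2 snapshot taken by TRPM. */
    /* VINF_SUCCESS means the fault was resolved (e.g. lazy shadow sync);
       other VINF_* codes schedule a return to ring-3. */
    return PGMTrap0eHandler(pVM, uErr, pRegFrame, pvFault);
}
#endif
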
/**
 * Prefetch a page.
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to prefetch.
 */
PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    return rc;
}


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if no mapping corresponds to the address.
 *
 * @param   pVM     The virtual machine.
 * @param   GCPtr   The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
            return pMapping;
        }
        pMapping = CTXALLSUFF(pMapping->pNext);
    }
    return NULL;
}


/**
 * Verifies a range of pages for read or write access.
 *
 * Only checks the guest's page tables.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check.
 * @param   cbSize      Access size.
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*)).
 */
PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault.
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (   VBOX_SUCCESS(rc)
        && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}


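/*
 * Editorial sketch (not part of the original file): using PGMIsValidAccess to
 * check a guest user-mode write before emulating it. GCPtrDst and cbWrite are
 * illustrative assumptions; the flag bits are the real X86_PTE_* values the
 * function expects.
 */
#if 0
int rc2 = PGMIsValidAccess(pVM, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
if (rc2 == VINF_EM_RAW_GUEST_TRAP)
    return rc2; /* let the guest take the #PF instead of writing on its behalf. */
#endif
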
/**
 * Verifies a range of pages for read or write access.
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check.
 * @param   cbSize      Access size.
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*)).
 */
PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault.
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!HWACCMIsNestedPagingActive(pVM))
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning.
         */
        rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (    VBOX_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}


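/*
 * Editorial sketch (not part of the original file): unlike PGMIsValidAccess
 * above, PGMVerifyAccess will also sync a shadow page that was write-protected
 * for dirty-bit tracking or CSAM rather than reporting a spurious trap, so it
 * is the one to use before actually touching guest memory. Names below are
 * illustrative assumptions.
 */
#if 0
rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
if (rc != VINF_SUCCESS)
    return VINF_EM_RAW_GUEST_TRAP; /* or reschedule on the informational codes. */
#endif
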
#ifndef IN_GC
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    Log3(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush.
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif /* !IN_GC */


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
}


/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        return VERR_INVALID_PARAMETER;
    }
    if (!cb)
    {
        AssertFailed();
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */

    /*
     * Call worker.
     */
    return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
}

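/*
 * Editorial sketch (not part of the original file): write-protecting one page
 * in the shadow tables by clearing X86_PTE_RW through the AND mask. Note the
 * 64-bit ~ as the comment above warns; GCPtrPage is an illustrative assumption.
 */
#if 0
rc = PGMShwModifyPage(pVM, GCPtrPage, PAGE_SIZE,
                      0 /* fFlags: no bits to set */,
                      ~(uint64_t)X86_PTE_RW /* fMask: clear the write bit */);
#endif
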
/**
 * Syncs the SHADOW page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT entry is missing.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   pGstPdpe    Guest PDPT entry
 * @param   ppPD        Receives address of page directory
 */
PGMDECL(int) PGMShwSyncPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
    PPGM           pPGM  = &pVM->pgm.s;
    PPGMPOOL       pPool = pPGM->CTXSUFF(pPool);
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    Assert(!HWACCMIsNestedPagingActive(pVM));

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT  pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        PX86PDPE pPdptGst = &CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt];

        Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now. */
    pPdpe->u |=    pShwPage->Core.Key
                | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

/**
 * Gets the SHADOW page directory pointer for the specified address.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
PGMDECL(int) PGMShwGetPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGM           pPGM  = &pVM->pgm.s;
    PPGMPOOL       pPool = pPGM->CTXSUFF(pPool);
    PPGMPOOLPAGE   pShwPage;

    Assert(!HWACCMIsNestedPagingActive(pVM));

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT  pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];

    *ppPdpt = pPdpt;
    if (!pPdpe->n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

#ifndef IN_GC
/**
 * Syncs the SHADOW page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or PML4 entry is missing.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   pGstPml4e   Guest PML4 entry
 * @param   pGstPdpe    Guest PDPT entry
 * @param   ppPD        Receives address of page directory
 */
PGMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
    PPGM           pPGM   = &pVM->pgm.s;
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PPGMPOOL       pPool  = pPGM->CTXSUFF(pPool);
    PX86PML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;
    int            rc;
    bool           fNestedPaging = HWACCMIsNestedPagingActive(pVM);

    Assert(pVM->pgm.s.pHCPaePML4);

    /* Allocate page directory pointer table if not present. */
    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
        Assert(!(pPml4e->u & X86_PML4E_PG_MASK));

        if (!fNestedPaging)
        {
            Assert(pVM->pgm.s.pHCShwAmd64CR3);
            Assert(pPGM->pGstPaePML4HC);

            PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];

            rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
        }
        else
            rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);

        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PDPT was cached or created; hook it up now. */
    pPml4e->u |=    pShwPage->Core.Key
                 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT  pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        if (!fNestedPaging)
        {
            Assert(pPGM->pGstPaePML4HC);

            PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
            PX86PDPT  pPdptGst;
            rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
            AssertRCReturn(rc, rc);

            Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
            /* Create a reference back to the PDPT by using the index in its shadow page. */
            rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
        }
        else
            rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);

        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now. */
    pPdpe->u |=    pShwPage->Core.Key
                | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

/**
 * Gets the SHADOW page directory pointer for the specified address.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
PGMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGM           pPGM   = &pVM->pgm.s;
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PPGMPOOL       pPool  = pPGM->CTXSUFF(pPool);
    PX86PML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;

    AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);

    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
    if (!pPml4e->n.u1Present)
        return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT  pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];

    *ppPdpt = pPdpt;
    if (!pPdpe->n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
#endif /* !IN_GC */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() variant will be created for
 * that purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
}

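/*
 * Editorial sketch (not part of the original file): translating a guest
 * virtual address into a guest physical address plus flags. Since the result
 * is page aligned, the caller re-adds the page offset; variable names are
 * illustrative assumptions.
 */
#if 0
uint64_t fFlags;
RTGCPHYS GCPhys;
int rc2 = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
if (VBOX_SUCCESS(rc2))
    GCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK; /* restore the offset. */
#endif
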

/**
 * Checks if the page is present.
 *
 * @returns true if the page is present.
 * @returns false if the page is not present.
 * @param   pVM         The VM handle.
 * @param   GCPtr       Address within the page.
 */
PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
{
    int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
    return VBOX_SUCCESS(rc);
}


/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
}

/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);

    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    if (!cb)
    {
        AssertFailed();
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
    return rc;
}


/**
 * Gets the current CR3 register value for the shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
{
    PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
    switch (enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhys32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysPaePML4;

        case PGMMODE_EPT:
            return pVM->pgm.s.HCPhysNestedRoot;

        case PGMMODE_NESTED:
            return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
            return ~0;
    }
}

/**
 * Gets the current CR3 register value for the nested memory context.
 * @returns CR3 value.
 * @param   pVM             The VM handle.
 * @param   enmShadowMode   The shadow paging mode.
 */
PGMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
{
    switch (enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhys32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhys32BitPD;
}


/**
 * Gets the CR3 register value for the PAE shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePML4;
}


/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return ~0;
    }
}


/**
 * Gets the current CR3 register value for the GC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetInterGCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        case PGMMODE_EPT:
        case PGMMODE_NESTED:
            return 0; /* not relevant */

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}


/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}


/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPT.
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
PGMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);

    /*
     * Always flag the necessary updates; necessary for hardware acceleration.
     */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
    return rc;
}

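/*
 * Editorial sketch (not part of the original file): how a MOV CR3 emulation
 * might call PGMFlushTLB. With CR4.PGE clear every flush is effectively
 * global; the surrounding context and uNewCR3 are illustrative assumptions.
 */
#if 0
rc = PGMFlushTLB(pVM, uNewCR3, !(CPUMGetGuestCR4(pVM) & X86_CR4_PGE));
#endif
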
/**
 * Performs and schedules necessary updates following a CR3 load or reload,
 * without actually flushing the TLB as with PGMFlushTLB.
 *
 * This will normally involve mapping the guest PD or nPDPT.
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 */
PGMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
{
    LogFlow(("PGMUpdateCR3: cr3=%VX64 OldCr3=%VX64\n", cr3, pVM->pgm.s.GCPhysCR3));

    /* We assume we're only called in nested paging mode. */
    Assert(pVM->pgm.s.fMappingsFixed);
    Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
    Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
    }
    AssertRC(rc);
    return rc;
}

/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
 * in several places, most importantly whenever the CR3 is loaded.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   cr0         Guest context CR0 register.
 * @param   cr3         Guest context CR3 register.
 * @param   cr4         Guest context CR4 register.
 * @param   fGlobal     Including global page directories or not.
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global. */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' function do the work and we'll just keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}

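/*
 * Editorial sketch (not part of the original file): the execution loop is
 * expected to react to the force-action flags raised above along these lines;
 * the exact EM-loop context is an assumption.
 */
#if 0
if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM),
                    CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
#endif
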

/**
 * Called whenever CR0 or CR4 changes in a way which may affect
 * the paging mode.
 *
 * @returns VBox status code fit for scheduling in GC and R0.
 * @retval  VINF_SUCCESS if there was no change, or it was successfully dealt with.
 * @retval  VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
 * @param   pVM     VM handle.
 * @param   cr0     The new cr0.
 * @param   cr4     The new cr4.
 * @param   efer    The new extended feature enable register.
 */
PGMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    PGMMODE enmGuestMode;

    /*
     * Calc the new guest mode.
     */
    if (!(cr0 & X86_CR0_PE))
        enmGuestMode = PGMMODE_REAL;
    else if (!(cr0 & X86_CR0_PG))
        enmGuestMode = PGMMODE_PROTECTED;
    else if (!(cr4 & X86_CR4_PAE))
        enmGuestMode = PGMMODE_32_BIT;
    else if (!(efer & MSR_K6_EFER_LME))
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_PAE;
        else
            enmGuestMode = PGMMODE_PAE_NX;
    }
    else
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_AMD64;
        else
            enmGuestMode = PGMMODE_AMD64_NX;
    }

    /*
     * Did it change?
     */
    if (pVM->pgm.s.enmGuestMode == enmGuestMode)
        return VINF_SUCCESS;

    /* Flush the TLB */
    PGM_INVL_GUEST_TLBS();

#ifdef IN_RING3
    return PGMR3ChangeMode(pVM, enmGuestMode);
#else
    Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
    return VINF_PGM_CHANGE_MODE;
#endif
}


/**
 * Gets the current guest paging mode.
 *
 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
{
    return pVM->pgm.s.enmGuestMode;
}


/**
 * Gets the current shadow paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
{
    return pVM->pgm.s.enmShadowMode;
}

/**
 * Gets the current host paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return PGMMODE_32_BIT;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
            return PGMMODE_PAE;

        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return PGMMODE_PAE_NX;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
            return PGMMODE_AMD64;

        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return PGMMODE_AMD64_NX;

        default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
    }

    return PGMMODE_INVALID;
}


/**
 * Get mode name.
 *
 * @returns read-only name string.
 * @param   enmMode     The mode which name is desired.
 */
PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
{
    switch (enmMode)
    {
        case PGMMODE_REAL:      return "Real";
        case PGMMODE_PROTECTED: return "Protected";
        case PGMMODE_32_BIT:    return "32-bit";
        case PGMMODE_PAE:       return "PAE";
        case PGMMODE_PAE_NX:    return "PAE+NX";
        case PGMMODE_AMD64:     return "AMD64";
        case PGMMODE_AMD64_NX:  return "AMD64+NX";
        case PGMMODE_NESTED:    return "Nested";
        case PGMMODE_EPT:       return "EPT";
        default:                return "unknown mode value";
    }
}


/**
 * Acquire the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}


/**
 * Release the PGM lock.
 *
 * @param   pVM     The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}

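/*
 * Editorial sketch (not part of the original file): pgmLock/pgmUnlock must
 * bracket any access to the structures they protect, and since this is C
 * every early return between them needs a matching pgmUnlock.
 */
#if 0
int rc2 = pgmLock(pVM);
AssertRCReturn(rc2, rc2);
/* ... walk or modify PGM state ... */
pgmUnlock(pVM);
#endif
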

#ifdef VBOX_STRICT

/**
 * Asserts that there are no mapping conflicts.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 */
PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
{
    unsigned cErrors = 0;

    /*
     * Check for mapping conflicts.
     */
    for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
         pMapping;
         pMapping = CTXALLSUFF(pMapping->pNext))
    {
        /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
        for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
             GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
             GCPtr += PAGE_SIZE)
        {
            int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
            if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
            {
                AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
                cErrors++;
                break;
            }
        }
    }

    return cErrors;
}


/**
 * Asserts that everything related to the guest CR3 is correctly shadowed.
 *
 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
 * and assert the correctness of the guest CR3 mapping before asserting that the
 * shadow page tables are in sync with the guest page tables.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 * @param   cr3     The current guest CR3 register value.
 * @param   cr4     The current guest CR4 register value.
 */
PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    return cErrors;
}

#endif /* VBOX_STRICT */
