VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 4268

Last change on this file since 4268 was 4268, checked in by vboxsync, 17 years ago

Record invlpg occurrences in ring 0 too.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.0 KB
Line 
1/* $Id: PGMAll.cpp 4268 2007-08-21 17:10:47Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/pgm.h>
23#include <VBox/cpum.h>
24#include <VBox/selm.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/csam.h>
30#include <VBox/patm.h>
31#include <VBox/trpm.h>
32#include <VBox/rem.h>
33#include <VBox/em.h>
34#include "PGMInternal.h"
35#include <VBox/vm.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <VBox/log.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42
43
44/*******************************************************************************
45* Structures and Typedefs *
46*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value (snapshot taken when the update starts). */
    uint32_t    cr4;
} PGMHVUSTATE,  *PPGMHVUSTATE;
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
/** @def DUMP_PDE_BIG
 * Debug routine for dumping a big (large-page) PDE.
 * Compiles to a real log call only in DEBUG_Sander builds; a no-op otherwise.
 */
#ifdef DEBUG_Sander
/** Debug routine for dumping a big PDE to the log. */
static void pgmDumpPDEBig(const char *pszPrefix, int iPD, VBOXPDE Pde)
{
    /* Split across two Log() calls to keep each line readable. */
    Log(("%s: BIG %d u10PageNo=%08X P=%d W=%d U=%d CACHE=%d ACC=%d DIR=%d GBL=%d\n", pszPrefix, iPD, Pde.b.u10PageNo, Pde.b.u1Present, Pde.b.u1Write, Pde.b.u1User, Pde.b.u1CacheDisable, Pde.b.u1Accessed, Pde.b.u1Dirty, Pde.b.u1Global));
    Log(("%s: BIG %d WRT=%d AVAIL=%X RSV=%X PAT=%d\n", pszPrefix, iPD, Pde.b.u1WriteThru, Pde.b.u3Available, Pde.b.u8PageNoHigh, Pde.b.u1PAT));
}
#define DUMP_PDE_BIG(a, b, c) pgmDumpPDEBig(a, b, c)
#else
/* Swallow the arguments entirely in non-debug builds. */
#define DUMP_PDE_BIG(a, b, c) do { } while (0)
#endif
79
80
81
#if 1///@todo ndef RT_ARCH_AMD64
/*
 * Shadow - 32-bit mode
 *
 * The PGMAll*.h files included below are code templates: each inclusion
 * instantiates the paging code for the shadow/guest mode combination selected
 * by the PGM_SHW_*, PGM_GST_* and PGM_BTH_* macros defined just above it.
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode (the only combination with big/4MB page support here) */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#endif /* !RT_ARCH_AMD64 (see the @todo on the #if above) */
131
132
/*
 * Shadow - PAE mode
 *
 * Same template-inclusion scheme as the 32-bit shadow section above.
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
/* NOTE(review): PGM_BTH_NAME is already defined here, before PGMAllShw.h, and
 * then redefined identically for the real-mode guest below (legal, identical
 * redefinition). Confirm whether the Shw template actually needs it. */
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode
 * (PGMAllGst.h is not included here: the 32-bit guest accessors were already
 * instantiated in the 32-bit shadow section; only the Bth combination is new.) */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
193
194
/*
 * Shadow - AMD64 mode
 *
 * Same template-inclusion scheme as the sections above. Real/protected guests
 * reuse the accessors instantiated earlier, so only PGMAllBth.h is included
 * for them.
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_NAME
#undef PGM_GST_TYPE

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - AMD64 mode */
#define PGM_GST_TYPE PGM_TYPE_AMD64
#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
240
241
242
/**
 * \#PF Handler.
 *
 * Classifies the fault for statistics, then dispatches to the worker for the
 * current shadow+guest paging mode combination (PGM_BTH_PFN table).
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code (X86_TRAP_PF_* bits).
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
    STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
    /* Clear the attribution pointer so the worker can pin the time on a specific cause. */
    STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats: bucket by user/supervisor, present, write and
     * reserved-bit flags of the page fault error code.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
    }
    else
    {   /* supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
        /* NOTE(review): unlike the user branch there is no counter for plain
         * supervisor present reads here - confirm whether that is intended. */
    }
#endif

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* If the worker didn't attribute the time to anything, file it under misc. */
    STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
    return rc;
}
306
307
308/**
309 * Prefetch a page
310 *
311 * Typically used to sync commonly used pages before entering raw mode
312 * after a CR3 reload.
313 *
314 * @returns VBox status code suitable for scheduling.
315 * @retval VINF_SUCCESS on success.
316 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
317 * @param pVM VM handle.
318 * @param GCPtrPage Page to invalidate.
319 */
320PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
321{
322 STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
323 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
324 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
325 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
326 return rc;
327}
328
329
330/**
331 * Gets the mapping corresponding to the specified address (if any).
332 *
333 * @returns Pointer to the mapping.
334 * @returns NULL if not
335 *
336 * @param pVM The virtual machine.
337 * @param GCPtr The guest context pointer.
338 */
339PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
340{
341 PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
342 while (pMapping)
343 {
344 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
345 break;
346 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
347 {
348 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
349 return pMapping;
350 }
351 pMapping = CTXALLSUFF(pMapping->pNext);
352 }
353 return NULL;
354}
355
356
357/**
358 * Verifies a range of pages for read or write access
359 *
360 * Only checks the guest's page tables
361 *
362 * @returns VBox status code.
363 * @param pVM VM handle.
364 * @param Addr Guest virtual address to check
365 * @param cbSize Access size
366 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
367 */
368PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
369{
370 /*
371 * Validate input.
372 */
373 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
374 {
375 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
376 return VERR_INVALID_PARAMETER;
377 }
378
379 uint64_t fPage;
380 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
381 if (VBOX_FAILURE(rc))
382 {
383 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
384 return VINF_EM_RAW_GUEST_TRAP;
385 }
386
387 /*
388 * Check if the access would cause a page fault
389 *
390 * Note that hypervisor page directories are not present in the guest's tables, so this check
391 * is sufficient.
392 */
393 bool fWrite = !!(fAccess & X86_PTE_RW);
394 bool fUser = !!(fAccess & X86_PTE_US);
395 if ( !(fPage & X86_PTE_P)
396 || (fWrite && !(fPage & X86_PTE_RW))
397 || (fUser && !(fPage & X86_PTE_US)) )
398 {
399 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
400 return VINF_EM_RAW_GUEST_TRAP;
401 }
402 if ( VBOX_SUCCESS(rc)
403 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
404 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
405 return rc;
406}
407
408
/**
 * Verifies a range of pages for read or write access.
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the range is accessible (possibly after syncing).
 * @retval  VINF_EM_RAW_GUEST_TRAP if the guest's own tables forbid the access.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check.
 * @param   cbSize      Access size.
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*)).
 */
PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    /* First consult the guest's own page tables. */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (    !(fPageGst & X86_PTE_P)
        ||  (fWrite && !(fPageGst & X86_PTE_RW))
        ||  (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
     * (shadow PTE not present even though the guest's is).
     */
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
    {
        /*
         * Page is not present in our page tables.
         * Try to sync it!
         */
        /* The bit layout identity below lets us reuse fAccess bits as a PF error code. */
        Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
        uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
        rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
        AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    /* Handle ranges that span multiple pages (or wrap around the address space). */
    if (    VBOX_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            /* Check each subsequent page one byte at a time. */
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}
513
514
#ifndef IN_GC
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    /* Dispatch to the mode specific worker, timing the call. */
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif /* !IN_GC */
572
573
/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
#ifdef IN_RING0
    /** @todo  No ring-0 interpreter yet; punt the instruction back for emulation. */
    int rc = VINF_EM_RAW_EMULATE_INSTR;
#else
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    /* Map "interpreter can't handle it" onto the generic emulate-it status. */
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
#endif
    return rc;
}
597
598
599/**
600 * Gets effective page information (from the VMM page directory).
601 *
602 * @returns VBox status.
603 * @param pVM VM Handle.
604 * @param GCPtr Guest Context virtual address of the page.
605 * @param pfFlags Where to store the flags. These are X86_PTE_*.
606 * @param pHCPhys Where to store the HC physical address of the page.
607 * This is page aligned.
608 * @remark You should use PGMMapGetPage() for pages in a mapping.
609 */
610PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
611{
612 return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
613}
614
615
616/**
617 * Sets (replaces) the page flags for a range of pages in the shadow context.
618 *
619 * @returns VBox status.
620 * @param pVM VM handle.
621 * @param GCPtr The address of the first page.
622 * @param cb The size of the range in bytes.
623 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
624 * @remark You must use PGMMapSetPage() for pages in a mapping.
625 */
626PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
627{
628 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
629}
630
631
632/**
633 * Modify page flags for a range of pages in the shadow context.
634 *
635 * The existing flags are ANDed with the fMask and ORed with the fFlags.
636 *
637 * @returns VBox status code.
638 * @param pVM VM handle.
639 * @param GCPtr Virtual address of the first page in the range.
640 * @param cb Size (in bytes) of the range to apply the modification to.
641 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
642 * @param fMask The AND mask - page flags X86_PTE_*.
643 * Be very CAREFUL when ~'ing constants which could be 32-bit!
644 * @remark You must use PGMMapModifyPage() for pages in a mapping.
645 */
646PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
647{
648 /*
649 * Validate input.
650 */
651 if (fFlags & X86_PTE_PAE_PG_MASK)
652 {
653 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
654 return VERR_INVALID_PARAMETER;
655 }
656 if (!cb)
657 {
658 AssertFailed();
659 return VERR_INVALID_PARAMETER;
660 }
661
662 /*
663 * Align the input.
664 */
665 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
666 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
667 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
668
669 /*
670 * Call worker.
671 */
672 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
673}
674
675
676/**
677 * Gets effective Guest OS page information.
678 *
679 * When GCPtr is in a big page, the function will return as if it was a normal
680 * 4KB page. If the need for distinguishing between big and normal page becomes
681 * necessary at a later point, a PGMGstGetPage() will be created for that
682 * purpose.
683 *
684 * @returns VBox status.
685 * @param pVM VM Handle.
686 * @param GCPtr Guest Context virtual address of the page.
687 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
688 * @param pGCPhys Where to store the GC physical address of the page.
689 * This is page aligned. The fact that the
690 */
691PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
692{
693 return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
694}
695
696
697/**
698 * Checks if the page is present.
699 *
700 * @returns true if the page is present.
701 * @returns false if the page is not present.
702 * @param pVM The VM handle.
703 * @param GCPtr Address within the page.
704 */
705PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
706{
707 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
708 return VBOX_SUCCESS(rc);
709}
710
711
712/**
713 * Sets (replaces) the page flags for a range of pages in the guest's tables.
714 *
715 * @returns VBox status.
716 * @param pVM VM handle.
717 * @param GCPtr The address of the first page.
718 * @param cb The size of the range in bytes.
719 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
720 */
721PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
722{
723 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
724}
725
726
/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                  Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /* Note: the profile is stopped on every exit path to keep the timing honest. */
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);

    /*
     * Validate input: no physical address bits in the OR mask, non-empty range.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    if (!cb)
    {
        AssertFailed();
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input: page align the range (grow cb to whole pages, strip the
     * page offset from the start address).
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
    return rc;
}
778
779
/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page. Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 * @param   GCPhysPage  Physical address of the page to turn off access monitoring for.
 */
PGMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    /*
     * Validate the range: look the handler up by its registered start address
     * and make sure the requested page lies inside it.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (    GCPhysPage >= pCur->Core.Key
            &&  GCPhysPage <= pCur->Core.KeyLast)
        {
            /*
             * Ok, check that the type is right and then clear the flag.
             */
            unsigned fFlag;
            switch (pCur->enmType)
            {
                case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
                    fFlag = MM_RAM_FLAGS_PHYSICAL_WRITE;
                    break;

                case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
                    fFlag = MM_RAM_FLAGS_PHYSICAL_ALL;
                    break;

                case PGMPHYSHANDLERTYPE_MMIO:
                case PGMPHYSHANDLERTYPE_PHYSICAL:
                    /* These types cannot be toggled per page. */
                    AssertMsgFailed(("Cannot disable an MMIO or natural PHYSICAL access handler! enmType=%d\n", pCur->enmType));
                    return VERR_ACCESS_DENIED;

                default:
                    AssertMsgFailed(("Invalid mapping type %d\n", pCur->enmType));
                    return VERR_INTERNAL_ERROR;
            }

            /** @todo add a function which does both clear and set! */
            /* clear the monitoring flag and set the temp-off marker */
            PPGMRAMRANGE pHint = NULL;
            int rc = PGMRamFlagsClearByGCPhysWithHint(&pVM->pgm.s, GCPhysPage, fFlag, &pHint);
            if (VBOX_SUCCESS(rc))
                rc = PGMRamFlagsSetByGCPhysWithHint(&pVM->pgm.s, GCPhysPage, MM_RAM_FLAGS_PHYSICAL_TEMP_OFF, &pHint);
            return rc;
        }
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
846
847
/**
 * Turns access monitoring of a page within a monitored
 * physical write/all page access handler region back on.
 *
 * Inverse of PGMHandlerPhysicalPageTempOff().
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 * @param   GCPhysPage  Physical address of the page to turn on access monitoring for.
 */
PGMDECL(int) PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    /*
     * Validate the range: look the handler up by its registered start address
     * and make sure the requested page lies inside it.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (    GCPhysPage >= pCur->Core.Key
            &&  GCPhysPage <= pCur->Core.KeyLast)
        {
            /*
             * Ok, check that the type is right and then set the flag.
             */
            unsigned fFlag;
            switch (pCur->enmType)
            {
                case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
                    fFlag = MM_RAM_FLAGS_PHYSICAL_WRITE;
                    break;

                case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
                    fFlag = MM_RAM_FLAGS_PHYSICAL_ALL;
                    break;

                case PGMPHYSHANDLERTYPE_MMIO:
                case PGMPHYSHANDLERTYPE_PHYSICAL:
                    /* These types cannot be toggled per page. */
                    AssertMsgFailed(("Cannot enable an MMIO or natural PHYSICAL access handler! enmType=%d\n", pCur->enmType));
                    return VERR_ACCESS_DENIED;

                default:
                    AssertMsgFailed(("Invalid mapping type %d\n", pCur->enmType));
                    return VERR_INTERNAL_ERROR;
            }

            /** @todo add a function which does both clear and set! */
            /* set the monitoring flag and clear the temp-off marker */
            PPGMRAMRANGE pHint = NULL;
            int rc = PGMRamFlagsSetByGCPhysWithHint(&pVM->pgm.s, GCPhysPage, fFlag, &pHint);
            if (VBOX_SUCCESS(rc))
                rc = PGMRamFlagsClearByGCPhysWithHint(&pVM->pgm.s, GCPhysPage, MM_RAM_FLAGS_PHYSICAL_TEMP_OFF, &pHint);
            return rc;

        }
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
911
912
913/**
914 * Checks if a physical range is handled
915 *
916 * @returns boolean
917 * @param pVM VM Handle
918 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
919 */
920PGMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
921{
922 /*
923 * Find the handler.
924 */
925 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
926 if (pCur)
927 {
928 if ( GCPhys >= pCur->Core.Key
929 && GCPhys <= pCur->Core.KeyLast)
930 {
931 /*
932 * Validate type.
933 */
934 switch (pCur->enmType)
935 {
936 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
937 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
938 case PGMPHYSHANDLERTYPE_PHYSICAL:
939 case PGMPHYSHANDLERTYPE_MMIO:
940 return true;
941
942 default:
943 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
944 return false;
945 }
946 }
947 }
948
949 return false;
950}
951
952
#ifdef VBOX_STRICT
/**
 * Tree enumeration callback: logs one phys-to-virt handler range.
 *
 * @returns 0 to continue the enumeration.
 * @param   pNode   The PGMPHYS2VIRTHANDLER node being visited.
 * @param   pvUser  Unused.
 */
DECLCALLBACK(int) pgmVirtHandlerDumpPhysRange(PAVLROGCPHYSNODECORE pNode, void *pvUser)
{
    PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
    /* The owning virtual handler is located via a self-relative offset. */
    PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
    Log(("PHYS2VIRT: Range %VGp-%VGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
    return 0;
}


/**
 * Logs every range in the phys-to-virt handler tree (strict builds only).
 *
 * @param   pVM     The VM handle.
 */
void pgmHandlerVirtualDumpPhysPages(PVM pVM)
{
    RTAvlroGCPhysDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->PhysToVirtHandlers, true, pgmVirtHandlerDumpPhysRange, 0);
}
#endif /* VBOX_STRICT */
968
969
970/**
971 * Gets the current CR3 register value for the shadow memory context.
972 * @returns CR3 value.
973 * @param pVM The VM handle.
974 */
975PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
976{
977 switch (pVM->pgm.s.enmShadowMode)
978 {
979 case PGMMODE_32_BIT:
980 return pVM->pgm.s.HCPhys32BitPD;
981
982 case PGMMODE_PAE:
983 case PGMMODE_PAE_NX:
984 return pVM->pgm.s.HCPhysPaePDPTR;
985
986 case PGMMODE_AMD64:
987 case PGMMODE_AMD64_NX:
988 return pVM->pgm.s.HCPhysPaePML4;
989
990 default:
991 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
992 return ~0;
993 }
994}
995
996
/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 *
 * @returns CR3 value (host physical address of the shadow 32-bit page directory).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhys32BitPD;
}
1006
1007
/**
 * Gets the CR3 register value for the PAE shadow memory context.
 *
 * @returns CR3 value (host physical address of the shadow PAE PDPTR).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePDPTR;
}
1017
1018
/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 *
 * @returns CR3 value (host physical address of the shadow PML4).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePML4;
}
1028
1029
1030/**
1031 * Gets the current CR3 register value for the HC intermediate memory context.
1032 * @returns CR3 value.
1033 * @param pVM The VM handle.
1034 */
1035PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
1036{
1037 switch (pVM->pgm.s.enmHostMode)
1038 {
1039 case SUPPAGINGMODE_32_BIT:
1040 case SUPPAGINGMODE_32_BIT_GLOBAL:
1041 return pVM->pgm.s.HCPhysInterPD;
1042
1043 case SUPPAGINGMODE_PAE:
1044 case SUPPAGINGMODE_PAE_GLOBAL:
1045 case SUPPAGINGMODE_PAE_NX:
1046 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1047 return pVM->pgm.s.HCPhysInterPaePDPTR;
1048
1049 case SUPPAGINGMODE_AMD64:
1050 case SUPPAGINGMODE_AMD64_GLOBAL:
1051 case SUPPAGINGMODE_AMD64_NX:
1052 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1053 return pVM->pgm.s.HCPhysInterPaePDPTR;
1054
1055 default:
1056 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1057 return ~0;
1058 }
1059}
1060
1061
1062/**
1063 * Gets the current CR3 register value for the GC intermediate memory context.
1064 * @returns CR3 value.
1065 * @param pVM The VM handle.
1066 */
1067PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
1068{
1069 switch (pVM->pgm.s.enmShadowMode)
1070 {
1071 case PGMMODE_32_BIT:
1072 return pVM->pgm.s.HCPhysInterPD;
1073
1074 case PGMMODE_PAE:
1075 case PGMMODE_PAE_NX:
1076 return pVM->pgm.s.HCPhysInterPaePDPTR;
1077
1078 case PGMMODE_AMD64:
1079 case PGMMODE_AMD64_NX:
1080 return pVM->pgm.s.HCPhysInterPaePML4;
1081
1082 default:
1083 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1084 return ~0;
1085 }
1086}
1087
1088
/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 *
 * @returns CR3 value (host physical address of the intermediate 32-bit page directory).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}
1098
1099
/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 *
 * @returns CR3 value (host physical address of the intermediate PAE PDPTR).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPTR;
}
1109
1110
/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 *
 * @returns CR3 value (host physical address of the intermediate PML4).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}
1120
1121
/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPTR.
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM     VM handle.
 * @param   cr3     The new cr3.
 * @param   fGlobal Indicates whether this is a global flush or not.
 */
PGMDECL(int) PGMFlushTLB(PVM pVM, uint32_t cr3, bool fGlobal)
{
    /*
     * Always flag the necessary updates; necessary for hardware acceleration
     * (the non-global sync FF is set unconditionally, the global one only
     * for global flushes).
     */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);

    /*
     * When in real or protected mode there is no TLB flushing, but
     * we may still be called because of REM not caring/knowing this.
     * REM is simple and we wish to keep it that way.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
        return VINF_SUCCESS;
    LogFlow(("PGMFlushTLB: cr3=%#x OldCr3=%#x fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
    STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     * PAE/AMD64 CR3s use a different page mask than legacy 32-bit ones.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        /* CR3 actually changed: remap it and re-arm the write monitoring
           (monitoring is skipped while the mappings are fixed). */
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
    return rc;
}
1199
1200
/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
 * in several places, most importantly whenever the CR3 is loaded.
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   cr0     Guest context CR0 register.
 * @param   cr3     Guest context CR3 register.
 * @param   cr4     Guest context CR4 register.
 * @param   fGlobal Including global page directories or not.
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global. */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%08x cr3=%08x cr4=%08x fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' (both shadow+guest mode) function do the work and we'll
     * just keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%VRc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        /* PGM_SYNC_ALWAYS keeps the FFs raised so the next call resyncs again. */
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}
1273
1274
1275/**
1276 * Called whenever CR0 or CR4 in a way which may change
1277 * the paging mode.
1278 *
1279 * @returns VBox status code fit for scheduling in GC and R0.
1280 * @retval VINF_SUCCESS if the was no change, or it was successfully dealt with.
1281 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1282 * @param pVM VM handle.
1283 * @param cr0 The new cr0.
1284 * @param cr4 The new cr4.
1285 * @param efer The new extended feature enable register.
1286 */
1287PGMDECL(int) PGMChangeMode(PVM pVM, uint32_t cr0, uint32_t cr4, uint64_t efer)
1288{
1289 PGMMODE enmGuestMode;
1290
1291 /*
1292 * Calc the new guest mode.
1293 */
1294 if (!(cr0 & X86_CR0_PE))
1295 enmGuestMode = PGMMODE_REAL;
1296 else if (!(cr0 & X86_CR0_PG))
1297 enmGuestMode = PGMMODE_PROTECTED;
1298 else if (!(cr4 & X86_CR4_PAE))
1299 enmGuestMode = PGMMODE_32_BIT;
1300 else if (!(efer & MSR_K6_EFER_LME))
1301 {
1302 if (!(efer & MSR_K6_EFER_NXE))
1303 enmGuestMode = PGMMODE_PAE;
1304 else
1305 enmGuestMode = PGMMODE_PAE_NX;
1306 }
1307 else
1308 {
1309 if (!(efer & MSR_K6_EFER_NXE))
1310 enmGuestMode = PGMMODE_AMD64;
1311 else
1312 enmGuestMode = PGMMODE_AMD64_NX;
1313 }
1314
1315 /*
1316 * Did it change?
1317 */
1318 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1319 return VINF_SUCCESS;
1320#ifdef IN_RING3
1321 return pgmR3ChangeMode(pVM, enmGuestMode);
1322#else
1323 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1324 return VINF_PGM_CHANGE_MODE;
1325#endif
1326}
1327
1328
/**
 * Gets the current guest paging mode.
 *
 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
{
    return pVM->pgm.s.enmGuestMode;
}
1341
1342
/**
 * Gets the current shadow paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
{
    return pVM->pgm.s.enmShadowMode;
}
1353
1354
1355/**
1356 * Get mode name.
1357 *
1358 * @returns read-only name string.
1359 * @param enmMode The mode which name is desired.
1360 */
1361PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1362{
1363 switch (enmMode)
1364 {
1365 case PGMMODE_REAL: return "real";
1366 case PGMMODE_PROTECTED: return "protected";
1367 case PGMMODE_32_BIT: return "32-bit";
1368 case PGMMODE_PAE: return "PAE";
1369 case PGMMODE_PAE_NX: return "PAE+NX";
1370 case PGMMODE_AMD64: return "AMD64";
1371 case PGMMODE_AMD64_NX: return "AMD64+NX";
1372 default: return "unknown mode value";
1373 }
1374}
1375
1376
/**
 * Acquire the PGM lock.
 *
 * In GC and R0 the critical section cannot be waited on, so on contention
 * (VERR_SEM_BUSY) the acquisition is retried via a call to the ring-3 host.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}
1396
1397
/**
 * Release the PGM lock.
 *
 * Counterpart of pgmLock().
 *
 * @param   pVM     The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}
1408
1409
1410#ifdef VBOX_STRICT
1411
/**
 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
 * and its AVL enumerators.
 */
typedef struct PGMAHAFIS
{
    /** The VM handle. */
    PVM pVM;
    /** Number of errors. */
    unsigned cErrors;
    /** The flags we've found.
     * Accumulated by the enumeration callbacks and compared against fFlags. */
    unsigned fFlagsFound;
    /** The flags we're matching up to.
     * This is also on the stack as a const, thus only valid during enumeration. */
    unsigned fFlags;
    /** The current physical address. */
    RTGCPHYS GCPhys;
} PGMAHAFIS, *PPGMAHAFIS;
1430
1431/**
1432 * Verify virtual handler by matching physical address.
1433 *
1434 * @returns 0
1435 * @param pNode Pointer to a PGMVIRTHANDLER.
1436 * @param pvUser Pointer to user parameter.
1437 */
1438static DECLCALLBACK(int) pgmVirtHandlerVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
1439{
1440 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1441 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1442
1443 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1444 {
1445 if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
1446 {
1447 switch (pCur->enmType)
1448 {
1449 case PGMVIRTHANDLERTYPE_EIP:
1450 case PGMVIRTHANDLERTYPE_NORMAL: pState->fFlagsFound |= MM_RAM_FLAGS_VIRTUAL_HANDLER; break;
1451 case PGMVIRTHANDLERTYPE_WRITE: pState->fFlagsFound |= MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE; break;
1452 case PGMVIRTHANDLERTYPE_ALL: pState->fFlagsFound |= MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL; break;
1453 /* hypervisor handlers need no flags and wouldn't have nowhere to put them in any case. */
1454 case PGMVIRTHANDLERTYPE_HYPERVISOR:
1455 return 0;
1456 }
1457 if ( (pState->fFlags & (MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL))
1458 == pState->fFlagsFound)
1459 break;
1460 }
1461 }
1462 return 0;
1463}
1464
1465
/**
 * Verify a virtual handler.
 *
 * Enumeration callback for PGMAssertHandlerAndFlagsInSync: checks that the
 * handler's cached physical pages (aPhysToVirt) agree with the current guest
 * page tables and that each backing page carries the ram range flag bits this
 * handler type requires.
 *
 * @returns 0 (continue enumeration).
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to user parameter (PGMAHAFIS).
 */
static DECLCALLBACK(int) pgmVirtHandlerVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
    PVM pVM = pState->pVM;

    /* Unless unresolved (NIL), the page offset of the first cached physical
       key must equal the page offset of the handler's virtual address. */
    if (    pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS
        &&  (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->GCPtr & PAGE_OFFSET_MASK))
    {
        AssertMsgFailed(("virt handler phys out has incorrect key! %VGp %VGv %s\n",
                         pVirt->aPhysToVirt[0].Core.Key, pVirt->GCPtr, HCSTRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    /*
     * Calc the ram range flags this handler type requires.
     */
    unsigned fFlags;
    switch (pVirt->enmType)
    {
        case PGMVIRTHANDLERTYPE_EIP:
        case PGMVIRTHANDLERTYPE_NORMAL: fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER; break;
        case PGMVIRTHANDLERTYPE_WRITE:  fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE; break;
        case PGMVIRTHANDLERTYPE_ALL:    fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL; break;
        /* hypervisor handlers need no flags and would have nowhere to put them in any case. */
        case PGMVIRTHANDLERTYPE_HYPERVISOR:
            return 0;
        default:
            AssertMsgFailed(("unknown enmType=%d\n", pVirt->enmType));
            return 0;
    }

    /*
     * Check pages against flags.
     */
    RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->GCPtr;
    for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
    {
        RTGCPHYS GCPhysGst;
        uint64_t fGst;
        int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
        if (rc == VERR_PAGE_NOT_PRESENT)
        {
            /* Page not mapped by the guest: the cached entry must be unresolved. */
            if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
            {
                AssertMsgFailed(("virt handler phys out of sync. %VGp GCPhysNew=~0 iPage=%#x %VGv %s\n",
                                 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
                pState->cErrors++;
            }
            continue;
        }

        AssertRCReturn(rc, 0);
        /* The cached physical page must match what the guest page tables say now. */
        if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
        {
            AssertMsgFailed(("virt handler phys out of sync. %VGp GCPhysGst=%VGp iPage=%#x %VGv %s\n",
                             pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }

        /* Fetch the ram range flags for the backing page (returned in the
           low bits of the HCPhys value) and verify the required bits are set. */
        RTHCPHYS HCPhys;
        rc = PGMRamGCPhys2HCPhysWithFlags(&pVM->pgm.s, GCPhysGst, &HCPhys);
        if (VBOX_FAILURE(rc))
        {
            AssertMsgFailed(("virt handler getting ram flags rc=%Vrc. GCPhysGst=%VGp iPage=%#x %VGv %s\n",
                             rc, GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }

        if ((HCPhys & fFlags) != fFlags)
        {
            AssertMsgFailed(("virt handler flags mismatch. HCPhys=%VHp fFlags=%#x GCPhysGst=%VGp iPage=%#x %VGv %s\n",
                             HCPhys, fFlags, GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }
    } /* for pages in virtual mapping. */

    return 0;
}
1555
1556
/**
 * Asserts that the handlers+guest-page-tables == ramrange-flags and
 * that the physical addresses associated with virtual handlers are correct.
 *
 * @returns Number of mismatches.
 * @param   pVM     The VM handle.
 */
PGMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
{
    PPGM pPGM = &pVM->pgm.s;
    PGMAHAFIS State;
    State.cErrors = 0;
    State.pVM = pVM;

    /*
     * Check the RAM flags against the handlers.
     */
    for (PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges); pRam; pRam = CTXSUFF(pRam->pNext))
    {
        const unsigned cPages = pRam->cb >> PAGE_SHIFT;
        for (unsigned iPage = 0; iPage < cPages; iPage++)
        {
            State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);
            /* Handler-related flag bits stored alongside the HCPhys entry for this page. */
            const unsigned fFlags = pRam->aHCPhys[iPage]
                                  & ( MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL
                                     | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF);
            if (fFlags)
            {
                State.fFlagsFound = 0; /* build flags and compare. */
                /* NOTE(review): fFlagsFound is shared between the physical and
                   virtual checks below; if a page ever has both handler kinds,
                   the virtual comparison would also see the physical bits —
                   looks like a latent false positive, verify before relying on it. */

                /* physical first. (simple because of page alignment) */
                if (    !(fFlags & MM_RAM_FLAGS_PHYSICAL_TEMP_OFF)
                    &&  (fFlags & (MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL)))
                {
                    /* Exact range lookup first, then best-fit for a handler starting inside this page. */
                    PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTXSUFF(pTrees)->PhysHandlers, State.GCPhys);
                    if (!pPhys)
                    {
                        pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers, State.GCPhys, true);
                        if (    pPhys
                            &&  pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
                            pPhys = NULL;
                        Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
                    }
                    if (pPhys)
                    {
                        switch (pPhys->enmType)
                        {
                            case PGMPHYSHANDLERTYPE_PHYSICAL:       State.fFlagsFound |= MM_RAM_FLAGS_PHYSICAL_HANDLER; break;
                            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE: State.fFlagsFound |= MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE; break;
                            case PGMPHYSHANDLERTYPE_MMIO:
                            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:   State.fFlagsFound |= MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL; break;
                            default: AssertMsgFailed(("Invalid type phys type %d\n", pPhys->enmType)); State.cErrors++; break;
                        }
                        if (    (fFlags & (MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL))
                            !=  State.fFlagsFound)
                        {
                            AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%#x fFlags=%#x fFlagsFound=%#x %s\n",
                                             State.GCPhys, fFlags, State.fFlagsFound, pPhys->pszDesc));
                            State.cErrors++;
                        }

#ifdef IN_RING3
                        /* validate that REM is handling it. */
                        if (!REMR3IsPageAccessHandled(pVM, State.GCPhys))
                        {
                            AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%#x fFlags=%#x %s\n",
                                             State.GCPhys, fFlags, pPhys->pszDesc));
                            State.cErrors++;
                        }
#endif
                    }
                    else
                    {
                        AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%#x\n", State.GCPhys));
                        State.cErrors++;
                    }
                }

                /* virtual flags. */
                if (fFlags & (MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL))
                {
                    State.fFlags = fFlags;
                    RTAvlroGCPtrDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->VirtHandlers, true, pgmVirtHandlerVerifyOneByPhysAddr, &State);
                    if (    (fFlags & (MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_VIRTUAL_ALL))
                        !=  State.fFlagsFound)
                    {
                        AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%#x fFlags=%#x fFlagsFound=%#x\n",
                                         State.GCPhys, fFlags, State.fFlagsFound));
                        State.cErrors++;
                    }

                }
            }
        } /* foreach page in ram range. */
    } /* foreach ram range. */

    /*
     * Check that the physical addresses of the virtual handlers matches up.
     */
    RTAvlroGCPtrDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->VirtHandlers, true, pgmVirtHandlerVerifyOne, &State);

    return State.cErrors;
}
1660
1661
1662/**
1663 * Asserts that there are no mapping conflicts.
1664 *
1665 * @returns Number of conflicts.
1666 * @param pVM The VM Handle.
1667 */
1668PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
1669{
1670 unsigned cErrors = 0;
1671
1672 /*
1673 * Check for mapping conflicts.
1674 */
1675 for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
1676 pMapping;
1677 pMapping = CTXALLSUFF(pMapping->pNext))
1678 {
1679 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
1680 for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
1681 GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
1682 GCPtr += PAGE_SIZE)
1683 {
1684 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
1685 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
1686 {
1687 AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
1688 cErrors++;
1689 break;
1690 }
1691 }
1692 }
1693
1694 return cErrors;
1695}
1696
1697
/**
 * Asserts that everything related to the guest CR3 is correctly shadowed.
 *
 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
 * and assert the correctness of the guest CR3 mapping before asserting that the
 * shadow page tables is in sync with the guest page tables.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 * @param   cr3     The current guest CR3 register value.
 * @param   cr4     The current guest CR4 register value.
 */
PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint32_t cr3, uint32_t cr4)
{
    /* Delegates to the current shadow+guest mode implementation, checking
       the full guest address range (0 .. ~0). Reuses the SyncCR3 profile sample. */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    return cErrors;
}
1717
1718#endif /* VBOX_STRICT */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette