VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 8098

Last change on this file since 8098 was 7907, checked in by vboxsync, 17 years ago

LogFlow update for 64 bits crx registers

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.3 KB
Line 
1/* $Id: PGMAll.cpp 7907 2008-04-11 11:26:07Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/pgm.h>
23#include <VBox/cpum.h>
24#include <VBox/selm.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/csam.h>
30#include <VBox/patm.h>
31#include <VBox/trpm.h>
32#include <VBox/rem.h>
33#include <VBox/em.h>
34#include <VBox/hwaccm.h>
35#include "PGMInternal.h"
36#include <VBox/vm.h>
37#include <iprt/assert.h>
38#include <iprt/asm.h>
39#include <iprt/string.h>
40#include <VBox/log.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43
44
45/*******************************************************************************
46* Structures and Typedefs *
47*******************************************************************************/
48/**
49 * Stated structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
50 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
51 */
52typedef struct PGMHVUSTATE
53{
54 /** The VM handle. */
55 PVM pVM;
56 /** The todo flags. */
57 RTUINT fTodo;
58 /** The CR4 register value. */
59 uint32_t cr4;
60} PGMHVUSTATE, *PPGMHVUSTATE;
61
62
63/*******************************************************************************
64* Internal Functions *
65*******************************************************************************/
66
67#if 1///@todo ndef RT_ARCH_AMD64
68/*
69 * Shadow - 32-bit mode
70 */
71#define PGM_SHW_TYPE PGM_TYPE_32BIT
72#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
73#include "PGMAllShw.h"
74
75/* Guest - real mode */
76#define PGM_GST_TYPE PGM_TYPE_REAL
77#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
78#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
79#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
80#include "PGMAllGst.h"
81#include "PGMAllBth.h"
82#undef BTH_PGMPOOLKIND_PT_FOR_PT
83#undef PGM_BTH_NAME
84#undef PGM_GST_TYPE
85#undef PGM_GST_NAME
86
87/* Guest - protected mode */
88#define PGM_GST_TYPE PGM_TYPE_PROT
89#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
90#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
91#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
92#include "PGMAllGst.h"
93#include "PGMAllBth.h"
94#undef BTH_PGMPOOLKIND_PT_FOR_PT
95#undef PGM_BTH_NAME
96#undef PGM_GST_TYPE
97#undef PGM_GST_NAME
98
99/* Guest - 32-bit mode */
100#define PGM_GST_TYPE PGM_TYPE_32BIT
101#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
102#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
103#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
104#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
105#include "PGMAllGst.h"
106#include "PGMAllBth.h"
107#undef BTH_PGMPOOLKIND_PT_FOR_BIG
108#undef BTH_PGMPOOLKIND_PT_FOR_PT
109#undef PGM_BTH_NAME
110#undef PGM_GST_TYPE
111#undef PGM_GST_NAME
112
113#undef PGM_SHW_TYPE
114#undef PGM_SHW_NAME
115#endif /* !RT_ARCH_AMD64 */
116
117
118/*
119 * Shadow - PAE mode
120 */
121#define PGM_SHW_TYPE PGM_TYPE_PAE
122#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
123#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
124#include "PGMAllShw.h"
125
126/* Guest - real mode */
127#define PGM_GST_TYPE PGM_TYPE_REAL
128#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
129#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
130#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
131#include "PGMAllBth.h"
132#undef BTH_PGMPOOLKIND_PT_FOR_PT
133#undef PGM_BTH_NAME
134#undef PGM_GST_TYPE
135#undef PGM_GST_NAME
136
137/* Guest - protected mode */
138#define PGM_GST_TYPE PGM_TYPE_PROT
139#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
140#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
141#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
142#include "PGMAllBth.h"
143#undef BTH_PGMPOOLKIND_PT_FOR_PT
144#undef PGM_BTH_NAME
145#undef PGM_GST_TYPE
146#undef PGM_GST_NAME
147
148/* Guest - 32-bit mode */
149#define PGM_GST_TYPE PGM_TYPE_32BIT
150#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
151#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
152#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
153#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
154#include "PGMAllBth.h"
155#undef BTH_PGMPOOLKIND_PT_FOR_BIG
156#undef BTH_PGMPOOLKIND_PT_FOR_PT
157#undef PGM_BTH_NAME
158#undef PGM_GST_TYPE
159#undef PGM_GST_NAME
160
161
162/* Guest - PAE mode */
163#define PGM_GST_TYPE PGM_TYPE_PAE
164#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
165#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
166#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
167#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
168#include "PGMAllGst.h"
169#include "PGMAllBth.h"
170#undef BTH_PGMPOOLKIND_PT_FOR_BIG
171#undef BTH_PGMPOOLKIND_PT_FOR_PT
172#undef PGM_BTH_NAME
173#undef PGM_GST_TYPE
174#undef PGM_GST_NAME
175
176#undef PGM_SHW_TYPE
177#undef PGM_SHW_NAME
178
179
180/*
181 * Shadow - AMD64 mode
182 */
183#define PGM_SHW_TYPE PGM_TYPE_AMD64
184#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
185#include "PGMAllShw.h"
186
187/* Guest - AMD64 mode */
188#define PGM_GST_TYPE PGM_TYPE_AMD64
189#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
190#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
191#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
192#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
193#include "PGMAllGst.h"
194#include "PGMAllBth.h"
195#undef BTH_PGMPOOLKIND_PT_FOR_BIG
196#undef BTH_PGMPOOLKIND_PT_FOR_PT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205
206/**
207 * #PF Handler.
208 *
209 * @returns VBox status code (appropriate for trap handling and GC return).
210 * @param pVM VM Handle.
211 * @param uErr The trap error code.
212 * @param pRegFrame Trap register frame.
213 * @param pvFault The fault address.
214 */
215PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
216{
217 LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
218 STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
219 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );
220
221
222#ifdef VBOX_WITH_STATISTICS
223 /*
224 * Error code stats.
225 */
226 if (uErr & X86_TRAP_PF_US)
227 {
228 if (!(uErr & X86_TRAP_PF_P))
229 {
230 if (uErr & X86_TRAP_PF_RW)
231 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
232 else
233 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
234 }
235 else if (uErr & X86_TRAP_PF_RW)
236 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
237 else if (uErr & X86_TRAP_PF_RSVD)
238 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
239 else if (uErr & X86_TRAP_PF_ID)
240 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
241 else
242 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
243 }
244 else
245 { /* Supervisor */
246 if (!(uErr & X86_TRAP_PF_P))
247 {
248 if (uErr & X86_TRAP_PF_RW)
249 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
250 else
251 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
252 }
253 else if (uErr & X86_TRAP_PF_RW)
254 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
255 else if (uErr & X86_TRAP_PF_ID)
256 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
257 else if (uErr & X86_TRAP_PF_RSVD)
258 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
259 }
260#endif
261
262 /*
263 * Call the worker.
264 */
265 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
266 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
267 rc = VINF_SUCCESS;
268 STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
269 pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
270 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
271 return rc;
272}
273
274
275/**
276 * Prefetch a page
277 *
278 * Typically used to sync commonly used pages before entering raw mode
279 * after a CR3 reload.
280 *
281 * @returns VBox status code suitable for scheduling.
282 * @retval VINF_SUCCESS on success.
283 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
284 * @param pVM VM handle.
285 * @param GCPtrPage Page to invalidate.
286 */
287PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
288{
289 STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
290 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
291 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
292 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
293 return rc;
294}
295
296
297/**
298 * Gets the mapping corresponding to the specified address (if any).
299 *
300 * @returns Pointer to the mapping.
301 * @returns NULL if not
302 *
303 * @param pVM The virtual machine.
304 * @param GCPtr The guest context pointer.
305 */
306PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
307{
308 PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
309 while (pMapping)
310 {
311 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
312 break;
313 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
314 {
315 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
316 return pMapping;
317 }
318 pMapping = CTXALLSUFF(pMapping->pNext);
319 }
320 return NULL;
321}
322
323
324/**
325 * Verifies a range of pages for read or write access
326 *
327 * Only checks the guest's page tables
328 *
329 * @returns VBox status code.
330 * @param pVM VM handle.
331 * @param Addr Guest virtual address to check
332 * @param cbSize Access size
333 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
334 */
335PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
336{
337 /*
338 * Validate input.
339 */
340 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
341 {
342 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
343 return VERR_INVALID_PARAMETER;
344 }
345
346 uint64_t fPage;
347 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
348 if (VBOX_FAILURE(rc))
349 {
350 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
351 return VINF_EM_RAW_GUEST_TRAP;
352 }
353
354 /*
355 * Check if the access would cause a page fault
356 *
357 * Note that hypervisor page directories are not present in the guest's tables, so this check
358 * is sufficient.
359 */
360 bool fWrite = !!(fAccess & X86_PTE_RW);
361 bool fUser = !!(fAccess & X86_PTE_US);
362 if ( !(fPage & X86_PTE_P)
363 || (fWrite && !(fPage & X86_PTE_RW))
364 || (fUser && !(fPage & X86_PTE_US)) )
365 {
366 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
367 return VINF_EM_RAW_GUEST_TRAP;
368 }
369 if ( VBOX_SUCCESS(rc)
370 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
371 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
372 return rc;
373}
374
375
376/**
377 * Verifies a range of pages for read or write access
378 *
379 * Supports handling of pages marked for dirty bit tracking and CSAM
380 *
381 * @returns VBox status code.
382 * @param pVM VM handle.
383 * @param Addr Guest virtual address to check
384 * @param cbSize Access size
385 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
386 */
387PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
388{
389 /*
390 * Validate input.
391 */
392 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
393 {
394 AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
395 return VERR_INVALID_PARAMETER;
396 }
397
398 uint64_t fPageGst;
399 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
400 if (VBOX_FAILURE(rc))
401 {
402 Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
403 return VINF_EM_RAW_GUEST_TRAP;
404 }
405
406 /*
407 * Check if the access would cause a page fault
408 *
409 * Note that hypervisor page directories are not present in the guest's tables, so this check
410 * is sufficient.
411 */
412 const bool fWrite = !!(fAccess & X86_PTE_RW);
413 const bool fUser = !!(fAccess & X86_PTE_US);
414 if ( !(fPageGst & X86_PTE_P)
415 || (fWrite && !(fPageGst & X86_PTE_RW))
416 || (fUser && !(fPageGst & X86_PTE_US)) )
417 {
418 Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
419 return VINF_EM_RAW_GUEST_TRAP;
420 }
421
422 /*
423 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
424 */
425 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
426 if ( rc == VERR_PAGE_NOT_PRESENT
427 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
428 {
429 /*
430 * Page is not present in our page tables.
431 * Try to sync it!
432 */
433 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
434 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
435 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
436 if (rc != VINF_SUCCESS)
437 return rc;
438 }
439 else
440 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
441
442#if 0 /* def VBOX_STRICT; triggers too often now */
443 /*
444 * This check is a bit paranoid, but useful.
445 */
446 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
447 uint64_t fPageShw;
448 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
449 if ( (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
450 || (fWrite && !(fPageShw & X86_PTE_RW))
451 || (fUser && !(fPageShw & X86_PTE_US)) )
452 {
453 AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
454 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
455 return VINF_EM_RAW_GUEST_TRAP;
456 }
457#endif
458
459 if ( VBOX_SUCCESS(rc)
460 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
461 || Addr + cbSize < Addr))
462 {
463 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
464 for (;;)
465 {
466 Addr += PAGE_SIZE;
467 if (cbSize > PAGE_SIZE)
468 cbSize -= PAGE_SIZE;
469 else
470 cbSize = 1;
471 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
472 if (rc != VINF_SUCCESS)
473 break;
474 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
475 break;
476 }
477 }
478 return rc;
479}
480
481
482#ifndef IN_GC
483/**
484 * Emulation of the invlpg instruction (HC only actually).
485 *
486 * @returns VBox status code.
487 * @param pVM VM handle.
488 * @param GCPtrPage Page to invalidate.
489 * @remark ASSUMES the page table entry or page directory is
490 * valid. Fairly safe, but there could be edge cases!
491 * @todo Flush page or page directory only if necessary!
492 */
493PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
494{
495 int rc;
496
497 LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));
498
499 /** @todo merge PGMGCInvalidatePage with this one */
500
501#ifndef IN_RING3
502 /*
503 * Notify the recompiler so it can record this instruction.
504 * Failure happens when it's out of space. We'll return to HC in that case.
505 */
506 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
507 if (VBOX_FAILURE(rc))
508 return rc;
509#endif
510
511 STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
512 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
513 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
514
515#ifndef IN_RING0
516 /*
517 * Check if we have a pending update of the CR3 monitoring.
518 */
519 if ( VBOX_SUCCESS(rc)
520 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
521 {
522 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
523 Assert(!pVM->pgm.s.fMappingsFixed);
524 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
525 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
526 }
527#endif
528
529#ifdef IN_RING3
530 /*
531 * Inform CSAM about the flush
532 */
533 /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
534 CSAMR3FlushPage(pVM, GCPtrPage);
535#endif
536 return rc;
537}
538#endif
539
540
541/**
542 * Executes an instruction using the interpreter.
543 *
544 * @returns VBox status code (appropriate for trap handling and GC return).
545 * @param pVM VM handle.
546 * @param pRegFrame Register frame.
547 * @param pvFault Fault address.
548 */
549PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
550{
551 uint32_t cb;
552 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
553 if (rc == VERR_EM_INTERPRETER)
554 rc = VINF_EM_RAW_EMULATE_INSTR;
555 if (rc != VINF_SUCCESS)
556 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
557 return rc;
558}
559
560
561/**
562 * Gets effective page information (from the VMM page directory).
563 *
564 * @returns VBox status.
565 * @param pVM VM Handle.
566 * @param GCPtr Guest Context virtual address of the page.
567 * @param pfFlags Where to store the flags. These are X86_PTE_*.
568 * @param pHCPhys Where to store the HC physical address of the page.
569 * This is page aligned.
570 * @remark You should use PGMMapGetPage() for pages in a mapping.
571 */
572PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
573{
574 return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
575}
576
577
578/**
579 * Sets (replaces) the page flags for a range of pages in the shadow context.
580 *
581 * @returns VBox status.
582 * @param pVM VM handle.
583 * @param GCPtr The address of the first page.
584 * @param cb The size of the range in bytes.
585 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
586 * @remark You must use PGMMapSetPage() for pages in a mapping.
587 */
588PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
589{
590 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
591}
592
593
594/**
595 * Modify page flags for a range of pages in the shadow context.
596 *
597 * The existing flags are ANDed with the fMask and ORed with the fFlags.
598 *
599 * @returns VBox status code.
600 * @param pVM VM handle.
601 * @param GCPtr Virtual address of the first page in the range.
602 * @param cb Size (in bytes) of the range to apply the modification to.
603 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
604 * @param fMask The AND mask - page flags X86_PTE_*.
605 * Be very CAREFUL when ~'ing constants which could be 32-bit!
606 * @remark You must use PGMMapModifyPage() for pages in a mapping.
607 */
608PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
609{
610 /*
611 * Validate input.
612 */
613 if (fFlags & X86_PTE_PAE_PG_MASK)
614 {
615 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
616 return VERR_INVALID_PARAMETER;
617 }
618 if (!cb)
619 {
620 AssertFailed();
621 return VERR_INVALID_PARAMETER;
622 }
623
624 /*
625 * Align the input.
626 */
627 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
628 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
629 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
630
631 /*
632 * Call worker.
633 */
634 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
635}
636
637
638/**
639 * Gets effective Guest OS page information.
640 *
641 * When GCPtr is in a big page, the function will return as if it was a normal
642 * 4KB page. If the need for distinguishing between big and normal page becomes
643 * necessary at a later point, a PGMGstGetPage() will be created for that
644 * purpose.
645 *
646 * @returns VBox status.
647 * @param pVM VM Handle.
648 * @param GCPtr Guest Context virtual address of the page.
649 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
650 * @param pGCPhys Where to store the GC physical address of the page.
651 * This is page aligned. The fact that the
652 */
653PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
654{
655 return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
656}
657
658
659/**
660 * Checks if the page is present.
661 *
662 * @returns true if the page is present.
663 * @returns false if the page is not present.
664 * @param pVM The VM handle.
665 * @param GCPtr Address within the page.
666 */
667PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
668{
669 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
670 return VBOX_SUCCESS(rc);
671}
672
673
674/**
675 * Sets (replaces) the page flags for a range of pages in the guest's tables.
676 *
677 * @returns VBox status.
678 * @param pVM VM handle.
679 * @param GCPtr The address of the first page.
680 * @param cb The size of the range in bytes.
681 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
682 */
683PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
684{
685 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
686}
687
688
689/**
690 * Modify page flags for a range of pages in the guest's tables
691 *
692 * The existing flags are ANDed with the fMask and ORed with the fFlags.
693 *
694 * @returns VBox status code.
695 * @param pVM VM handle.
696 * @param GCPtr Virtual address of the first page in the range.
697 * @param cb Size (in bytes) of the range to apply the modification to.
698 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
699 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
700 * Be very CAREFUL when ~'ing constants which could be 32-bit!
701 */
702PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
703{
704 STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
705
706 /*
707 * Validate input.
708 */
709 if (fFlags & X86_PTE_PAE_PG_MASK)
710 {
711 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
712 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
713 return VERR_INVALID_PARAMETER;
714 }
715
716 if (!cb)
717 {
718 AssertFailed();
719 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
720 return VERR_INVALID_PARAMETER;
721 }
722
723 LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
724
725 /*
726 * Adjust input.
727 */
728 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
729 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
730 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
731
732 /*
733 * Call worker.
734 */
735 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
736
737 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
738 return rc;
739}
740
741
742/**
743 * Gets the current CR3 register value for the shadow memory context.
744 * @returns CR3 value.
745 * @param pVM The VM handle.
746 */
747PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
748{
749 switch (pVM->pgm.s.enmShadowMode)
750 {
751 case PGMMODE_32_BIT:
752 return pVM->pgm.s.HCPhys32BitPD;
753
754 case PGMMODE_PAE:
755 case PGMMODE_PAE_NX:
756 return pVM->pgm.s.HCPhysPaePDPT;
757
758 case PGMMODE_AMD64:
759 case PGMMODE_AMD64_NX:
760 return pVM->pgm.s.HCPhysPaePML4;
761
762 default:
763 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
764 return ~0;
765 }
766}
767
768
769/**
770 * Gets the CR3 register value for the 32-Bit shadow memory context.
771 * @returns CR3 value.
772 * @param pVM The VM handle.
773 */
774PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
775{
776 return pVM->pgm.s.HCPhys32BitPD;
777}
778
779
780/**
781 * Gets the CR3 register value for the PAE shadow memory context.
782 * @returns CR3 value.
783 * @param pVM The VM handle.
784 */
785PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
786{
787 return pVM->pgm.s.HCPhysPaePDPT;
788}
789
790
791/**
792 * Gets the CR3 register value for the AMD64 shadow memory context.
793 * @returns CR3 value.
794 * @param pVM The VM handle.
795 */
796PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
797{
798 return pVM->pgm.s.HCPhysPaePML4;
799}
800
801
802/**
803 * Gets the current CR3 register value for the HC intermediate memory context.
804 * @returns CR3 value.
805 * @param pVM The VM handle.
806 */
807PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
808{
809 switch (pVM->pgm.s.enmHostMode)
810 {
811 case SUPPAGINGMODE_32_BIT:
812 case SUPPAGINGMODE_32_BIT_GLOBAL:
813 return pVM->pgm.s.HCPhysInterPD;
814
815 case SUPPAGINGMODE_PAE:
816 case SUPPAGINGMODE_PAE_GLOBAL:
817 case SUPPAGINGMODE_PAE_NX:
818 case SUPPAGINGMODE_PAE_GLOBAL_NX:
819 return pVM->pgm.s.HCPhysInterPaePDPT;
820
821 case SUPPAGINGMODE_AMD64:
822 case SUPPAGINGMODE_AMD64_GLOBAL:
823 case SUPPAGINGMODE_AMD64_NX:
824 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
825 return pVM->pgm.s.HCPhysInterPaePDPT;
826
827 default:
828 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
829 return ~0;
830 }
831}
832
833
834/**
835 * Gets the current CR3 register value for the GC intermediate memory context.
836 * @returns CR3 value.
837 * @param pVM The VM handle.
838 */
839PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
840{
841 switch (pVM->pgm.s.enmShadowMode)
842 {
843 case PGMMODE_32_BIT:
844 return pVM->pgm.s.HCPhysInterPD;
845
846 case PGMMODE_PAE:
847 case PGMMODE_PAE_NX:
848 return pVM->pgm.s.HCPhysInterPaePDPT;
849
850 case PGMMODE_AMD64:
851 case PGMMODE_AMD64_NX:
852 return pVM->pgm.s.HCPhysInterPaePML4;
853
854 default:
855 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
856 return ~0;
857 }
858}
859
860
861/**
862 * Gets the CR3 register value for the 32-Bit intermediate memory context.
863 * @returns CR3 value.
864 * @param pVM The VM handle.
865 */
866PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
867{
868 return pVM->pgm.s.HCPhysInterPD;
869}
870
871
872/**
873 * Gets the CR3 register value for the PAE intermediate memory context.
874 * @returns CR3 value.
875 * @param pVM The VM handle.
876 */
877PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
878{
879 return pVM->pgm.s.HCPhysInterPaePDPT;
880}
881
882
883/**
884 * Gets the CR3 register value for the AMD64 intermediate memory context.
885 * @returns CR3 value.
886 * @param pVM The VM handle.
887 */
888PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
889{
890 return pVM->pgm.s.HCPhysInterPaePML4;
891}
892
893
894/**
895 * Performs and schedules necessary updates following a CR3 load or reload.
896 *
897 * This will normally involve mapping the guest PD or nPDPT
898 *
899 * @returns VBox status code.
900 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
901 * safely be ignored and overridden since the FF will be set too then.
902 * @param pVM VM handle.
903 * @param cr3 The new cr3.
904 * @param fGlobal Indicates whether this is a global flush or not.
905 */
906PGMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
907{
908 STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);
909
910 /*
911 * Always flag the necessary updates; necessary for hardware acceleration
912 */
913 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
914 if (fGlobal)
915 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
916 LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
917
918 /*
919 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
920 */
921 int rc = VINF_SUCCESS;
922 RTGCPHYS GCPhysCR3;
923 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
924 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
925 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
926 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
927 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
928 else
929 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
930 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
931 {
932 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
933 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
934 if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
935 {
936 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
937 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
938 }
939 if (fGlobal)
940 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
941 else
942 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
943 }
944 else
945 {
946 /*
947 * Check if we have a pending update of the CR3 monitoring.
948 */
949 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
950 {
951 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
952 Assert(!pVM->pgm.s.fMappingsFixed);
953 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
954 }
955 if (fGlobal)
956 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
957 else
958 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
959 }
960
961 STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
962 return rc;
963}
964
965
/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two force action
 * flags are set in several places, most importantly whenever the CR3 is
 * loaded.
 *
 * @returns VBox status code.
 * @param pVM The virtual machine.
 * @param cr0 Guest context CR0 register.
 * @param cr3 Guest context CR3 register.
 * @param cr4 Guest context CR4 register.
 * @param fGlobal Including global page directories or not.
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        /* Paging cannot be enabled in real/protected mode (PG=1 requires PE=1 and a paged mode). */
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global. */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' function do the work and we'll just keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%VRc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        /* PGM_SYNC_ALWAYS keeps the force action flags set so we resync on every occasion. */
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}
1038
1039
1040/**
1041 * Called whenever CR0 or CR4 in a way which may change
1042 * the paging mode.
1043 *
1044 * @returns VBox status code fit for scheduling in GC and R0.
1045 * @retval VINF_SUCCESS if the was no change, or it was successfully dealt with.
1046 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1047 * @param pVM VM handle.
1048 * @param cr0 The new cr0.
1049 * @param cr4 The new cr4.
1050 * @param efer The new extended feature enable register.
1051 */
1052PGMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1053{
1054 PGMMODE enmGuestMode;
1055
1056 /*
1057 * Calc the new guest mode.
1058 */
1059 if (!(cr0 & X86_CR0_PE))
1060 enmGuestMode = PGMMODE_REAL;
1061 else if (!(cr0 & X86_CR0_PG))
1062 enmGuestMode = PGMMODE_PROTECTED;
1063 else if (!(cr4 & X86_CR4_PAE))
1064 enmGuestMode = PGMMODE_32_BIT;
1065 else if (!(efer & MSR_K6_EFER_LME))
1066 {
1067 if (!(efer & MSR_K6_EFER_NXE))
1068 enmGuestMode = PGMMODE_PAE;
1069 else
1070 enmGuestMode = PGMMODE_PAE_NX;
1071 }
1072 else
1073 {
1074 if (!(efer & MSR_K6_EFER_NXE))
1075 enmGuestMode = PGMMODE_AMD64;
1076 else
1077 enmGuestMode = PGMMODE_AMD64_NX;
1078 }
1079
1080 /*
1081 * Did it change?
1082 */
1083 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1084 return VINF_SUCCESS;
1085#ifdef IN_RING3
1086 return pgmR3ChangeMode(pVM, enmGuestMode);
1087#else
1088 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1089 return VINF_PGM_CHANGE_MODE;
1090#endif
1091}
1092
1093
1094/**
1095 * Gets the current guest paging mode.
1096 *
1097 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1098 *
1099 * @returns The current paging mode.
1100 * @param pVM The VM handle.
1101 */
1102PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1103{
1104 return pVM->pgm.s.enmGuestMode;
1105}
1106
1107
1108/**
1109 * Gets the current shadow paging mode.
1110 *
1111 * @returns The current paging mode.
1112 * @param pVM The VM handle.
1113 */
1114PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1115{
1116 return pVM->pgm.s.enmShadowMode;
1117}
1118
1119
1120/**
1121 * Get mode name.
1122 *
1123 * @returns read-only name string.
1124 * @param enmMode The mode which name is desired.
1125 */
1126PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1127{
1128 switch (enmMode)
1129 {
1130 case PGMMODE_REAL: return "real";
1131 case PGMMODE_PROTECTED: return "protected";
1132 case PGMMODE_32_BIT: return "32-bit";
1133 case PGMMODE_PAE: return "PAE";
1134 case PGMMODE_PAE_NX: return "PAE+NX";
1135 case PGMMODE_AMD64: return "AMD64";
1136 case PGMMODE_AMD64_NX: return "AMD64+NX";
1137 default: return "unknown mode value";
1138 }
1139}
1140
1141
/**
 * Acquire the PGM lock.
 *
 * In GC and R0 the critical section cannot be waited on directly; if it is
 * busy, the request is forwarded via a VMMCALLHOST_PGM_LOCK call instead.
 *
 * @returns VBox status code
 * @param pVM The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    /* Pass VERR_SEM_BUSY as the busy status so we get a failure instead of blocking. */
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}
1161
1162
/**
 * Release the PGM lock.
 *
 * Counterpart to pgmLock(); leaves the PGM critical section.
 *
 * @param pVM The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}
1173
1174
1175#ifdef VBOX_STRICT
1176
/**
 * Asserts that there are no mapping conflicts.
 *
 * Walks every registered mapping and verifies, page by page, that the guest
 * has no page table entry present in the range the mapping occupies.
 *
 * @returns Number of conflicts.
 * @param pVM The VM Handle.
 */
PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
{
    unsigned cErrors = 0;

    /*
     * Check for mapping conflicts.
     */
    for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
         pMapping;
         pMapping = CTXALLSUFF(pMapping->pNext))
    {
        /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
        for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
             GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
             GCPtr += PAGE_SIZE)
        {
            /* Any result other than "page table not present" means the guest
               is using this address too, i.e. a conflict with the mapping. */
            int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
            if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
            {
                AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
                cErrors++;
                break;
            }
        }
    }

    return cErrors;
}
1211
1212
/**
 * Asserts that everything related to the guest CR3 is correctly shadowed.
 *
 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
 * and assert the correctness of the guest CR3 mapping before asserting that the
 * shadow page tables is in sync with the guest page tables.
 *
 * @returns Number of conflicts.
 * @param pVM The VM Handle.
 * @param cr3 The current guest CR3 register value.
 * @param cr4 The current guest CR4 register value.
 */
PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
{
    /* Delegate to the mode-specific 'Bth' worker over the whole address
       space (0 .. ~0), charging the time to the SyncCR3 profile counter. */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    return cErrors;
}
1232
1233#endif /* VBOX_STRICT */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette