VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 8863

Last change on this file since 8863 was 8533, checked in by vboxsync, 17 years ago

Start of 64 bits paging support

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.9 KB
Line 
1/* $Id: PGMAll.cpp 8533 2008-05-02 16:04:51Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include "PGMInternal.h"
40#include <VBox/vm.h>
41#include <iprt/assert.h>
42#include <iprt/asm.h>
43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
52/**
53 * Stated structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
54 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
55 */
56typedef struct PGMHVUSTATE
57{
58 /** The VM handle. */
59 PVM pVM;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70
71#if 1///@todo ndef RT_ARCH_AMD64
72/*
73 * Shadow - 32-bit mode
74 */
75#define PGM_SHW_TYPE PGM_TYPE_32BIT
76#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
77#include "PGMAllShw.h"
78
79/* Guest - real mode */
80#define PGM_GST_TYPE PGM_TYPE_REAL
81#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
82#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
83#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
84#include "PGMAllGst.h"
85#include "PGMAllBth.h"
86#undef BTH_PGMPOOLKIND_PT_FOR_PT
87#undef PGM_BTH_NAME
88#undef PGM_GST_TYPE
89#undef PGM_GST_NAME
90
91/* Guest - protected mode */
92#define PGM_GST_TYPE PGM_TYPE_PROT
93#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
94#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
95#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef PGM_BTH_NAME
100#undef PGM_GST_TYPE
101#undef PGM_GST_NAME
102
103/* Guest - 32-bit mode */
104#define PGM_GST_TYPE PGM_TYPE_32BIT
105#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
106#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
107#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
108#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_BIG
112#undef BTH_PGMPOOLKIND_PT_FOR_PT
113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117#undef PGM_SHW_TYPE
118#undef PGM_SHW_NAME
119#endif /* !RT_ARCH_AMD64 */
120
121
122/*
123 * Shadow - PAE mode
124 */
125#define PGM_SHW_TYPE PGM_TYPE_PAE
126#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
127#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
128#include "PGMAllShw.h"
129
130/* Guest - real mode */
131#define PGM_GST_TYPE PGM_TYPE_REAL
132#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
133#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
134#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
135#include "PGMAllBth.h"
136#undef BTH_PGMPOOLKIND_PT_FOR_PT
137#undef PGM_BTH_NAME
138#undef PGM_GST_TYPE
139#undef PGM_GST_NAME
140
141/* Guest - protected mode */
142#define PGM_GST_TYPE PGM_TYPE_PROT
143#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
144#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
145#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
146#include "PGMAllBth.h"
147#undef BTH_PGMPOOLKIND_PT_FOR_PT
148#undef PGM_BTH_NAME
149#undef PGM_GST_TYPE
150#undef PGM_GST_NAME
151
152/* Guest - 32-bit mode */
153#define PGM_GST_TYPE PGM_TYPE_32BIT
154#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
155#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
156#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
157#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
158#include "PGMAllBth.h"
159#undef BTH_PGMPOOLKIND_PT_FOR_BIG
160#undef BTH_PGMPOOLKIND_PT_FOR_PT
161#undef PGM_BTH_NAME
162#undef PGM_GST_TYPE
163#undef PGM_GST_NAME
164
165
166/* Guest - PAE mode */
167#define PGM_GST_TYPE PGM_TYPE_PAE
168#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
169#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
170#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
171#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
172#include "PGMAllGst.h"
173#include "PGMAllBth.h"
174#undef BTH_PGMPOOLKIND_PT_FOR_BIG
175#undef BTH_PGMPOOLKIND_PT_FOR_PT
176#undef PGM_BTH_NAME
177#undef PGM_GST_TYPE
178#undef PGM_GST_NAME
179
180#undef PGM_SHW_TYPE
181#undef PGM_SHW_NAME
182
183
184#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
185/*
186 * Shadow - AMD64 mode
187 */
188#define PGM_SHW_TYPE PGM_TYPE_AMD64
189#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
190#include "PGMAllShw.h"
191
192/* Guest - AMD64 mode */
193#define PGM_GST_TYPE PGM_TYPE_AMD64
194#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
195#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
196#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
197#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
198#include "PGMAllGst.h"
199#include "PGMAllBth.h"
200#undef BTH_PGMPOOLKIND_PT_FOR_BIG
201#undef BTH_PGMPOOLKIND_PT_FOR_PT
202#undef PGM_BTH_NAME
203#undef PGM_GST_TYPE
204#undef PGM_GST_NAME
205
206#undef PGM_SHW_TYPE
207#undef PGM_SHW_NAME
208#endif
209
210
211/**
212 * #PF Handler.
213 *
214 * @returns VBox status code (appropriate for trap handling and GC return).
215 * @param pVM VM Handle.
216 * @param uErr The trap error code.
217 * @param pRegFrame Trap register frame.
218 * @param pvFault The fault address.
219 */
220PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
221{
222 LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
223 STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
224 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );
225
226
227#ifdef VBOX_WITH_STATISTICS
228 /*
229 * Error code stats.
230 */
231 if (uErr & X86_TRAP_PF_US)
232 {
233 if (!(uErr & X86_TRAP_PF_P))
234 {
235 if (uErr & X86_TRAP_PF_RW)
236 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
237 else
238 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
239 }
240 else if (uErr & X86_TRAP_PF_RW)
241 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
242 else if (uErr & X86_TRAP_PF_RSVD)
243 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
244 else if (uErr & X86_TRAP_PF_ID)
245 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
246 else
247 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
248 }
249 else
250 { /* Supervisor */
251 if (!(uErr & X86_TRAP_PF_P))
252 {
253 if (uErr & X86_TRAP_PF_RW)
254 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
255 else
256 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
257 }
258 else if (uErr & X86_TRAP_PF_RW)
259 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
260 else if (uErr & X86_TRAP_PF_ID)
261 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
262 else if (uErr & X86_TRAP_PF_RSVD)
263 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
264 }
265#endif
266
267 /*
268 * Call the worker.
269 */
270 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
271 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
272 rc = VINF_SUCCESS;
273 STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
274 pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
275 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
276 return rc;
277}
278
279
280/**
281 * Prefetch a page
282 *
283 * Typically used to sync commonly used pages before entering raw mode
284 * after a CR3 reload.
285 *
286 * @returns VBox status code suitable for scheduling.
287 * @retval VINF_SUCCESS on success.
288 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
289 * @param pVM VM handle.
290 * @param GCPtrPage Page to invalidate.
291 */
292PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
293{
294 STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
295 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
296 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
297 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
298 return rc;
299}
300
301
302/**
303 * Gets the mapping corresponding to the specified address (if any).
304 *
305 * @returns Pointer to the mapping.
306 * @returns NULL if not
307 *
308 * @param pVM The virtual machine.
309 * @param GCPtr The guest context pointer.
310 */
311PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
312{
313 PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
314 while (pMapping)
315 {
316 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
317 break;
318 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
319 {
320 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
321 return pMapping;
322 }
323 pMapping = CTXALLSUFF(pMapping->pNext);
324 }
325 return NULL;
326}
327
328
329/**
330 * Verifies a range of pages for read or write access
331 *
332 * Only checks the guest's page tables
333 *
334 * @returns VBox status code.
335 * @param pVM VM handle.
336 * @param Addr Guest virtual address to check
337 * @param cbSize Access size
338 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
339 */
340PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
341{
342 /*
343 * Validate input.
344 */
345 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
346 {
347 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
348 return VERR_INVALID_PARAMETER;
349 }
350
351 uint64_t fPage;
352 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
353 if (VBOX_FAILURE(rc))
354 {
355 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
356 return VINF_EM_RAW_GUEST_TRAP;
357 }
358
359 /*
360 * Check if the access would cause a page fault
361 *
362 * Note that hypervisor page directories are not present in the guest's tables, so this check
363 * is sufficient.
364 */
365 bool fWrite = !!(fAccess & X86_PTE_RW);
366 bool fUser = !!(fAccess & X86_PTE_US);
367 if ( !(fPage & X86_PTE_P)
368 || (fWrite && !(fPage & X86_PTE_RW))
369 || (fUser && !(fPage & X86_PTE_US)) )
370 {
371 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
372 return VINF_EM_RAW_GUEST_TRAP;
373 }
374 if ( VBOX_SUCCESS(rc)
375 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
376 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
377 return rc;
378}
379
380
381/**
382 * Verifies a range of pages for read or write access
383 *
384 * Supports handling of pages marked for dirty bit tracking and CSAM
385 *
386 * @returns VBox status code.
387 * @param pVM VM handle.
388 * @param Addr Guest virtual address to check
389 * @param cbSize Access size
390 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
391 */
392PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
393{
394 /*
395 * Validate input.
396 */
397 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
398 {
399 AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
400 return VERR_INVALID_PARAMETER;
401 }
402
403 uint64_t fPageGst;
404 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
405 if (VBOX_FAILURE(rc))
406 {
407 Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
408 return VINF_EM_RAW_GUEST_TRAP;
409 }
410
411 /*
412 * Check if the access would cause a page fault
413 *
414 * Note that hypervisor page directories are not present in the guest's tables, so this check
415 * is sufficient.
416 */
417 const bool fWrite = !!(fAccess & X86_PTE_RW);
418 const bool fUser = !!(fAccess & X86_PTE_US);
419 if ( !(fPageGst & X86_PTE_P)
420 || (fWrite && !(fPageGst & X86_PTE_RW))
421 || (fUser && !(fPageGst & X86_PTE_US)) )
422 {
423 Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
424 return VINF_EM_RAW_GUEST_TRAP;
425 }
426
427 /*
428 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
429 */
430 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
431 if ( rc == VERR_PAGE_NOT_PRESENT
432 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
433 {
434 /*
435 * Page is not present in our page tables.
436 * Try to sync it!
437 */
438 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
439 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
440 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
441 if (rc != VINF_SUCCESS)
442 return rc;
443 }
444 else
445 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
446
447#if 0 /* def VBOX_STRICT; triggers too often now */
448 /*
449 * This check is a bit paranoid, but useful.
450 */
451 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
452 uint64_t fPageShw;
453 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
454 if ( (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
455 || (fWrite && !(fPageShw & X86_PTE_RW))
456 || (fUser && !(fPageShw & X86_PTE_US)) )
457 {
458 AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
459 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
460 return VINF_EM_RAW_GUEST_TRAP;
461 }
462#endif
463
464 if ( VBOX_SUCCESS(rc)
465 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
466 || Addr + cbSize < Addr))
467 {
468 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
469 for (;;)
470 {
471 Addr += PAGE_SIZE;
472 if (cbSize > PAGE_SIZE)
473 cbSize -= PAGE_SIZE;
474 else
475 cbSize = 1;
476 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
477 if (rc != VINF_SUCCESS)
478 break;
479 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
480 break;
481 }
482 }
483 return rc;
484}
485
486
#ifndef IN_GC
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    /* Let the current shadow+guest paging-mode worker do the actual flushing. */
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush.
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif /* !IN_GC */
544
545
546/**
547 * Executes an instruction using the interpreter.
548 *
549 * @returns VBox status code (appropriate for trap handling and GC return).
550 * @param pVM VM handle.
551 * @param pRegFrame Register frame.
552 * @param pvFault Fault address.
553 */
554PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
555{
556 uint32_t cb;
557 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
558 if (rc == VERR_EM_INTERPRETER)
559 rc = VINF_EM_RAW_EMULATE_INSTR;
560 if (rc != VINF_SUCCESS)
561 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
562 return rc;
563}
564
565
566/**
567 * Gets effective page information (from the VMM page directory).
568 *
569 * @returns VBox status.
570 * @param pVM VM Handle.
571 * @param GCPtr Guest Context virtual address of the page.
572 * @param pfFlags Where to store the flags. These are X86_PTE_*.
573 * @param pHCPhys Where to store the HC physical address of the page.
574 * This is page aligned.
575 * @remark You should use PGMMapGetPage() for pages in a mapping.
576 */
577PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
578{
579 return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
580}
581
582
583/**
584 * Sets (replaces) the page flags for a range of pages in the shadow context.
585 *
586 * @returns VBox status.
587 * @param pVM VM handle.
588 * @param GCPtr The address of the first page.
589 * @param cb The size of the range in bytes.
590 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
591 * @remark You must use PGMMapSetPage() for pages in a mapping.
592 */
593PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
594{
595 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
596}
597
598
599/**
600 * Modify page flags for a range of pages in the shadow context.
601 *
602 * The existing flags are ANDed with the fMask and ORed with the fFlags.
603 *
604 * @returns VBox status code.
605 * @param pVM VM handle.
606 * @param GCPtr Virtual address of the first page in the range.
607 * @param cb Size (in bytes) of the range to apply the modification to.
608 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
609 * @param fMask The AND mask - page flags X86_PTE_*.
610 * Be very CAREFUL when ~'ing constants which could be 32-bit!
611 * @remark You must use PGMMapModifyPage() for pages in a mapping.
612 */
613PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
614{
615 /*
616 * Validate input.
617 */
618 if (fFlags & X86_PTE_PAE_PG_MASK)
619 {
620 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
621 return VERR_INVALID_PARAMETER;
622 }
623 if (!cb)
624 {
625 AssertFailed();
626 return VERR_INVALID_PARAMETER;
627 }
628
629 /*
630 * Align the input.
631 */
632 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
633 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
634 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
635
636 /*
637 * Call worker.
638 */
639 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
640}
641
#ifndef IN_GC
/**
 * Gets the SHADOW page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or page directory is missing.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   The address.
 * @param   ppPD    Receives address of page directory
 */
PGMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPAE *ppPD)
{
    PPGM           pPGM   = &pVM->pgm.s;
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PPGMPOOL       pPool  = pPGM->CTXSUFF(pPool);
    PX86PML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    /*
     * PML4 level: allocate a shadow PDPT from the pool if the entry is empty.
     */
    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];

        Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
        /* Keyed on the guest PML4E's physical address so re-lookups hit the pool cache. */
        rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, PGMPOOL_IDX_PML4, iPml4e, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;   /* pool got flushed; caller must do a full CR3 sync */

        AssertRCReturn(rc, rc);

        /* The PDPT was cached or created; hook it up now. */
        /* NOTE(review): only the physical address is OR'ed into the entry here;
           P/RW/US attribute bits are presumably established elsewhere — confirm. */
        pPml4e->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    /*
     * PDPT level: allocate a shadow page directory if the entry is empty.
     */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT  pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE  pPdpe = &pPdpt->a[iPdPt];

    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
        PX86PDPT  pPdptGst;
        /* Map the guest PDPT so we can read the guest PDPE we're shadowing. */
        rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
        AssertRCReturn(rc, rc);

        Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
        rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;

        AssertRCReturn(rc, rc);

        /* The PD was cached or created; hook it up now. */
        pPdpe->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    /* Return the mapped shadow page directory. */
    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
#endif /* !IN_GC */
715
716/**
717 * Gets effective Guest OS page information.
718 *
719 * When GCPtr is in a big page, the function will return as if it was a normal
720 * 4KB page. If the need for distinguishing between big and normal page becomes
721 * necessary at a later point, a PGMGstGetPage() will be created for that
722 * purpose.
723 *
724 * @returns VBox status.
725 * @param pVM VM Handle.
726 * @param GCPtr Guest Context virtual address of the page.
727 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
728 * @param pGCPhys Where to store the GC physical address of the page.
729 * This is page aligned. The fact that the
730 */
731PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
732{
733 return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
734}
735
736
737/**
738 * Checks if the page is present.
739 *
740 * @returns true if the page is present.
741 * @returns false if the page is not present.
742 * @param pVM The VM handle.
743 * @param GCPtr Address within the page.
744 */
745PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
746{
747 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
748 return VBOX_SUCCESS(rc);
749}
750
751
752/**
753 * Sets (replaces) the page flags for a range of pages in the guest's tables.
754 *
755 * @returns VBox status.
756 * @param pVM VM handle.
757 * @param GCPtr The address of the first page.
758 * @param cb The size of the range in bytes.
759 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
760 */
761PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
762{
763 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
764}
765
766
767/**
768 * Modify page flags for a range of pages in the guest's tables
769 *
770 * The existing flags are ANDed with the fMask and ORed with the fFlags.
771 *
772 * @returns VBox status code.
773 * @param pVM VM handle.
774 * @param GCPtr Virtual address of the first page in the range.
775 * @param cb Size (in bytes) of the range to apply the modification to.
776 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
777 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
778 * Be very CAREFUL when ~'ing constants which could be 32-bit!
779 */
780PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
781{
782 STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
783
784 /*
785 * Validate input.
786 */
787 if (fFlags & X86_PTE_PAE_PG_MASK)
788 {
789 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
790 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
791 return VERR_INVALID_PARAMETER;
792 }
793
794 if (!cb)
795 {
796 AssertFailed();
797 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
798 return VERR_INVALID_PARAMETER;
799 }
800
801 LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
802
803 /*
804 * Adjust input.
805 */
806 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
807 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
808 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
809
810 /*
811 * Call worker.
812 */
813 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
814
815 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
816 return rc;
817}
818
819
820/**
821 * Gets the current CR3 register value for the shadow memory context.
822 * @returns CR3 value.
823 * @param pVM The VM handle.
824 */
825PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
826{
827 switch (pVM->pgm.s.enmShadowMode)
828 {
829 case PGMMODE_32_BIT:
830 return pVM->pgm.s.HCPhys32BitPD;
831
832 case PGMMODE_PAE:
833 case PGMMODE_PAE_NX:
834 return pVM->pgm.s.HCPhysPaePDPT;
835
836 case PGMMODE_AMD64:
837 case PGMMODE_AMD64_NX:
838 return pVM->pgm.s.HCPhysPaePML4;
839
840 default:
841 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
842 return ~0;
843 }
844}
845
846
847/**
848 * Gets the CR3 register value for the 32-Bit shadow memory context.
849 * @returns CR3 value.
850 * @param pVM The VM handle.
851 */
852PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
853{
854 return pVM->pgm.s.HCPhys32BitPD;
855}
856
857
858/**
859 * Gets the CR3 register value for the PAE shadow memory context.
860 * @returns CR3 value.
861 * @param pVM The VM handle.
862 */
863PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
864{
865 return pVM->pgm.s.HCPhysPaePDPT;
866}
867
868
869/**
870 * Gets the CR3 register value for the AMD64 shadow memory context.
871 * @returns CR3 value.
872 * @param pVM The VM handle.
873 */
874PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
875{
876 return pVM->pgm.s.HCPhysPaePML4;
877}
878
879
880/**
881 * Gets the current CR3 register value for the HC intermediate memory context.
882 * @returns CR3 value.
883 * @param pVM The VM handle.
884 */
885PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
886{
887 switch (pVM->pgm.s.enmHostMode)
888 {
889 case SUPPAGINGMODE_32_BIT:
890 case SUPPAGINGMODE_32_BIT_GLOBAL:
891 return pVM->pgm.s.HCPhysInterPD;
892
893 case SUPPAGINGMODE_PAE:
894 case SUPPAGINGMODE_PAE_GLOBAL:
895 case SUPPAGINGMODE_PAE_NX:
896 case SUPPAGINGMODE_PAE_GLOBAL_NX:
897 return pVM->pgm.s.HCPhysInterPaePDPT;
898
899 case SUPPAGINGMODE_AMD64:
900 case SUPPAGINGMODE_AMD64_GLOBAL:
901 case SUPPAGINGMODE_AMD64_NX:
902 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
903 return pVM->pgm.s.HCPhysInterPaePDPT;
904
905 default:
906 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
907 return ~0;
908 }
909}
910
911
912/**
913 * Gets the current CR3 register value for the GC intermediate memory context.
914 * @returns CR3 value.
915 * @param pVM The VM handle.
916 */
917PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
918{
919 switch (pVM->pgm.s.enmShadowMode)
920 {
921 case PGMMODE_32_BIT:
922 return pVM->pgm.s.HCPhysInterPD;
923
924 case PGMMODE_PAE:
925 case PGMMODE_PAE_NX:
926 return pVM->pgm.s.HCPhysInterPaePDPT;
927
928 case PGMMODE_AMD64:
929 case PGMMODE_AMD64_NX:
930 return pVM->pgm.s.HCPhysInterPaePML4;
931
932 default:
933 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
934 return ~0;
935 }
936}
937
938
939/**
940 * Gets the CR3 register value for the 32-Bit intermediate memory context.
941 * @returns CR3 value.
942 * @param pVM The VM handle.
943 */
944PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
945{
946 return pVM->pgm.s.HCPhysInterPD;
947}
948
949
950/**
951 * Gets the CR3 register value for the PAE intermediate memory context.
952 * @returns CR3 value.
953 * @param pVM The VM handle.
954 */
955PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
956{
957 return pVM->pgm.s.HCPhysInterPaePDPT;
958}
959
960
961/**
962 * Gets the CR3 register value for the AMD64 intermediate memory context.
963 * @returns CR3 value.
964 * @param pVM The VM handle.
965 */
966PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
967{
968 return pVM->pgm.s.HCPhysInterPaePML4;
969}
970
971
972/**
973 * Performs and schedules necessary updates following a CR3 load or reload.
974 *
975 * This will normally involve mapping the guest PD or nPDPT
976 *
977 * @returns VBox status code.
978 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
979 * safely be ignored and overridden since the FF will be set too then.
980 * @param pVM VM handle.
981 * @param cr3 The new cr3.
982 * @param fGlobal Indicates whether this is a global flush or not.
983 */
984PGMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
985{
986 STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);
987
988 /*
989 * Always flag the necessary updates; necessary for hardware acceleration
990 */
991 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
992 if (fGlobal)
993 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
994 LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
995
996 /*
997 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
998 */
999 int rc = VINF_SUCCESS;
1000 RTGCPHYS GCPhysCR3;
1001 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1002 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1003 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1004 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1005 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1006 else
1007 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1008 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1009 {
1010 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1011 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1012 if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
1013 {
1014 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1015 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1016 }
1017 if (fGlobal)
1018 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
1019 else
1020 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
1021 }
1022 else
1023 {
1024 /*
1025 * Check if we have a pending update of the CR3 monitoring.
1026 */
1027 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1028 {
1029 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1030 Assert(!pVM->pgm.s.fMappingsFixed);
1031 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1032 }
1033 if (fGlobal)
1034 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
1035 else
1036 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
1037 }
1038
1039 STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
1040 return rc;
1041}
1042
1043
1044/**
1045 * Synchronize the paging structures.
1046 *
1047 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1048 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1049 * in several places, most importantly whenever the CR3 is loaded.
1050 *
1051 * @returns VBox status code.
1052 * @param pVM The virtual machine.
1053 * @param cr0 Guest context CR0 register
1054 * @param cr3 Guest context CR3 register
1055 * @param cr4 Guest context CR4 register
1056 * @param fGlobal Including global page directories or not
1057 */
1058PGMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1059{
1060 /*
1061 * We might be called when we shouldn't.
1062 *
1063 * The mode switching will ensure that the PD is resynced
1064 * after every mode switch. So, if we find ourselves here
1065 * when in protected or real mode we can safely disable the
1066 * FF and return immediately.
1067 */
1068 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1069 {
1070 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1071 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1072 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1073 return VINF_SUCCESS;
1074 }
1075
1076 /* If global pages are not supported, then all flushes are global */
1077 if (!(cr4 & X86_CR4_PGE))
1078 fGlobal = true;
1079 LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1080 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1081
1082 /*
1083 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1084 */
1085 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1086 int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1087 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1088 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%VRc\n", rc));
1089 if (rc == VINF_SUCCESS)
1090 {
1091 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1092 {
1093 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1094 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1095 }
1096
1097 /*
1098 * Check if we have a pending update of the CR3 monitoring.
1099 */
1100 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1101 {
1102 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1103 Assert(!pVM->pgm.s.fMappingsFixed);
1104 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1105 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1106 }
1107 }
1108
1109 /*
1110 * Now flush the CR3 (guest context).
1111 */
1112 if (rc == VINF_SUCCESS)
1113 PGM_INVL_GUEST_TLBS();
1114 return rc;
1115}
1116
1117
1118/**
1119 * Called whenever CR0 or CR4 in a way which may change
1120 * the paging mode.
1121 *
1122 * @returns VBox status code fit for scheduling in GC and R0.
1123 * @retval VINF_SUCCESS if the was no change, or it was successfully dealt with.
1124 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1125 * @param pVM VM handle.
1126 * @param cr0 The new cr0.
1127 * @param cr4 The new cr4.
1128 * @param efer The new extended feature enable register.
1129 */
1130PGMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1131{
1132 PGMMODE enmGuestMode;
1133
1134 /*
1135 * Calc the new guest mode.
1136 */
1137 if (!(cr0 & X86_CR0_PE))
1138 enmGuestMode = PGMMODE_REAL;
1139 else if (!(cr0 & X86_CR0_PG))
1140 enmGuestMode = PGMMODE_PROTECTED;
1141 else if (!(cr4 & X86_CR4_PAE))
1142 enmGuestMode = PGMMODE_32_BIT;
1143 else if (!(efer & MSR_K6_EFER_LME))
1144 {
1145 if (!(efer & MSR_K6_EFER_NXE))
1146 enmGuestMode = PGMMODE_PAE;
1147 else
1148 enmGuestMode = PGMMODE_PAE_NX;
1149 }
1150 else
1151 {
1152 if (!(efer & MSR_K6_EFER_NXE))
1153 enmGuestMode = PGMMODE_AMD64;
1154 else
1155 enmGuestMode = PGMMODE_AMD64_NX;
1156 }
1157
1158 /*
1159 * Did it change?
1160 */
1161 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1162 return VINF_SUCCESS;
1163#ifdef IN_RING3
1164 return pgmR3ChangeMode(pVM, enmGuestMode);
1165#else
1166 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1167 return VINF_PGM_CHANGE_MODE;
1168#endif
1169}
1170
1171
1172/**
1173 * Gets the current guest paging mode.
1174 *
1175 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1176 *
1177 * @returns The current paging mode.
1178 * @param pVM The VM handle.
1179 */
1180PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1181{
1182 return pVM->pgm.s.enmGuestMode;
1183}
1184
1185
1186/**
1187 * Gets the current shadow paging mode.
1188 *
1189 * @returns The current paging mode.
1190 * @param pVM The VM handle.
1191 */
1192PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1193{
1194 return pVM->pgm.s.enmShadowMode;
1195}
1196
1197/**
1198 * Gets the current host paging mode.
1199 *
1200 * @returns The current paging mode.
1201 * @param pVM The VM handle.
1202 */
1203PGMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1204{
1205 switch (pVM->pgm.s.enmHostMode)
1206 {
1207 case SUPPAGINGMODE_32_BIT:
1208 case SUPPAGINGMODE_32_BIT_GLOBAL:
1209 return PGMMODE_32_BIT;
1210
1211 case SUPPAGINGMODE_PAE:
1212 case SUPPAGINGMODE_PAE_GLOBAL:
1213 return PGMMODE_PAE;
1214
1215 case SUPPAGINGMODE_PAE_NX:
1216 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1217 return PGMMODE_PAE_NX;
1218
1219 case SUPPAGINGMODE_AMD64:
1220 case SUPPAGINGMODE_AMD64_GLOBAL:
1221 return PGMMODE_AMD64;
1222
1223 case SUPPAGINGMODE_AMD64_NX:
1224 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1225 return PGMMODE_AMD64_NX;
1226
1227 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1228 }
1229
1230 return PGMMODE_INVALID;
1231}
1232
1233
1234/**
1235 * Get mode name.
1236 *
1237 * @returns read-only name string.
1238 * @param enmMode The mode which name is desired.
1239 */
1240PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1241{
1242 switch (enmMode)
1243 {
1244 case PGMMODE_REAL: return "real";
1245 case PGMMODE_PROTECTED: return "protected";
1246 case PGMMODE_32_BIT: return "32-bit";
1247 case PGMMODE_PAE: return "PAE";
1248 case PGMMODE_PAE_NX: return "PAE+NX";
1249 case PGMMODE_AMD64: return "AMD64";
1250 case PGMMODE_AMD64_NX: return "AMD64+NX";
1251 default: return "unknown mode value";
1252 }
1253}
1254
1255
1256/**
1257 * Acquire the PGM lock.
1258 *
1259 * @returns VBox status code
1260 * @param pVM The VM to operate on.
1261 */
1262int pgmLock(PVM pVM)
1263{
1264 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
1265#ifdef IN_GC
1266 if (rc == VERR_SEM_BUSY)
1267 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1268#elif defined(IN_RING0)
1269 if (rc == VERR_SEM_BUSY)
1270 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1271#endif
1272 AssertRC(rc);
1273 return rc;
1274}
1275
1276
1277/**
1278 * Release the PGM lock.
1279 *
1280 * @returns VBox status code
1281 * @param pVM The VM to operate on.
1282 */
1283void pgmUnlock(PVM pVM)
1284{
1285 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1286}
1287
1288
1289#ifdef VBOX_STRICT
1290
1291/**
1292 * Asserts that there are no mapping conflicts.
1293 *
1294 * @returns Number of conflicts.
1295 * @param pVM The VM Handle.
1296 */
1297PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
1298{
1299 unsigned cErrors = 0;
1300
1301 /*
1302 * Check for mapping conflicts.
1303 */
1304 for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
1305 pMapping;
1306 pMapping = CTXALLSUFF(pMapping->pNext))
1307 {
1308 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
1309 for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
1310 GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
1311 GCPtr += PAGE_SIZE)
1312 {
1313 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
1314 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
1315 {
1316 AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
1317 cErrors++;
1318 break;
1319 }
1320 }
1321 }
1322
1323 return cErrors;
1324}
1325
1326
1327/**
1328 * Asserts that everything related to the guest CR3 is correctly shadowed.
1329 *
1330 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
1331 * and assert the correctness of the guest CR3 mapping before asserting that the
1332 * shadow page tables is in sync with the guest page tables.
1333 *
1334 * @returns Number of conflicts.
1335 * @param pVM The VM Handle.
1336 * @param cr3 The current guest CR3 register value.
1337 * @param cr4 The current guest CR4 register value.
1338 */
1339PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
1340{
1341 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1342 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
1343 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1344 return cErrors;
1345}
1346
1347#endif /* VBOX_STRICT */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette