VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@38953

Last change on this file since 38953 was 38953, checked in by vboxsync, 13 years ago

PGM: Attempt at fixing the VERR_MAP_FAILED during state save problem on 32-bit hosts when assigning lots of memory to the guest. PGM should lock down guest RAM pages before use and release them afterwards like everyone else. Still quite some stuff left to do there, so I've devised a little hack for tracking unlocked mappings and using this as input when deciding to do async or sync chunk unmapping at save/load time. See xtracker #5912 and public ticket 7929.
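
To illustrate the heuristic described in the commit message, here is a minimal sketch. It is not the actual PGM code: the type, field, and function names are invented, and the direction of the decision (asynchronous chunk unmapping only while no unlocked mappings are outstanding) is an assumption about how such a counter could be consulted at save/load time.

    #include <stdint.h>

    /* Hypothetical bookkeeping for mappings made without locking guest pages. */
    typedef struct SKETCHMAPTRACKER
    {
        uint32_t cUnlockedMappings; /* chunk mappings created without page locks */
    } SKETCHMAPTRACKER;

    /* Called whenever a mapping is created without taking a page lock. */
    void sketchNoteUnlockedMapping(SKETCHMAPTRACKER *pTracker)
    {
        pTracker->cUnlockedMappings++;
    }

    /* At save/load time: assume async unmapping is only safe while nothing is
       mapped without a lock; otherwise fall back to synchronous unmapping. */
    int sketchUseAsyncUnmap(SKETCHMAPTRACKER const *pTracker)
    {
        return pTracker->cUnlockedMappings == 0;
    }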

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.7 KB
/* $Id: PGMR0.cpp 38953 2011-10-06 08:49:36Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}
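
/*
 * Illustrative caller pattern (hypothetical, for exposition only; the real
 * callers are PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage):
 *
 *     pgmLock(pVM);                        // PGM critical section must be owned
 *     int rc = PGMR0PhysAllocateHandyPages(pVM, pVCpu);
 *     if (RT_SUCCESS(rc))
 *         ... caller clears the freshly allocated handy pages before use ...
 *     pgmUnlock(pVM);
 */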


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
    Assert(!pVM->pgm.s.cLargeHandyPages);

    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M,
                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch. The interface belongs to a global PCI pass-through
   manager. It shall use the global VM handle, not the user VM handle to
   store the per-VM info (domain) since that is all ring-0 stuff, thus
   passing pGVM here. I've tentatively prefixed the functions 'GPciRawR0',
   we can discuss the PciRaw code re-organization when I'm back from
   vacation.

   I've implemented the initial IOMMU set up below. For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code. For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine. At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */
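
/* Illustrative call sequence (a sketch of how PGMR0PhysSetupIommu below drives
   this interface; no additional API is implied):
       GPciRawR0GuestPageBeginAssignments(pGVM);
       for each guest page:
           safe RAM page   -> GPciRawR0GuestPageAssign(pGVM, GCPhys, HCPhys);
           everything else -> GPciRawR0GuestPageUnassign(pGVM, GCPhys);
       GPciRawR0GuestPageEndAssignments(pGVM);
   Later changes to individual mappings are meant to go through
   GPciRawR0GuestPageUpdate(). */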

/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}


/**
 * Indicates that the specified guest page doesn't exist or doesn't have a host
 * page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The new host page address or NIL_RTHCPHYS if
 *                      now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */


/**
 * Sets up the IOMMU when raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
{
    PGVM pGVM;
    int rc = GVMMR0ByVM(pVM, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The Simplistic Approach - Enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE    pPage  = &pRam->aPages[0];
                RTGCPHYS    GCPhys = pRam->GCPhys;
                uint32_t    cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE      || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
    bool fLockTaken = false;
    switch(enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* Note: hack alert for difficult to reproduce problem. */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}


/**
 * #PF Handler for deliberate nested paging misconfiguration (/reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The VM Handle.
 * @param   pVCpu               The current CPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandler->CTX_SUFF(pfnHandler))
            {
                CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
                void                       *pvUser     = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
                rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_INTERNAL_ERROR_4;
#endif
}