VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@37529

Last change on this file since 37529 was 37354, checked in by vboxsync, 14 years ago

PGM: Fixed locking issues in PGMR3PhysMMIORegister and PGMR3PhysMMIODeregister. Also addressed a harmless one in PGMR3PhysRomRegister (only used at init time, so no races). Fortified the code with more lock assertions, replacing the incorrect PGMIsLocked() checks (we only care whether the current thread is the lock owner). Cleaned up some ReturnStmt macros and added more of them.
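The gist of the lock-assertion change, as a minimal sketch (the enclosing function is hypothetical; PGMIsLocked() is the check named in the log message and PGM_LOCK_ASSERT_OWNER() is the owner assertion used in this file):

    /* Before: only asserts that somebody owns the PGM lock. */
    Assert(PGMIsLocked(pVM));
    /* After: asserts that the calling thread is the lock owner. */
    PGM_LOCK_ASSERT_OWNER(pVM);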

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.7 KB
Line 
1/* $Id: PGMR0.cpp 37354 2011-06-07 15:05:32Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Ring-0.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/rawpci.h>
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/gmm.h>
25#include <VBox/vmm/gvm.h>
26#include "PGMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "PGMInline.h"
29#include <VBox/log.h>
30#include <VBox/err.h>
31#include <iprt/assert.h>
32#include <iprt/mem.h>
33
34
35/*
36 * Instantiate the ring-0 header/code templates.
37 */
38#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
39#include "PGMR0Bth.h"
40#undef PGM_BTH_NAME
41
42#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
43#include "PGMR0Bth.h"
44#undef PGM_BTH_NAME
45
46#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
47#include "PGMR0Bth.h"
48#undef PGM_BTH_NAME
49
50#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
51#include "PGMR0Bth.h"
52#undef PGM_BTH_NAME
53
54
55/**
56 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
57 *
58 * @returns The following VBox status codes.
59 * @retval VINF_SUCCESS on success. FF cleared.
60 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
61 *
62 * @param pVM The VM handle.
63 * @param pVCpu The VMCPU handle.
64 *
65 * @remarks Must be called from within the PGM critical section. The caller
66 * must clear the new pages.
67 */
68VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
69{
70 Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));
71
72 /*
73 * Check for error injection.
74 */
75 if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
76 return VERR_NO_MEMORY;
77
78 /*
79 * Try to allocate a full set of handy pages.
80 */
81 uint32_t iFirst = pVM->pgm.s.cHandyPages;
82 AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
83 uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
84 if (!cPages)
85 return VINF_SUCCESS;
86 int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
87 if (RT_SUCCESS(rc))
88 {
89 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
90 {
91 Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
92 Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
93 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
94 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
95 Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
96 }
97
98 pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
99 }
100 else if (rc != VERR_GMM_SEED_ME)
101 {
102 if ( ( rc == VERR_GMM_HIT_GLOBAL_LIMIT
103 || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
104 && iFirst < PGM_HANDY_PAGES_MIN)
105 {
106
107#ifdef VBOX_STRICT
108 /* We're ASSUMING that GMM has updated all the entries before failing us. */
109 uint32_t i;
110 for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
111 {
112 Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
113 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
114 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
115 }
116#endif
117
118 /*
119 * Reduce the number of pages until we hit the minimum limit.
120 */
121 do
122 {
123 cPages >>= 2;
124 if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
125 cPages = PGM_HANDY_PAGES_MIN - iFirst;
126 rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
127 } while ( ( rc == VERR_GMM_HIT_GLOBAL_LIMIT
128 || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
129 && cPages + iFirst > PGM_HANDY_PAGES_MIN);
130 if (RT_SUCCESS(rc))
131 {
132#ifdef VBOX_STRICT
133 i = iFirst + cPages;
134 while (i-- > 0)
135 {
136 Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
137 Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
138 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
139 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
140 Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
141 }
142
143 for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
144 {
145 Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
146 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
147 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
148 }
149#endif
150
151 pVM->pgm.s.cHandyPages = iFirst + cPages;
152 }
153 }
154
155 if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
156 {
157 LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
158 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
159 }
160 }
161
162
163 LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
164 return rc;
165}
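/*
 * A minimal caller sketch for PGMR0PhysAllocateHandyPages above; illustrative
 * only, the real callers are PGMR3PhysAllocateHandyPages and
 * pgmPhysEnsureHandyPage named in the function header:
 *
 *     pgmLock(pVM);                                    // must own the PGM lock
 *     int rc = PGMR0PhysAllocateHandyPages(pVM, pVCpu);
 *     pgmUnlock(pVM);
 *     if (rc == VINF_EM_NO_MEMORY)
 *         return rc;           // VM_FF_PGM_NO_MEMORY is set, let EM sort it out
 *     AssertRCReturn(rc, rc);
 *     // Per the remarks above, the caller must clear the new handy pages.
 */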
166
167
168/**
169 * Worker function for PGMR3PhysAllocateLargeHandyPage
170 *
171 * @returns The following VBox status codes.
172 * @retval VINF_SUCCESS on success.
173 * @retval VINF_EM_NO_MEMORY if we're out of memory.
174 *
175 * @param pVM The VM handle.
176 * @param pVCpu The VMCPU handle.
177 *
178 * @remarks Must be called from within the PGM critical section. The caller
179 * must clear the new pages.
180 */
181VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
182{
183 Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));
184
185 Assert(!pVM->pgm.s.cLargeHandyPages);
186 int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
187 if (RT_SUCCESS(rc))
188 pVM->pgm.s.cLargeHandyPages = 1;
189
190 return rc;
191}
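/*
 * Likewise, a hypothetical caller sketch for PGMR0PhysAllocateLargeHandyPage
 * above (the real user is the PGMR3PhysAllocateLargeHandyPage worker path
 * named in the function header):
 *
 *     Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));
 *     int rc = PGMR0PhysAllocateLargeHandyPage(pVM, pVCpu);
 *     if (RT_SUCCESS(rc))
 *         Assert(pVM->pgm.s.cLargeHandyPages == 1);    // one 2 MB page staged
 */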
192
193
194#ifdef VBOX_WITH_PCI_PASSTHROUGH
195/* Interface sketch. The interface belongs to a global PCI pass-through
196 manager. It shall use the global VM handle, not the user VM handle to
197 store the per-VM info (domain) since that is all ring-0 stuff, thus
198 passing pGVM here. I've tentatively prefixed the functions 'GPciRawR0',
199 we can discuss the PciRaw code re-organization when I'm back from
200 vacation.
201
202 I've implemented the initial IOMMU set up below. For things to work
203 reliably, we will probably need to add a whole bunch of checks and
204 GPciRawR0GuestPageUpdate calls to the PGM code. For the present,
205 assuming nested paging (enforced) and prealloc (enforced), no
206 ballooning (check missing), page sharing (check missing) or live
207 migration (check missing), it might work fine. At least if some
208 VM power-off hook is present and can tear down the IOMMU page tables. */
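/*
 * The intended calling order for the interface sketched above, condensed
 * (PGMR0PhysSetupIommu below is the actual user; this is only a sketch):
 *
 *     GPciRawR0GuestPageBeginAssignments(pGVM);
 *     for (each guest page)
 *         GPciRawR0GuestPageAssign(pGVM, GCPhys, HCPhys);  // or GPciRawR0GuestPageUnassign(pGVM, GCPhys)
 *     GPciRawR0GuestPageEndAssignments(pGVM);              // called even if an assignment failed
 *     ...
 *     GPciRawR0GuestPageUpdate(pGVM, GCPhys, HCPhysNew);   // later, when a mapping changes
 */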
209
210/**
211 * Tells the global PCI pass-through manager that we are about to set up the
212 * guest page to host page mappings for the specified VM.
213 *
214 * @returns VBox status code.
215 *
216 * @param pGVM The ring-0 VM structure.
217 */
218VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
219{
220 return VINF_SUCCESS;
221}
222
223
224/**
225 * Assigns a host page mapping for a guest page.
226 *
227 * This is only used when setting up the mappings, i.e. between
228 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
229 *
230 * @returns VBox status code.
231 * @param pGVM The ring-0 VM structure.
232 * @param GCPhys The address of the guest page (page aligned).
233 * @param HCPhys The address of the host page (page aligned).
234 */
235VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
236{
237 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
238 AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
239
240 if (pGVM->rawpci.s.pfnContigMemInfo)
241 /** @todo: what do we do on failure? */
242 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);
243
244 return VINF_SUCCESS;
245}
246
247
248/**
249 * Indicates that the specified guest page doesn't exist or doesn't have a host
250 * page mapping we trust PCI pass-through with.
251 *
252 * This is only used when setting up the mappings, i.e. between
253 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
254 *
255 * @returns VBox status code.
256 * @param pGVM The ring-0 VM structure.
257 * @param GCPhys The address of the guest page (page aligned).
259 */
260VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
261{
262 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
263
264 if (pGVM->rawpci.s.pfnContigMemInfo)
265 /** @todo: what do we do on failure? */
266 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);
267
268 return VINF_SUCCESS;
269}
270
271
272/**
273 * Tells the global PCI pass-through manager that we have completed setting up
274 * the guest page to host page mappings for the specified VM.
275 *
276 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
277 * if some page assignment failed.
278 *
279 * @returns VBox status code.
280 *
281 * @param pGVM The ring-0 VM structure.
282 */
283VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
284{
285 return VINF_SUCCESS;
286}
287
288
289/**
290 * Tells the global PCI pass-through manager that a guest page mapping has
291 * changed after the initial setup.
292 *
293 * @returns VBox status code.
294 * @param pGVM The ring-0 VM structure.
295 * @param GCPhys The address of the guest page (page aligned).
296 * @param HCPhys The new host page address or NIL_RTHCPHYS if
297 * now unassigned.
298 */
299VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
300{
301 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
302 AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
303 return VINF_SUCCESS;
304}
305
306#endif /* VBOX_WITH_PCI_PASSTHROUGH */
307
308
309/**
310 * Sets up the IOMMU when raw PCI device is enabled.
311 *
312 * @note This is a hack that will probably be remodelled and refined later!
313 *
314 * @returns VBox status code.
315 *
316 * @param pVM The VM handle.
317 */
318VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
319{
320 PGVM pGVM;
321 int rc = GVMMR0ByVM(pVM, &pGVM);
322 if (RT_FAILURE(rc))
323 return rc;
324
325#ifdef VBOX_WITH_PCI_PASSTHROUGH
326 if (pVM->pgm.s.fPciPassthrough)
327 {
328 /*
329 * The Simplistic Approach - Enumerate all the pages and tell the
330 * IOMMU about each of them.
331 */
332 pgmLock(pVM);
333 rc = GPciRawR0GuestPageBeginAssignments(pGVM);
334 if (RT_SUCCESS(rc))
335 {
336 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
337 {
338 PPGMPAGE pPage = &pRam->aPages[0];
339 RTGCPHYS GCPhys = pRam->GCPhys;
340 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
341 while (cLeft-- > 0)
342 {
343 /* Only expose pages that are 100% safe for now. */
344 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
345 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
346 && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
347 rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
348 else
349 rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);
350
351 /* next */
352 pPage++;
353 GCPhys += PAGE_SIZE;
354 }
355 }
356
357 int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
358 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
359 rc = rc2;
360 }
361 pgmUnlock(pVM);
362 }
363 else
364#endif
365 rc = VERR_NOT_SUPPORTED;
366 return rc;
367}
368
369
370/**
371 * #PF Handler for nested paging.
372 *
373 * @returns VBox status code (appropriate for trap handling and GC return).
374 * @param pVM VM Handle.
375 * @param pVCpu VMCPU Handle.
376 * @param enmShwPagingMode Paging mode for the nested page tables.
377 * @param uErr The trap error code.
378 * @param pRegFrame Trap register frame.
379 * @param GCPhysFault The fault address.
380 */
381VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
382 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
383{
384 int rc;
385
386 LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
387 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
388 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
389
390 /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
391 AssertMsg( enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX
392 || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
393 ("enmShwPagingMode=%d\n", enmShwPagingMode));
394
395 /* Reserved shouldn't end up here. */
396 Assert(!(uErr & X86_TRAP_PF_RSVD));
397
398#ifdef VBOX_WITH_STATISTICS
399 /*
400 * Error code stats.
401 */
402 if (uErr & X86_TRAP_PF_US)
403 {
404 if (!(uErr & X86_TRAP_PF_P))
405 {
406 if (uErr & X86_TRAP_PF_RW)
407 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
408 else
409 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
410 }
411 else if (uErr & X86_TRAP_PF_RW)
412 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
413 else if (uErr & X86_TRAP_PF_RSVD)
414 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
415 else if (uErr & X86_TRAP_PF_ID)
416 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
417 else
418 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
419 }
420 else
421 { /* Supervisor */
422 if (!(uErr & X86_TRAP_PF_P))
423 {
424 if (uErr & X86_TRAP_PF_RW)
425 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
426 else
427 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
428 }
429 else if (uErr & X86_TRAP_PF_RW)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
431 else if (uErr & X86_TRAP_PF_ID)
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
433 else if (uErr & X86_TRAP_PF_RSVD)
434 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
435 }
436#endif
437
438 /*
439 * Call the worker.
440 *
441 * Note! We pretend the guest is in protected mode without paging, so we
442 * can use existing code to build the nested page tables.
443 */
444 bool fLockTaken = false;
445 switch(enmShwPagingMode)
446 {
447 case PGMMODE_32_BIT:
448 rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
449 break;
450 case PGMMODE_PAE:
451 case PGMMODE_PAE_NX:
452 rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
453 break;
454 case PGMMODE_AMD64:
455 case PGMMODE_AMD64_NX:
456 rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
457 break;
458 case PGMMODE_EPT:
459 rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
460 break;
461 default:
462 AssertFailed();
463 rc = VERR_INVALID_PARAMETER;
464 break;
465 }
466 if (fLockTaken)
467 {
468 PGM_LOCK_ASSERT_OWNER(pVM);
469 pgmUnlock(pVM);
470 }
471
472 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
473 rc = VINF_SUCCESS;
474 /* Note: hack alert for difficult to reproduce problem. */
475 else if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
476 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
477 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
478 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
479 {
480 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
481 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
482 single VCPU VMs though. */
483 rc = VINF_SUCCESS;
484 }
485
486 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
487 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
488 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
489 return rc;
490}
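/*
 * A sketch of how a hardware-assisted execution (HM) exit handler might hand a
 * nested-paging #PF to the function above; the exit-information plumbing is an
 * assumption, only the PGMR0 call itself is taken from this file:
 *
 *     RTGCPHYS     GCPhysFault = <guest-physical fault address from the exit info>;
 *     RTGCUINT     uErrCode    = <page-fault error code from the exit info>;
 *     PCPUMCTXCORE pCtxCore    = CPUMCTX2CORE(pCtx);
 *     rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrCode, pCtxCore, GCPhysFault);
 *     // VINF_SUCCESS means the faulting instruction can simply be restarted.
 */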
491
492
493/**
494 * #PF Handler for deliberate nested paging misconfiguration (/reserved bit)
495 * employed for MMIO pages.
496 *
497 * @returns VBox status code (appropriate for trap handling and GC return).
498 * @param pVM The VM Handle.
499 * @param pVCpu The current CPU.
500 * @param enmShwPagingMode Paging mode for the nested page tables.
501 * @param pRegFrame Trap register frame.
502 * @param GCPhysFault The fault address.
503 * @param uErr The error code, UINT32_MAX if not available
504 * (VT-x).
505 */
506VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
507 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
508{
509#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
510 STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
511 VBOXSTRICTRC rc;
512
513 /*
514 * Try to look up the all-access physical handler for the address.
515 */
516 pgmLock(pVM);
517 PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
518 if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
519 {
520 /*
521 * If the handler has aliased pages or pages that have been temporarily
522 * disabled, we'll have to take a detour to make sure we resync them
523 * to avoid lots of unnecessary exits.
524 */
525 PPGMPAGE pPage;
526 if ( ( pHandler->cAliasedPages
527 || pHandler->cTmpOffPages)
528 && ( (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
529 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
530 )
531 {
532 Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
533 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
534 rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
535 pgmUnlock(pVM);
536 }
537 else
538 {
539 if (pHandler->CTX_SUFF(pfnHandler))
540 {
541 CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
542 void *pvUser = pHandler->CTX_SUFF(pvUser);
543 STAM_PROFILE_START(&pHandler->Stat, h);
544 pgmUnlock(pVM);
545
546 Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
547 rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);
548
549#ifdef VBOX_WITH_STATISTICS
550 pgmLock(pVM);
551 pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
552 if (pHandler)
553 STAM_PROFILE_STOP(&pHandler->Stat, h);
554 pgmUnlock(pVM);
555#endif
556 }
557 else
558 {
559 pgmUnlock(pVM);
560 Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
561 rc = VINF_EM_RAW_EMULATE_INSTR;
562 }
563 }
564 }
565 else
566 {
567 /*
568 * Must be out of sync, so do a SyncPage and restart the instruction.
569 *
570 * ASSUMES that ALL handlers are page aligned and cover whole pages
571 * (assumption asserted in PGMHandlerPhysicalRegisterEx).
572 */
573 Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
574 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
575 rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
576 pgmUnlock(pVM);
577 }
578
579 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
580 return rc;
581
582#else
583 AssertLogRelFailed();
584 return VERR_INTERNAL_ERROR_4;
585#endif
586}
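/*
 * And a matching sketch for PGMR0Trap0eHandlerNPMisconfig above: an EPT
 * misconfiguration exit on one of the deliberately misconfigured MMIO pages
 * would be forwarded roughly like this (VT-x supplies no error code, hence
 * UINT32_MAX as documented in the function header; the surrounding exit
 * handler is hypothetical):
 *
 *     VBOXSTRICTRC rcStrict = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT,
 *                                                           CPUMCTX2CORE(pCtx), GCPhysFault, UINT32_MAX);
 */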
587