VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp @ 37950

Last change on this file since 37950 was 37950, checked in by vboxsync, 13 years ago

PGMR0.cpp: Add a VBOX_STRICT around the loop, just in case.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.7 KB
/* $Id: PGMR0.cpp 37950 2011-07-14 10:13:39Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME


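/*
 * Descriptive note on the instantiations above (a gloss based only on what is
 * visible in this file): each inclusion of PGMR0Bth.h stamps out the ring-0
 * worker for one guest/shadow mode pairing, and the
 * PGM_BTH_NAME_*_PROT(Trap0eHandler) expressions in the switch inside
 * PGMR0Trap0eHandlerNestedPaging below resolve to exactly these workers --
 * the guest is treated as being in protected mode without paging while the
 * shadow side uses 32-bit, PAE, AMD64 or EPT nested page tables.
 */

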
/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


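/*
 * Usage sketch (a hypothetical caller shown purely for illustration; only the
 * PGM/GMM calls that already appear in this file are real):
 *
 *      pgmLock(pVM);                                       // the allocator asserts PGM critsect ownership
 *      int rc = PGMR0PhysAllocateHandyPages(pVM, pVCpu);
 *      // on success it is the caller, not this function, that clears the
 *      // newly allocated pages (see the @remarks above)
 *      pgmUnlock(pVM);
 */

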
/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));

    Assert(!pVM->pgm.s.cLargeHandyPages);
    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch.  The interface belongs to a global PCI pass-through
   manager.  It shall use the global VM handle, not the user VM handle, to
   store the per-VM info (domain) since that is all ring-0 stuff, thus
   passing pGVM here.  I've tentatively prefixed the functions 'GPciRawR0';
   we can discuss the PciRaw code re-organization when I'm back from
   vacation.

   I've implemented the initial IOMMU set up below.  For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code.  For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine.  At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */

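/*
 * Expected call sequence (a sketch of how PGMR0PhysSetupIommu below drives
 * this interface; the per-page loop is abbreviated here):
 *
 *      GPciRawR0GuestPageBeginAssignments(pGVM);
 *      // for each guest page in every RAM range, one of:
 *      GPciRawR0GuestPageAssign(pGVM, GCPhys, HCPhys);    // 100% safe RAM pages
 *      GPciRawR0GuestPageUnassign(pGVM, GCPhys);          // everything else
 *      GPciRawR0GuestPageEndAssignments(pGVM);
 *
 *      GPciRawR0GuestPageUpdate(pGVM, GCPhys, HCPhys);    // later, when a mapping changes
 */
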
/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}


/**
 * Indicates that the specified guest page either doesn't exist or doesn't have
 * a host page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The new host page address or NIL_RTHCPHYS if
 *                      now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */


/**
 * Sets up the IOMMU when a raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
{
    PGVM pGVM;
    int rc = GVMMR0ByVM(pVM, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The Simplistic Approach - Enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE pPage  = &pRam->aPages[0];
                RTGCPHYS GCPhys = pRam->GCPhys;
                uint32_t cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE    || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* Note: hack alert for difficult to reproduce problem. */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}


/**
 * #PF Handler for deliberate nested paging misconfiguration (reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The VM Handle.
 * @param   pVCpu               The current CPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandler->CTX_SUFF(pfnHandler))
            {
                CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
                void *pvUser = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
                rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_INTERNAL_ERROR_4;
#endif
}
