VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@46757

Last change on this file since 46757 was 44528, checked in by vboxsync, 12 years ago

header (C) fixes

/* $Id: PGMR0.cpp 44528 2013-02-04 14:27:54Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
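/* Each inclusion of PGMR0Bth.h below instantiates the bottom-half (Bth)
   Trap0eHandler worker for one shadow paging mode; PGMR0Trap0eHandlerNestedPaging
   further down dispatches to these per-mode workers. */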
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME


/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
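            /* Illustration (array size and minimum are hypothetical, not the
               build-time constants): with 128 pages requested, iFirst = 0 and a
               minimum of 32, the retries ask GMM for 64 and then 32 pages, each
               request clamped to at least PGM_HANDY_PAGES_MIN - iFirst, until
               GMM succeeds or we are down to the minimum. */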
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Flushes any changes pending in the handy page array.
 *
 * It is very important that this gets done when page sharing is enabled.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Must be called from within the PGM critical section.
 */
VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);

    /*
     * Push the pending handy page updates back to GMM (no new allocations).
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, 0, &pVM->pgm.s.aHandyPages[iFirst]);

    LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
    Assert(!pVM->pgm.s.cLargeHandyPages);

    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M,
                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch.  The interface belongs to a global PCI pass-through
   manager.  It shall use the global VM handle, not the user VM handle to
   store the per-VM info (domain) since that is all ring-0 stuff, thus
   passing pGVM here.  I've tentatively prefixed the functions 'GPciRawR0',
   we can discuss the PciRaw code re-organization when I'm back from
   vacation.

   I've implemented the initial IOMMU set up below.  For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code.  For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine.  At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */
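/* Status note derived from the stubs below: GPciRawR0GuestPageBeginAssignments,
   GPciRawR0GuestPageEndAssignments and GPciRawR0GuestPageUpdate are still no-op
   placeholders; only GPciRawR0GuestPageAssign and GPciRawR0GuestPageUnassign do
   any work, by forwarding to the pfnContigMemInfo callback when one is set. */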

/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}


/**
 * Indicates that the specified guest page doesn't exist or doesn't have a host
 * page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The new host page address or NIL_RTHCPHYS if
 *                      now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    NOREF(pGVM);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */


/**
 * Sets up the IOMMU when a raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
{
    PGVM pGVM;
    int rc = GVMMR0ByVM(pVM, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The Simplistic Approach - Enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE pPage  = &pRam->aPages[0];
                RTGCPHYS GCPhys = pRam->GCPhys;
                uint32_t cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE   || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));
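    /* Deliberate reserved-bit misconfigurations, as used for MMIO pages, are
       expected to be handled by PGMR0Trap0eHandlerNPMisconfig below rather than
       by this function. */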

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
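    /* The mode-specific Trap0eHandler worker may return with the PGM lock
       still held; fLockTaken tells us whether we have to release it below. */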
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /*
     * Handle the case where we cannot interpret the instruction because we cannot get the guest physical address
     * via its page tables, see @bugref{6043}.
     */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}


/**
 * #PF Handler for deliberate nested paging misconfiguration (/reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandler->CTX_SUFF(pfnHandler))
            {
                CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
                void                       *pvUser     = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
                rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_PGM_NOT_USED_IN_MODE;
#endif
}