VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@33935

Last change on this file since 33935 was 32431, checked in by vboxsync, 14 years ago

scm cleanup

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 15.7 KB
/* $Id: PGMR0.cpp 32431 2010-09-11 18:02:17Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/gmm.h>
#include "../PGMInternal.h"
#include <VBox/vm.h>
#include "../PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME


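/*
 * Illustrative aside (hypothetical names, not VirtualBox identifiers): the
 * #define/#include sequence above is textual template instantiation -- each
 * #include re-expands PGMR0Bth.h under a different PGM_BTH_NAME mapping,
 * producing one copy of the template code per paging mode.  The same pattern
 * in miniature:
 *
 *      // template.h -- expects the includer to define NAME(x).
 *      int NAME(init)(void);
 *
 *      // consumer.c
 *      #define NAME(x) Foo_##x
 *      #include "template.h"       // declares Foo_init
 *      #undef  NAME
 *      #define NAME(x) Bar_##x
 *      #include "template.h"       // declares Bar_init
 *      #undef  NAME
 */

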
/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 2;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}
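

/*
 * Illustrative sketch only: roughly how a ring-0 caller might keep the
 * handy-page set topped up.  exampleEnsureHandyPages is a hypothetical name;
 * the real callers are PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 * Note that on VERR_GMM_HIT_GLOBAL_LIMIT / VERR_GMM_HIT_VM_ACCOUNT_LIMIT the
 * worker above already retries by itself with a reduced request
 * (cPages >>= 2, clamped so the set still reaches PGM_HANDY_PAGES_MIN).
 */
#if 0 /* illustrative only */
static int exampleEnsureHandyPages(PVM pVM, PVMCPU pVCpu)
{
    /* The worker must be called with the PGM critical section held. */
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));

    /* Refill only when the set runs low; the worker returns VINF_SUCCESS
       immediately if the array is already full (cPages == 0). */
    int rc = VINF_SUCCESS;
    if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_MIN)
        rc = PGMR0PhysAllocateHandyPages(pVM, pVCpu);
    return rc;
}
#endif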


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu->idCpu));

    Assert(!pVM->pgm.s.cLargeHandyPages);
    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}
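

/*
 * Illustrative sketch only (exampleIsLargePageAligned is a hypothetical
 * helper, not VirtualBox code): a large handy page backs a 2 MB mapping, so
 * its host physical address must be 2 MB aligned before it can be used for a
 * large-page PDE.
 */
#if 0 /* illustrative only */
static bool exampleIsLargePageAligned(RTHCPHYS HCPhys)
{
    /* _2M == 0x200000; a usable large page has the low 21 bits clear. */
    return !(HCPhys & (_2M - 1));
}
#endif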


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE     || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved-bit faults shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));
#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        Assert(PGMIsLockOwner(pVM));
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* Note: hack alert for a difficult-to-reproduce problem. */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute
           the instruction again; not sure about single-VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
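

/*
 * Illustrative sketch only (exampleDescribePageFault is a hypothetical
 * helper, not VirtualBox code): the X86_TRAP_PF_* error-code bits tested by
 * the statistics block above, spelled out.
 */
#if 0 /* illustrative only */
static const char *exampleDescribePageFault(RTGCUINT uErr)
{
    /* X86_TRAP_PF_US (bit 2) distinguishes user-mode from supervisor access;
       the remaining bits classify the fault itself. */
    if (!(uErr & X86_TRAP_PF_P))        /* bit 0 clear: page not present */
        return (uErr & X86_TRAP_PF_RW) ? "not-present write" : "not-present read";
    if (uErr & X86_TRAP_PF_RSVD)        /* bit 3: reserved bit set in a paging entry */
        return "reserved-bit violation";
    if (uErr & X86_TRAP_PF_ID)          /* bit 4: instruction fetch (NX) */
        return "instruction fetch";
    return (uErr & X86_TRAP_PF_RW) ? "protection write" : "protection read";
}
#endif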


/**
 * #PF Handler for deliberate nested paging misconfiguration (reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The VM Handle.
 * @param   pVCpu               The current CPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandler->CTX_SUFF(pfnHandler))
            {
                CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
                void                       *pvUser     = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
                rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_INTERNAL_ERROR_4;
#endif
}