VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp@17826

Last change on this file since 17826 was 17667, checked in by vboxsync, 16 years ago

pgmPoolAlloc no longer fails with non-fatal errors.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 29.3 KB
/* $Id: PGMAllMap.cpp 17667 2009-03-11 09:35:22Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR     off     = GCPtr - pCur->GCPtr;
                const unsigned  iPT     = off >> X86_PD_SHIFT;
                const unsigned  iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;  /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr   += PAGE_SIZE;
                Pte.u   += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
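
/*
 * Usage sketch (illustrative only): mapping a single page read/write into an
 * already registered mapping range. GCPtrMap and HCPhysPage are hypothetical
 * placeholders; the flags mirror the hypervisor defaults above plus write
 * access.
 *
 *     int rc = PGMMap(pVM, GCPtrMap, HCPhysPage, PAGE_SIZE,
 *                     X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D);
 *     AssertRC(rc);
 */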


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
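
/*
 * Illustrative note: PGMMapSetPage(pVM, GCPtr, cb, fFlags) is equivalent to
 * PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0). The zero AND mask clears all
 * existing flag bits (the physical address bits are preserved by the page
 * mask in PGMMapModifyPage), so the flags are replaced rather than merged.
 */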


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask  | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate TLB entry */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
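
/*
 * Worked example (illustrative only): making an already mapped range
 * read-only while leaving all other flags untouched. The AND mask clears
 * X86_PTE_RW, the OR mask adds nothing; GCPtrMap and cbRange are hypothetical
 * placeholders.
 *
 *     int rc = PGMMapModifyPage(pVM, GCPtrMap, cbRange,
 *                               0,                       // fFlags: no bits to set
 *                               ~(uint64_t)X86_PTE_RW);  // fMask:  clear only the RW bit
 *     AssertRC(rc);
 */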


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);
#ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                if (    pShw32BitPd->a[iNewPDE].n.u1Present
                    && !(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags. */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShw32BitPd);
#endif
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt;
                PX86PDPAE pShwPaePd;
                const unsigned iPdPt = iNewPDE / 256;
                unsigned       iPDE  = iNewPDE * 2 % 512;

                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
#ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmShwSyncPaePDPtr. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;

                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe;
                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatal(RT_SUCCESS(rc));
                }
                Assert(pShwPaePd);
#ifdef IN_RC /* Lock mapping to prevent it from being reused during pgmPoolFree. */
                PGMDynLockHCPage(pVM, (uint8_t *)pShwPaePd);
#endif
                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as locked; disallow flushing. */
                    pgmPoolLockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
# ifdef VBOX_STRICT
                else
                if (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)
                {
                    Assert(PGMGetGuestMode(pVM) >= PGMMODE_PAE);
                    AssertFatalMsg((pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT0, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT0));
                    Assert(pShwPaePd->a[iPDE+1].u & PGM_PDFLAGS_MAPPING);
                    AssertFatalMsg((pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK) == pMap->aPTs[i].HCPhysPaePT1, ("%RX64 vs %RX64\n", pShwPaePd->a[iPDE+1].u & X86_PDE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
                }
# endif
                if (    pShwPaePd->a[iPDE].n.u1Present
                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (    pShwPaePd->a[iPDE].n.u1Present
                    && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
                {
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
                }
                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;

#ifdef IN_RC
                /* Unlock dynamic mappings again. */
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPaePd);
                PGMDynUnlockHCPage(pVM, (uint8_t *)pShwPdpt);
#endif
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
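
/*
 * Index arithmetic sketch (illustrative only): each 32-bit PDE (iNewPDE)
 * covers 4 MB, a PAE page directory covers 1 GB (512 entries of 2 MB), and a
 * PAE PDPT entry therefore spans 256 of the 4 MB slots. Hence
 * iPdPt = iNewPDE / 256 selects the PDPT entry and iPDE = iNewPDE * 2 % 512
 * selects the first of the two 2 MB PDEs that replace the one 4 MB PDE.
 * For example, iNewPDE = 0x3FF (the top 4 MB of the address space) gives
 * iPdPt = 3 and iPDE = 510, with entry 511 holding the second half.
 */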

/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
{
    Log(("pgmMapClearShadowPDEs old pde %x (cPTs=%x) (mappings enabled %d)\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pShwPageCR3);
# ifdef IN_RC
    Assert(pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    PX86PDPT pCurrentShwPdpt = NULL;

    if (    PGMGetGuestMode(pVM) >= PGMMODE_PAE
        &&  pShwPageCR3 != pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    }

    unsigned i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);

    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                Assert(!pShw32BitPd->a[iOldPDE].n.u1Present || (pShw32BitPd->a[iOldPDE].u & PGM_PDFLAGS_MAPPING));
                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt  = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPdpt = iOldPDE / 256;       /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned       iPDE  = iOldPDE * 2 % 512;
                pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));

                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;

                if (pCurrentShwPdpt)
                {
                    /* If the page directory of the old CR3 is reused in the new one, then don't clear the hypervisor mappings. */
                    if ((pCurrentShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) == (pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK))
                    {
                        LogFlow(("pgmMapClearShadowPDEs: Pdpe %d reused -> don't clear hypervisor mappings!\n", iPdpt));
                        break;
                    }
                }
                AssertFatal(pShwPaePd);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                Assert(!pShwPaePd->a[iPDE].n.u1Present || (pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                pShwPaePd->a[iPDE].u = 0;

                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePd);

                if (pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
                {
                    /* Mark the page as unlocked; allow flushing again. */
                    pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
                }
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
#endif /* !IN_RING0 */

#if defined(VBOX_STRICT) && !defined(IN_RING0)
/**
 * Checks all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iPDE        The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
{
    Assert(pShwPageCR3);

    unsigned i = pMap->cPTs;
    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);

    iPDE += i;
    while (i-- > 0)
    {
        iPDE--;

        switch (enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                AssertFatal(pShw32BitPd);

                AssertMsg(pShw32BitPd->a[iPDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT),
                          ("Expected %x vs %x\n", pShw32BitPd->a[iPDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT)));
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pPdpt     = NULL;
                PX86PDPAE pShwPaePd = NULL;

                const unsigned iPD = iPDE / 256;        /* iPDE * 2 / 512; iPDE is in 4 MB pages */
                unsigned  iPaePDE  = iPDE * 2 % 512;
                pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
                AssertFatal(pShwPaePd);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0)));

                iPaePDE++;
                AssertFatal(iPaePDE < 512);

                AssertMsg(pShwPaePd->a[iPaePDE].u == (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1),
                          ("Expected %RX64 vs %RX64\n", pShwPaePd->a[iPaePDE].u, (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1)));

                Assert(pPdpt->a[iPD].u & PGM_PLXFLAGS_MAPPING);
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}

/**
 * Check the hypervisor mappings in the active CR3.
 *
 * @param   pVM     The virtual machine.
 */
VMMDECL(void) PGMMapCheck(PVM pVM)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
}
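
/*
 * Usage sketch (illustrative only, hypothetical call site): strict builds can
 * call this after the hypervisor mappings have been (re)applied to the active
 * CR3 to verify that every mapping PDE is present and carries the expected
 * flags and page table addresses.
 *
 *     #ifdef VBOX_STRICT
 *         PGMMapCheck(pVM);
 *     #endif
 */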
#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */

#ifndef IN_RING0
/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    /* @note A log flush (in RC) can cause problems when called from MapCR3 (inconsistent state will trigger assertions). */
    Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
}


/**
 * Remove the hypervisor mappings from the specified CR3.
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are disabled.
     */
    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return VINF_SUCCESS;

    Assert(pShwPageCR3);

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;

        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
    }
    return VINF_SUCCESS;
}
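
/*
 * Pairing sketch (illustrative only, hypothetical call site): around a shadow
 * CR3 change the two helpers above are used as a pair - the mapping PDEs are
 * cleared from the root that is being left and applied to the root that
 * becomes active, roughly:
 *
 *     rc = pgmMapDeactivateCR3(pVM, pOldShwPageCR3);                  // outgoing root
 *     rc = pgmMapActivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));  // new active root
 *
 * pgmMapActivateCR3 asserts that the page it is given is the currently active
 * pShwPageCR3, so it can only run once the new root has been installed.
 */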

/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR   GCPtr = pCur->GCPtr;

            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
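
/*
 * Usage sketch (illustrative only, hypothetical call site): callers typically
 * probe for conflicts and then let PGMMapResolveConflicts below deal with
 * them - relocating the mapping in place in ring-3, or returning
 * VINF_PGM_SYNC_CR3 in RC so the conflict is handled on the next CR3 sync.
 *
 *     if (PGMMapHasConflicts(pVM))
 *     {
 *         int rc = PGMMapResolveConflicts(pVM);
 *         AssertRC(rc);
 *     }
 */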

/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);

#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            if (!pCur)
                break;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR   GCPtr = pCur->GCPtr;

            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);

                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, pCur->GCPtr);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < GCPtr)
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            if (!pCur)
                break;
        }
    }
    else
        AssertFailed();

    return VINF_SUCCESS;
}

#endif /* !IN_RING0 */