VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMMap.cpp @ 11725

Last change on this file since 11725 was 11711, checked in by vboxsync, 16 years ago

Deal with PGMR3MappingsUnfix flushing the entire page pool. In long mode the cached CR3 page will be flushed as well.

1/* $Id: PGMMap.cpp 11711 2008-08-27 16:08:15Z vboxsync $ */
2/** @file
3 * PGM - Page Manager, Guest Context Mappings.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include "PGMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37
38
39/*******************************************************************************
40* Internal Functions *
41*******************************************************************************/
42static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE);
43static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
44static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
45static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
46
47
48
49/**
50 * Creates a page table based mapping in GC.
51 *
52 * @returns VBox status code.
53 * @param pVM VM Handle.
54 * @param GCPtr Virtual Address. (Page table aligned!)
55 * @param cb Size of the range. Must be 4MB aligned!
56 * @param pfnRelocate Relocation callback function.
57 * @param pvUser User argument to the callback.
58 * @param pszDesc Pointer to description string. This must not be freed.
59 */
60PGMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, uint32_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
61{
62 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
63 AssertMsg(pVM->pgm.s.pInterPD && pVM->pgm.s.pHC32BitPD, ("Paging isn't initialized, init order problems!\n"));
64
65 /*
66 * Validate input.
67 */
68 if (cb < _2M || cb > 64 * _1M)
69 {
70 AssertMsgFailed(("Serious? cb=%d\n", cb));
71 return VERR_INVALID_PARAMETER;
72 }
73 cb = RT_ALIGN_32(cb, _4M);
74 RTGCPTR GCPtrLast = GCPtr + cb - 1;
75 if (GCPtrLast < GCPtr)
76 {
77 AssertMsgFailed(("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast));
78 return VERR_INVALID_PARAMETER;
79 }
80 if (pVM->pgm.s.fMappingsFixed)
81 {
82 AssertMsgFailed(("Mappings are fixed! It's not possible to add new mappings at this time!\n"));
83 return VERR_PGM_MAPPINGS_FIXED;
84 }
85 if (!pfnRelocate)
86 {
87 AssertMsgFailed(("Callback is required\n"));
88 return VERR_INVALID_PARAMETER;
89 }
90
91 /*
92 * Find list location.
93 */
94 PPGMMAPPING pPrev = NULL;
95 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
96 while (pCur)
97 {
98 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
99 {
100 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
101 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
102 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
103 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
104 return VERR_PGM_MAPPING_CONFLICT;
105 }
106 if (pCur->GCPtr > GCPtr)
107 break;
108 pPrev = pCur;
109 pCur = pCur->pNextR3;
110 }
111
112 /*
113 * Check for conflicts with intermediate mappings.
114 */
115 const unsigned iPageDir = GCPtr >> X86_PD_SHIFT;
116 const unsigned cPTs = cb >> X86_PD_SHIFT;
117 unsigned i;
118 for (i = 0; i < cPTs; i++)
119 {
120 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
121 {
122 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << X86_PD_SHIFT)));
123 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << X86_PD_SHIFT)));
124 return VERR_PGM_MAPPING_CONFLICT;
125 }
126 }
127 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
128
129 /*
130 * Allocate and initialize the new list node.
131 */
132 PPGMMAPPING pNew;
133 int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
134 if (VBOX_FAILURE(rc))
135 return rc;
136 pNew->GCPtr = GCPtr;
137 pNew->GCPtrLast = GCPtrLast;
138 pNew->cb = cb;
139 pNew->pszDesc = pszDesc;
140 pNew->pfnRelocate = pfnRelocate;
141 pNew->pvUser = pvUser;
142 pNew->cPTs = cPTs;
143
144 /*
145 * Allocate memory for the page tables. (One 32-bit PT and two PAE PTs each.)
146 * (One 32-bit PT and two PAE PTs.)
147 */
148 uint8_t *pbPTs;
149 rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
150 if (VBOX_FAILURE(rc))
151 {
152 MMHyperFree(pVM, pNew);
153 return VERR_NO_MEMORY;
154 }
155
156 /*
157 * Init the page tables and insert them into the page directories.
158 */
159 Log4(("PGMR3MapPT: GCPtr=%VGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
160 for (i = 0; i < cPTs; i++)
161 {
162 /*
163 * 32-bit.
164 */
165 pNew->aPTs[i].pPTR3 = (PX86PT)pbPTs;
166 pNew->aPTs[i].pPTGC = MMHyperR3ToRC(pVM, pNew->aPTs[i].pPTR3);
167 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
168 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
169 pbPTs += PAGE_SIZE;
170 Log4(("PGMR3MapPT: i=%d: pPTHC=%p pPTGC=%p HCPhysPT=%RHp\n",
171 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTGC, pNew->aPTs[i].HCPhysPT));
172
173 /*
174 * PAE.
175 */
176 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
177 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
178 pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
179 pNew->aPTs[i].paPaePTsGC = MMHyperR3ToRC(pVM, pbPTs);
180 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
181 pbPTs += PAGE_SIZE * 2;
182 Log4(("PGMR3MapPT: i=%d: paPaePTsHC=%p paPaePTsGC=%p HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
183 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsGC, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
184 }
185 pgmR3MapSetPDEs(pVM, pNew, iPageDir);
186
187 /*
188 * Insert the new mapping.
189 */
190 pNew->pNextR3 = pCur;
191 pNew->pNextGC = pCur ? MMHyperR3ToRC(pVM, pCur) : 0;
192 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : 0;
193 if (pPrev)
194 {
195 pPrev->pNextR3 = pNew;
196 pPrev->pNextGC = MMHyperR3ToRC(pVM, pNew);
197 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
198 }
199 else
200 {
201 pVM->pgm.s.pMappingsR3 = pNew;
202 pVM->pgm.s.pMappingsGC = MMHyperR3ToRC(pVM, pNew);
203 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
204 }
205
206 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
207 return VINF_SUCCESS;
208}
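/*
 * Illustrative usage sketch (the example* names are hypothetical): creating and
 * later removing a floating page-table mapping. It assumes a valid pVM, a 4MB
 * aligned candidate address, and that PFNPGMRELOCATE has the signature the
 * calls above imply (old address, new address, a PGMRELOCATECALL_* mode and
 * the user argument).
 */
static DECLCALLBACK(bool) exampleRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                          PGMRELOCATECALL enmMode, void *pvUser)
{
    /* Accept any suggested address; remember the final base when actually relocated. */
    if (enmMode == PGMRELOCATECALL_RELOCATE)
        *(RTGCPTR *)pvUser = GCPtrNew;
    return true;
}

static int exampleSetupMapping(PVM pVM, RTGCPTR *pGCPtrBase)
{
    /* Reserve an 8 MB mapping; PGMR3MapPT rounds the size up to a 4MB multiple. */
    int rc = PGMR3MapPT(pVM, *pGCPtrBase, 8 * _1M, exampleRelocate, pGCPtrBase, "Example mapping");
    if (VBOX_FAILURE(rc))
        return rc;
    /* ... use the mapping; PGMR3UnmapPT(pVM, *pGCPtrBase) removes it again. */
    return VINF_SUCCESS;
}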
209
210
211/**
212 * Removes a page table based mapping.
213 *
214 * @returns VBox status code.
215 * @param pVM VM Handle.
216 * @param GCPtr Virtual Address. (Page table aligned!)
217 */
218PGMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
219{
220 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
221
222 /*
223 * Find it.
224 */
225 PPGMMAPPING pPrev = NULL;
226 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
227 while (pCur)
228 {
229 if (pCur->GCPtr == GCPtr)
230 {
231 /*
232 * Unlink it.
233 */
234 if (pPrev)
235 {
236 pPrev->pNextR3 = pCur->pNextR3;
237 pPrev->pNextGC = pCur->pNextGC;
238 pPrev->pNextR0 = pCur->pNextR0;
239 }
240 else
241 {
242 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
243 pVM->pgm.s.pMappingsGC = pCur->pNextGC;
244 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
245 }
246
247 /*
248 * Free the page table memory, clear page directory entries
249 * and free the page tables and node memory.
250 */
251 MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
252 pgmR3MapClearPDEs(&pVM->pgm.s, pCur, pCur->GCPtr >> X86_PD_SHIFT);
253 MMHyperFree(pVM, pCur);
254
255 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
256 return VINF_SUCCESS;
257 }
258
259 /* done? */
260 if (pCur->GCPtr > GCPtr)
261 break;
262
263 /* next */
264 pPrev = pCur;
265 pCur = pCur->pNextR3;
266 }
267
268 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
269 return VERR_INVALID_PARAMETER;
270}
271
272
273/**
274 * Gets the size of the current guest mappings if they were to be
275 * put next to one another.
276 *
277 * @returns VBox status code.
278 * @param pVM The VM.
279 * @param pcb Where to store the size.
280 */
281PGMR3DECL(int) PGMR3MappingsSize(PVM pVM, uint32_t *pcb)
282{
283 RTGCUINTPTR cb = 0;
284 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
285 cb += pCur->cb;
286
287 *pcb = cb;
288 AssertReturn(*pcb == cb, VERR_NUMBER_TOO_BIG);
289 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
290 return VINF_SUCCESS;
291}
292
293
294/**
295 * Fixes the guest context mappings in a range reserved from the Guest OS.
296 *
297 * @returns VBox status code.
298 * @param pVM The VM.
299 * @param GCPtrBase The address of the reserved range of guest memory.
300 * @param cb The size of the range starting at GCPtrBase.
301 */
302PGMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb)
303{
304 Log(("PGMR3MappingsFix: GCPtrBase=%#x cb=%#x\n", GCPtrBase, cb));
305
306 /*
307 * This is all or nothing at all. So, a tiny bit of paranoia first.
308 */
309 if (GCPtrBase & X86_PAGE_4M_OFFSET_MASK)
310 {
311 AssertMsgFailed(("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase));
312 return VERR_INVALID_PARAMETER;
313 }
314 if (!cb || (cb & X86_PAGE_4M_OFFSET_MASK))
315 {
316 AssertMsgFailed(("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb));
317 return VERR_INVALID_PARAMETER;
318 }
319
320 /*
321 * Before we do anything we'll do a forced PD sync to try to make sure any
322 * pending relocations because of these mappings have been resolved.
323 */
324 PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), true);
325
326 /*
327 * Check that it's not conflicting with a core code mapping in the intermediate page table.
328 */
329 unsigned iPDNew = GCPtrBase >> X86_PD_SHIFT;
330 unsigned i = cb >> X86_PD_SHIFT;
331 while (i-- > 0)
332 {
333 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
334 {
335 /* Check that it's not one of our mappings. */
336 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
337 while (pCur)
338 {
339 if (iPDNew + i - (pCur->GCPtr >> X86_PD_SHIFT) < (pCur->cb >> X86_PD_SHIFT))
340 break;
341 pCur = pCur->pNextR3;
342 }
343 if (!pCur)
344 {
345 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%VGv cb=%#zx). The guest should retry.\n",
346 iPDNew + i, GCPtrBase, cb));
347 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
348 }
349 }
350 }
351
352 /*
353 * Loop the mappings and check that they all agree on their new locations.
354 */
355 RTGCPTR GCPtrCur = GCPtrBase;
356 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
357 while (pCur)
358 {
359 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
360 {
361 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
362 return VERR_PGM_MAPPINGS_FIX_REJECTED;
363 }
364 /* next */
365 GCPtrCur += pCur->cb;
366 pCur = pCur->pNextR3;
367 }
368 if (GCPtrCur > GCPtrBase + cb)
369 {
370 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
371 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
372 }
373
374 /*
375 * Loop the table assigning the mappings to the passed-in memory
376 * and call their relocation callbacks.
377 */
378 GCPtrCur = GCPtrBase;
379 pCur = pVM->pgm.s.pMappingsR3;
380 while (pCur)
381 {
382 unsigned iPDOld = pCur->GCPtr >> X86_PD_SHIFT;
383 iPDNew = GCPtrCur >> X86_PD_SHIFT;
384
385 /*
386 * Relocate the page table(s).
387 */
388 pgmR3MapClearPDEs(&pVM->pgm.s, pCur, iPDOld);
389 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
390
391 /*
392 * Update the entry.
393 */
394 pCur->GCPtr = GCPtrCur;
395 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
396
397 /*
398 * Callback to execute the relocation.
399 */
400 pCur->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
401
402 /*
403 * Advance.
404 */
405 GCPtrCur += pCur->cb;
406 pCur = pCur->pNextR3;
407 }
408
409 /*
410 * Turn off monitoring of CR3 updates.
411 */
412 int rc2 = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
413 AssertRC(rc2);
414
415 /*
416 * Mark the mappings as fixed and return.
417 */
418 pVM->pgm.s.fMappingsFixed = true;
419 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;
420 pVM->pgm.s.cbMappingFixed = cb;
421 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
422 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
423 return VINF_SUCCESS;
424}
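/*
 * Illustrative sketch of the fix/unfix lifecycle (exampleFixMappings is a
 * hypothetical name). It assumes a valid pVM and a GCPtrBase that the guest
 * has reserved for the hypervisor; how that range is obtained is outside
 * this file.
 */
static int exampleFixMappings(PVM pVM, RTGCPTR GCPtrBase)
{
    /* Ask how much room the mappings need when packed next to one another... */
    uint32_t cb;
    int rc = PGMR3MappingsSize(pVM, &cb);
    if (VBOX_FAILURE(rc))
        return rc;

    /* ...and pin them into the reserved, 4MB aligned guest range. */
    rc = PGMR3MappingsFix(pVM, GCPtrBase, cb);

    /* PGMR3MappingsUnfix(pVM) would later re-enable floating mappings and conflict detection. */
    return rc;
}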
425
426
427/**
428 * Unfixes the mappings.
429 * After calling this function, mapping conflict detection will be enabled.
430 *
431 * @returns VBox status code.
432 * @param pVM The VM.
433 */
434PGMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
435{
436 Log(("PGMR3MappingsUnfix: fMappingsFixed=%d\n", pVM->pgm.s.fMappingsFixed));
437 pVM->pgm.s.fMappingsFixed = false;
438 pVM->pgm.s.GCPtrMappingFixed = 0;
439 pVM->pgm.s.cbMappingFixed = 0;
440 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
441
442 /*
443 * Re-enable the CR3 monitoring.
444 *
445 * Paranoia: We flush the page pool before doing that because Windows
446 * uses the CR3 page both as a PD and a PT, so the pool may
447 * be monitoring it.
448 */
449#ifdef PGMPOOL_WITH_MONITORING
450 pgmPoolFlushAll(pVM);
451#endif
452 /* Remap CR3 as we have just flushed the CR3 shadow PML4 in case we're in long mode. */
453 int rc = PGM_GST_PFN(MapCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
454 AssertRC(rc);
455
456 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
457 AssertRC(rc);
458
459 return VINF_SUCCESS;
460}
461
462
463/**
464 * Map pages into the intermediate context (switcher code).
465 * These pages are mapped both at the given virtual address and at
466 * the physical address (for identity mapping).
467 *
468 * @returns VBox status code.
469 * @param pVM The virtual machine.
470 * @param Addr Intermediate context address of the mapping.
471 * @param HCPhys Start of the range of physical pages. This must be entirely below 4GB!
472 * @param cbPages Number of bytes to map.
473 *
474 * @remark This API shall not be used for anything but mapping the switcher code.
475 */
476PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
477{
478 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%VHp cbPages=%#x\n", Addr, HCPhys, cbPages));
479
480 /*
481 * Adjust input.
482 */
483 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
484 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
485 HCPhys &= X86_PTE_PAE_PG_MASK;
486 Addr &= PAGE_BASE_MASK;
487 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
488 uint32_t uAddress = (uint32_t)Addr;
489
490 /*
491 * Assert input and state.
492 */
493 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
494 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
495 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
496 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
497
498 /*
499 * Check for internal conflicts between the virtual address and the physical address.
500 */
501 if ( uAddress != HCPhys
502 && ( uAddress < HCPhys
503 ? HCPhys - uAddress < cbPages
504 : uAddress - HCPhys < cbPages
505 )
506 )
507 AssertLogRelMsgFailedReturn(("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages),
508 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
509
510 /* The intermediate mapping must not conflict with our default hypervisor address. */
511 size_t cbHyper;
512 RTGCPTR pvHyperGC = MMHyperGetArea(pVM, &cbHyper);
513 if (uAddress < pvHyperGC
514 ? uAddress + cbPages > pvHyperGC
515 : pvHyperGC + cbHyper > uAddress
516 )
517 AssertLogRelMsgFailedReturn(("Addr=%RTptr HyperGC=%VGv cbPages=%zu\n", Addr, pvHyperGC, cbPages),
518 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
519
520 const unsigned cPages = cbPages >> PAGE_SHIFT;
521 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
522 if (VBOX_FAILURE(rc))
523 return rc;
524 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
525 if (VBOX_FAILURE(rc))
526 return rc;
527
528 /*
529 * Everything's fine, do the mapping.
530 */
531 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
532 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
533
534 return VINF_SUCCESS;
535}
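/*
 * Illustrative sketch (exampleMapSwitcher is a hypothetical name): identity
 * mapping a piece of switcher code into the intermediate context. It assumes
 * pvCode is a host page known to lie below 4GB and allocated such that
 * MMPage2Phys() (used elsewhere in this file) can resolve it.
 */
static int exampleMapSwitcher(PVM pVM, RTUINTPTR AddrIC, void *pvCode, unsigned cbCode)
{
    RTHCPHYS HCPhys = MMPage2Phys(pVM, pvCode);
    /* The pages become visible both at AddrIC and at HCPhys (identity mapping). */
    return PGMR3MapIntermediate(pVM, AddrIC, HCPhys, cbCode);
}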
536
537
538/**
539 * Validates that there are no conflicts for this mapping into the intermediate context.
540 *
541 * @returns VBox status code.
542 * @param pVM VM handle.
543 * @param uAddress Address of the mapping.
544 * @param cPages Number of pages.
545 * @param pPTDefault Pointer to the default page table for this mapping.
546 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
547 */
548static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
549{
550 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme\n"));
551
552 /*
553 * Check that the ranges are available.
554 * (This code doesn't have to be fast.)
555 */
556 while (cPages > 0)
557 {
558 /*
559 * 32-Bit.
560 */
561 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
562 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
563 PX86PT pPT = pPTDefault;
564 if (pVM->pgm.s.pInterPD->a[iPDE].u)
565 {
566 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
567 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
568 pPT = pVM->pgm.s.apInterPTs[0];
569 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
570 pPT = pVM->pgm.s.apInterPTs[1];
571 else
572 {
573 /** @todo this must be handled with a relocation of the conflicting mapping!
574 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
575 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%VHv\n", uAddress),
576 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
577 }
578 }
579 if (pPT->a[iPTE].u)
580 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%VHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u),
581 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
582
583 /*
584 * PAE.
585 */
586 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
587 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
588 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
589 Assert(iPDPE < 4);
590 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
591 PX86PTPAE pPTPae = pPTPaeDefault;
592 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
593 {
594 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
595 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
596 pPTPae = pVM->pgm.s.apInterPaePTs[0];
597 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]))
598 pPTPae = pVM->pgm.s.apInterPaePTs[1];
599 else
600 {
601 /** @todo this must be handled with a relocation of the conflicting mapping!
602 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
603 AssertLogRelMsgFailedReturn(("Conflict between core code and PGMR3Mapping(). uAddress=%VHv\n", uAddress),
604 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
605 }
606 }
607 if (pPTPae->a[iPTE].u)
608 AssertLogRelMsgFailedReturn(("Conflict iPTE=%#x iPDE=%#x uAddress=%VHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u),
609 VERR_PGM_INTERMEDIATE_PAGING_CONFLICT);
610
611 /* next */
612 uAddress += PAGE_SIZE;
613 cPages--;
614 }
615
616 return VINF_SUCCESS;
617}
618
619
620
621/**
622 * Sets up the intermediate page tables for a verified mapping.
623 *
624 * @param pVM VM handle.
625 * @param uAddress Address of the mapping.
626 * @param HCPhys The physical address of the page range.
627 * @param cPages Number of pages.
628 * @param pPTDefault Pointer to the default page table for this mapping.
629 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
630 */
631static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
632{
633 while (cPages > 0)
634 {
635 /*
636 * 32-Bit.
637 */
638 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
639 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
640 PX86PT pPT;
641 if (pVM->pgm.s.pInterPD->a[iPDE].u)
642 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
643 else
644 {
645 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
646 | (uint32_t)MMPage2Phys(pVM, pPTDefault);
647 pPT = pPTDefault;
648 }
649 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
650
651 /*
652 * PAE
653 */
654 const unsigned iPDPE= (uAddress >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
655 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
656 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
657 Assert(iPDPE < 4);
658 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
659 PX86PTPAE pPTPae;
660 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
661 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
662 else
663 {
664 pPTPae = pPTPaeDefault;
665 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
666 | MMPage2Phys(pVM, pPTPaeDefault);
667 }
668 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
669
670 /* next */
671 cPages--;
672 HCPhys += PAGE_SIZE;
673 uAddress += PAGE_SIZE;
674 }
675}
676
677
678/**
679 * Clears all PDEs involved with the mapping.
680 *
681 * @param pPGM Pointer to the PGM instance data.
682 * @param pMap Pointer to the mapping in question.
683 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
684 */
685static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE)
686{
687 unsigned i = pMap->cPTs;
688 iOldPDE += i;
689 while (i-- > 0)
690 {
691 iOldPDE--;
692
693 /*
694 * 32-bit.
695 */
696 pPGM->pInterPD->a[iOldPDE].u = 0;
697 pPGM->pHC32BitPD->a[iOldPDE].u = 0;
698
699 /*
700 * PAE.
701 */
702 const unsigned iPD = iOldPDE / 256;
703 unsigned iPDE = iOldPDE * 2 % 512;
704 pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
705 pPGM->apHCPaePDs[iPD]->a[iPDE].u = 0;
706 iPDE++;
707 pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
708 pPGM->apHCPaePDs[iPD]->a[iPDE].u = 0;
709
710 /* Clear the PGM_PLXFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
711 pPGM->pHCPaePDPT->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
712 }
713}
714
715
716/**
717 * Sets all PDEs involved with the mapping.
718 *
719 * @param pVM The VM handle.
720 * @param pMap Pointer to the mapping in question.
721 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
722 */
723static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
724{
725 PPGM pPGM = &pVM->pgm.s;
726
727 /* If mappings are not supposed to be put in the shadow page table, then this function is a nop. */
728 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
729 return;
730
731 Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);
732
733 /*
734 * Init the page tables and insert them into the page directories.
735 */
736 unsigned i = pMap->cPTs;
737 iNewPDE += i;
738 while (i-- > 0)
739 {
740 iNewPDE--;
741
742 /*
743 * 32-bit.
744 */
745 if (pPGM->pHC32BitPD->a[iNewPDE].n.u1Present)
746 pgmPoolFree(pVM, pPGM->pHC32BitPD->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
747 X86PDE Pde;
748 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
749 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
750 pPGM->pInterPD->a[iNewPDE] = Pde;
751 pPGM->pHC32BitPD->a[iNewPDE] = Pde;
752
753 /*
754 * PAE.
755 */
756 const unsigned iPD = iNewPDE / 256;
757 unsigned iPDE = iNewPDE * 2 % 512;
758 if (pPGM->apHCPaePDs[iPD]->a[iPDE].n.u1Present)
759 pgmPoolFree(pVM, pPGM->apHCPaePDs[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
760 X86PDEPAE PdePae0;
761 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
762 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
763 pPGM->apHCPaePDs[iPD]->a[iPDE] = PdePae0;
764
765 iPDE++;
766 if (pPGM->apHCPaePDs[iPD]->a[iPDE].n.u1Present)
767 pgmPoolFree(pVM, pPGM->apHCPaePDs[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
768 X86PDEPAE PdePae1;
769 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
770 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
771 pPGM->apHCPaePDs[iPD]->a[iPDE] = PdePae1;
772
773 /* Set the PGM_PLXFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
774 pPGM->pHCPaePDPT->a[iPD].u |= PGM_PLXFLAGS_MAPPING;
775 }
776}
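/*
 * Worked example of the 32-bit -> PAE index arithmetic used above: a 32-bit
 * PDE covers 4MB while a PAE PDE covers 2MB, and each of the four PAE page
 * directories covers 1GB (256 32-bit PDEs, 512 PAE PDEs). For iNewPDE = 769:
 *     iPD  = 769 / 256     = 3   -> fourth PAE page directory
 *     iPDE = 769 * 2 % 512 = 2   -> PAE entries 2 and 3 receive HCPhysPaePT0
 *                                   and HCPhysPaePT1 respectively
 */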
777
778/**
779 * Relocates a mapping to a new address.
780 *
781 * @param pVM VM handle.
782 * @param pMapping The mapping to relocate.
783 * @param GCPtrOldMapping The address of the start of the old mapping.
784 * @param GCPtrNewMapping The address of the start of the new mapping.
785 */
786void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping)
787{
788 unsigned iPDOld = GCPtrOldMapping >> X86_PD_SHIFT;
789 unsigned iPDNew = GCPtrNewMapping >> X86_PD_SHIFT;
790
791 Log(("PGM: Relocating %s from %VGv to %VGv\n", pMapping->pszDesc, GCPtrOldMapping, GCPtrNewMapping));
792 Assert(((unsigned)iPDOld << X86_PD_SHIFT) == pMapping->GCPtr);
793
794 /*
795 * Relocate the page table(s).
796 */
797 pgmR3MapClearPDEs(&pVM->pgm.s, pMapping, iPDOld);
798 pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
799
800 /*
801 * Update and resort the mapping list.
802 */
803
804 /* Find previous mapping for pMapping, put result into pPrevMap. */
805 PPGMMAPPING pPrevMap = NULL;
806 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
807 while (pCur && pCur != pMapping)
808 {
809 /* next */
810 pPrevMap = pCur;
811 pCur = pCur->pNextR3;
812 }
813 Assert(pCur);
814
815 /* Find the first mapping whose address is >= the new address of pMapping. */
816 RTGCPTR GCPtrNew = iPDNew << X86_PD_SHIFT;
817 PPGMMAPPING pPrev = NULL;
818 pCur = pVM->pgm.s.pMappingsR3;
819 while (pCur && pCur->GCPtr < GCPtrNew)
820 {
821 /* next */
822 pPrev = pCur;
823 pCur = pCur->pNextR3;
824 }
825
826 if (pCur != pMapping && pPrev != pMapping)
827 {
828 /*
829 * Unlink.
830 */
831 if (pPrevMap)
832 {
833 pPrevMap->pNextR3 = pMapping->pNextR3;
834 pPrevMap->pNextGC = pMapping->pNextGC;
835 pPrevMap->pNextR0 = pMapping->pNextR0;
836 }
837 else
838 {
839 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
840 pVM->pgm.s.pMappingsGC = pMapping->pNextGC;
841 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
842 }
843
844 /*
845 * Link
846 */
847 pMapping->pNextR3 = pCur;
848 if (pPrev)
849 {
850 pMapping->pNextGC = pPrev->pNextGC;
851 pMapping->pNextR0 = pPrev->pNextR0;
852 pPrev->pNextR3 = pMapping;
853 pPrev->pNextGC = MMHyperR3ToRC(pVM, pMapping);
854 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
855 }
856 else
857 {
858 pMapping->pNextGC = pVM->pgm.s.pMappingsGC;
859 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
860 pVM->pgm.s.pMappingsR3 = pMapping;
861 pVM->pgm.s.pMappingsGC = MMHyperR3ToRC(pVM, pMapping);
862 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
863 }
864 }
865
866 /*
867 * Update the entry.
868 */
869 pMapping->GCPtr = GCPtrNew;
870 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
871
872 /*
873 * Callback to execute the relocation.
874 */
875 pMapping->pfnRelocate(pVM, iPDOld << X86_PD_SHIFT, iPDNew << X86_PD_SHIFT, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
876}
877
878
879/**
880 * Resolves a conflict between a page table based GC mapping and
881 * the Guest OS page tables. (32-bit version)
882 *
883 * @returns VBox status code.
884 * @param pVM VM Handle.
885 * @param pMapping The mapping which conflicts.
886 * @param pPDSrc The page directory of the guest OS.
887 * @param GCPtrOldMapping The address of the start of the current mapping.
888 */
889int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping)
890{
891 STAM_PROFILE_START(&pVM->pgm.s.StatHCResolveConflict, a);
892
893 /*
894 * Scan for free page directory entries.
895 *
896 * Note that we do not support mappings at the very end of the
897 * address space since that will break our GCPtrEnd assumptions.
898 */
899 const unsigned cPTs = pMapping->cPTs;
900 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
901 while (iPDNew-- > 0)
902 {
903 if (pPDSrc->a[iPDNew].n.u1Present)
904 continue;
905 if (cPTs > 1)
906 {
907 bool fOk = true;
908 for (unsigned i = 1; fOk && i < cPTs; i++)
909 if (pPDSrc->a[iPDNew + i].n.u1Present)
910 fOk = false;
911 if (!fOk)
912 continue;
913 }
914
915 /*
916 * Check that it's not conflicting with an intermediate page table mapping.
917 */
918 bool fOk = true;
919 unsigned i = cPTs;
920 while (fOk && i-- > 0)
921 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
922 if (!fOk)
923 continue;
924 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
925
926 /*
927 * Ask for the mapping.
928 */
929 RTGCPTR GCPtrNewMapping = iPDNew << X86_PD_SHIFT;
930
931 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
932 {
933 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
934 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
935 return VINF_SUCCESS;
936 }
937 }
938
939 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
940 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, cPTs));
941 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
942}
943
944/**
945 * Resolves a conflict between a page table based GC mapping and
946 * the Guest OS page tables. (PAE version)
947 *
948 * @returns VBox status code.
949 * @param pVM VM Handle.
950 * @param pMapping The mapping which conflicts.
951 * @param GCPtrOldMapping The address of the start of the current mapping.
952 */
953int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping)
954{
955 STAM_PROFILE_START(&pVM->pgm.s.StatHCResolveConflict, a);
956
957 for (int iPDPTE = X86_PG_PAE_PDPE_ENTRIES - 1; iPDPTE >= 0; iPDPTE--)
958 {
959 unsigned iPDSrc;
960 PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPDPTE << X86_PDPT_SHIFT, &iPDSrc);
961
962 /*
963 * Scan for free page directory entries.
964 *
965 * Note that we do not support mappings at the very end of the
966 * address space since that will break our GCPtrEnd assumptions.
967 */
968 const unsigned cPTs = pMapping->cb >> X86_PD_PAE_SHIFT;
969 unsigned iPDNew = RT_ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
970
971 while (iPDNew-- > 0)
972 {
973 /* Ugly assumption that mappings start on a 4 MB boundary. */
974 if (iPDNew & 1)
975 continue;
976
977 if (pPDSrc)
978 {
979 if (pPDSrc->a[iPDNew].n.u1Present)
980 continue;
981 if (cPTs > 1)
982 {
983 bool fOk = true;
984 for (unsigned i = 1; fOk && i < cPTs; i++)
985 if (pPDSrc->a[iPDNew + i].n.u1Present)
986 fOk = false;
987 if (!fOk)
988 continue;
989 }
990 }
991 /*
992 * Check that it's not conflicting with an intermediate page table mapping.
993 */
994 bool fOk = true;
995 unsigned i = cPTs;
996 while (fOk && i-- > 0)
997 fOk = !pVM->pgm.s.apInterPaePDs[iPDPTE]->a[iPDNew + i].n.u1Present;
998 if (!fOk)
999 continue;
1000
1001 /*
1002 * Ask for the mapping.
1003 */
1004 RTGCPTR GCPtrNewMapping = (iPDPTE << X86_PDPT_SHIFT) + (iPDNew << X86_PD_PAE_SHIFT);
1005
1006 if (pMapping->pfnRelocate(pVM, GCPtrOldMapping, GCPtrNewMapping, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
1007 {
1008 pgmR3MapRelocate(pVM, pMapping, GCPtrOldMapping, GCPtrNewMapping);
1009 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
1010 return VINF_SUCCESS;
1011 }
1012 }
1013 }
1014 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
1015 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, GCPtrOldMapping, pMapping->cb >> X86_PD_PAE_SHIFT));
1016 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
1017}
1018
1019/**
1020 * Checks guest PD for conflicts with VMM GC mappings.
1021 *
1022 * @returns true if conflict detected.
1023 * @returns false if not.
1024 * @param pVM The virtual machine.
1025 * @param cr3 Guest context CR3 register.
1026 * @param fRawR0 Whether RawR0 is enabled or not.
1027 */
1028PGMR3DECL(bool) PGMR3MapHasConflicts(PVM pVM, uint64_t cr3, bool fRawR0) /** @todo how many HasConflict constructs do we really need? */
1029{
1030 /*
1031 * Can skip this if mappings are safely fixed.
1032 */
1033 if (pVM->pgm.s.fMappingsFixed)
1034 return false;
1035
1036 Assert(PGMGetGuestMode(pVM) <= PGMMODE_PAE);
1037
1038 /*
1039 * Iterate mappings.
1040 */
1041 if (PGMGetGuestMode(pVM) == PGMMODE_32_BIT)
1042 {
1043 /*
1044 * Resolve the page directory.
1045 */
1046 PX86PD pPD = pVM->pgm.s.pGuestPDHC;
1047 Assert(pPD);
1048 Assert(pPD == (PX86PD)MMPhysGCPhys2HCVirt(pVM, cr3 & X86_CR3_PAGE_MASK, sizeof(*pPD)));
1049
1050 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1051 {
1052 unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
1053 unsigned iPT = pCur->cPTs;
1054 while (iPT-- > 0)
1055 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
1056 && (fRawR0 || pPD->a[iPDE + iPT].n.u1User))
1057 {
1058 STAM_COUNTER_INC(&pVM->pgm.s.StatHCDetectedConflicts);
1059 Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s (32 bits)\n"
1060 " iPDE=%#x iPT=%#x PDE=%VGp.\n",
1061 (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
1062 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
1063 return true;
1064 }
1065 }
1066 }
1067 else
1068 if (PGMGetGuestMode(pVM) == PGMMODE_PAE)
1069 {
1070 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1071 {
1072 X86PDEPAE Pde;
1073 RTGCPTR GCPtr = pCur->GCPtr;
1074
1075 unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
1076 while (iPT-- > 0)
1077 {
1078 Pde.u = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
1079
1080 if ( Pde.n.u1Present
1081 && (fRawR0 || Pde.n.u1User))
1082 {
1083 STAM_COUNTER_INC(&pVM->pgm.s.StatHCDetectedConflicts);
1084 Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s (PAE)\n"
1085 " PDE=%VGp.\n",
1086 GCPtr, pCur->pszDesc, Pde.u));
1087 return true;
1088 }
1089 GCPtr += (1 << X86_PD_PAE_SHIFT);
1090 }
1091 }
1092 }
1093 else
1094 AssertFailed();
1095
1096 return false;
1097}
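/*
 * Illustrative sketch (exampleCheckConflicts is a hypothetical name) of the
 * kind of check a caller can make, using only functions already referenced
 * in this file (CPUMGetGuestCR3 is used by PGMR3MappingsFix above).
 */
static bool exampleCheckConflicts(PVM pVM)
{
    /* With fRawR0 = true, supervisor-only guest PDEs also count as conflicts. */
    return PGMR3MapHasConflicts(pVM, CPUMGetGuestCR3(pVM), true /* fRawR0 */);
}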
1098
1099
1100/**
1101 * Read memory from the guest mappings.
1102 *
1103 * This will use the page tables associated with the mappings to
1104 * read the memory. This means that not all kinds of memory are readable
1105 * since we don't necessarily know how to convert that physical address
1106 * to a HC virtual one.
1107 *
1108 * @returns VBox status.
1109 * @param pVM VM handle.
1110 * @param pvDst The destination address (HC of course).
1111 * @param GCPtrSrc The source address (GC virtual address).
1112 * @param cb Number of bytes to read.
1113 */
1114PGMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1115{
1116/** @todo remove this simplicity hack */
1117 /*
1118 * Simplicity over speed... Chop the request up into chunks
1119 * which don't cross pages.
1120 */
1121 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
1122 {
1123 for (;;)
1124 {
1125 unsigned cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
1126 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
1127 if (VBOX_FAILURE(rc))
1128 return rc;
1129 cb -= cbRead;
1130 if (!cb)
1131 break;
1132 pvDst = (char *)pvDst + cbRead;
1133 GCPtrSrc += cbRead;
1134 }
1135 return VINF_SUCCESS;
1136 }
1137
1138 /*
1139 * Find the mapping.
1140 */
1141 PPGMMAPPING pCur = CTXALLSUFF(pVM->pgm.s.pMappings);
1142 while (pCur)
1143 {
1144 RTGCUINTPTR off = (RTGCUINTPTR)GCPtrSrc - (RTGCUINTPTR)pCur->GCPtr;
1145 if (off < pCur->cb)
1146 {
1147 if (off + cb > pCur->cb)
1148 {
1149 AssertMsgFailed(("Invalid page range %VGv LB%#x. mapping '%s' %VGv to %VGv\n",
1150 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
1151 return VERR_INVALID_PARAMETER;
1152 }
1153
1154 unsigned iPT = off >> X86_PD_SHIFT;
1155 unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
1156 while (cb > 0 && iPTE < RT_ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
1157 {
1158 if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
1159 return VERR_PAGE_NOT_PRESENT;
1160 RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
1161
1162 /*
1163 * Get the virtual page from the physical one.
1164 */
1165 void *pvPage;
1166 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
1167 if (VBOX_FAILURE(rc))
1168 return rc;
1169
1170 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
1171 return VINF_SUCCESS;
1172 }
1173 }
1174
1175 /* next */
1176 pCur = CTXALLSUFF(pCur->pNext);
1177 }
1178
1179 return VERR_INVALID_POINTER;
1180}
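/*
 * Illustrative sketch (exampleReadMapping is a hypothetical name): reading a
 * few bytes that live inside one of the hypervisor mappings, e.g. for a
 * debugger-style dump. It assumes GCPtrSrc points into an existing mapping.
 */
static int exampleReadMapping(PVM pVM, RTGCPTR GCPtrSrc)
{
    uint8_t abBuf[64];
    int rc = PGMR3MapRead(pVM, abBuf, GCPtrSrc, sizeof(abBuf));
    if (VBOX_FAILURE(rc))
        Log(("exampleReadMapping: PGMR3MapRead failed with rc=%d\n", rc));
    return rc;
}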
1181
1182
1183/**
1184 * Info callback for the guest context mappings.
1185 * @param pVM The VM handle.
1186 * @param pHlp The output helpers.
1187 * @param pszArgs The arguments (ignored).
1188 */
1189DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1190{
1191 pHlp->pfnPrintf(pHlp, pVM->pgm.s.fMappingsFixed
1192 ? "\nThe mappings are FIXED.\n"
1193 : "\nThe mappings are FLOATING.\n");
1194 PPGMMAPPING pCur;
1195 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1196 pHlp->pfnPrintf(pHlp, "%VGv - %VGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
1197}
1198