VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMMap.cpp@5348

Last change on this file since 5348 was 5348, checked in by vboxsync, 17 years ago

Fixed a horrible issue in the relocation code where it was masking .n.u1Present instead of .u in the PDEs. Showed up when logging on to Solaris.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.3 KB
1/* $Id: PGMMap.cpp 5348 2007-10-17 10:17:31Z vboxsync $ */
2/** @file
3 * PGM - Page Manager, Guest Context Mappings.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/dbgf.h>
24#include <VBox/pgm.h>
25#include "PGMInternal.h"
26#include <VBox/vm.h>
27
28#include <VBox/log.h>
29#include <VBox/err.h>
30#include <iprt/asm.h>
31#include <iprt/assert.h>
32#include <iprt/string.h>
33
34
35/*******************************************************************************
36* Internal Functions *
37*******************************************************************************/
38static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, int iOldPDE);
39static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, int iNewPDE);
40static DECLCALLBACK(int) pgmR3DumpMappingsPhysicalCB(PAVLROGCPHYSNODECORE pNode, void *pvUser);
41static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
42static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
43
44
45
46/**
47 * Creates a page table based mapping in GC.
48 *
49 * @returns VBox status code.
50 * @param pVM VM Handle.
51 * @param GCPtr Virtual Address. (Page table aligned!)
52 * @param cb Size of the range. Must be 4MB aligned!
53 * @param pfnRelocate Relocation callback function.
54 * @param pvUser User argument to the callback.
55 * @param pszDesc Pointer to description string. This must not be freed.
56 */
57PGMR3DECL(int) PGMR3MapPT(PVM pVM, RTGCPTR GCPtr, size_t cb, PFNPGMRELOCATE pfnRelocate, void *pvUser, const char *pszDesc)
58{
59 LogFlow(("PGMR3MapPT: GCPtr=%#x cb=%d pfnRelocate=%p pvUser=%p pszDesc=%s\n", GCPtr, cb, pfnRelocate, pvUser, pszDesc));
60 AssertMsg(pVM->pgm.s.pInterPD && pVM->pgm.s.pHC32BitPD, ("Paging isn't initialized, init order problems!\n"));
61
62 /*
63 * Validate input.
64 */
65 if (cb < _2M || cb > 64 * _1M)
66 {
67 AssertMsgFailed(("Serious? cb=%d\n", cb));
68 return VERR_INVALID_PARAMETER;
69 }
70 cb = RT_ALIGN_Z(cb, _4M);
71 RTGCPTR GCPtrLast = GCPtr + cb - 1;
72 if (GCPtrLast < GCPtr)
73 {
74 AssertMsgFailed(("Range wraps! GCPtr=%x GCPtrLast=%x\n", GCPtr, GCPtrLast));
75 return VERR_INVALID_PARAMETER;
76 }
77 if (pVM->pgm.s.fMappingsFixed)
78 {
79 AssertMsgFailed(("Mappings are fixed! It's not possible to add new mappings at this time!\n"));
80 return VERR_PGM_MAPPINGS_FIXED;
81 }
82 if (!pfnRelocate)
83 {
84 AssertMsgFailed(("Callback is required\n"));
85 return VERR_INVALID_PARAMETER;
86 }
87
88 /*
89 * Find list location.
90 */
91 PPGMMAPPING pPrev = NULL;
92 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
93 while (pCur)
94 {
95 if (pCur->GCPtrLast >= GCPtr && pCur->GCPtr <= GCPtrLast)
96 {
97 AssertMsgFailed(("Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
98 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
99 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address is already in use by %s. req %#x-%#x take %#x-%#x\n",
100 pCur->pszDesc, GCPtr, GCPtrLast, pCur->GCPtr, pCur->GCPtrLast));
101 return VERR_PGM_MAPPING_CONFLICT;
102 }
103 if (pCur->GCPtr > GCPtr)
104 break;
105 pPrev = pCur;
106 pCur = pCur->pNextR3;
107 }
108
109 /*
110 * Check for conflicts with intermediate mappings.
111 */
112 const unsigned iPageDir = GCPtr >> PGDIR_SHIFT;
113 const unsigned cPTs = cb >> PGDIR_SHIFT;
114 unsigned i;
115 for (i = 0; i < cPTs; i++)
116 {
117 if (pVM->pgm.s.pInterPD->a[iPageDir + i].n.u1Present)
118 {
119 AssertMsgFailed(("Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
120 LogRel(("VERR_PGM_MAPPING_CONFLICT: Address %#x is already in use by an intermediate mapping.\n", GCPtr + (i << PAGE_SHIFT)));
121 return VERR_PGM_MAPPING_CONFLICT;
122 }
123 }
124 /** @todo AMD64: add check in PAE structures too, so we can remove all the 32-Bit paging stuff there. */
125
126 /*
127 * Allocate and initialize the new list node.
128 */
129 PPGMMAPPING pNew;
130 int rc = MMHyperAlloc(pVM, RT_OFFSETOF(PGMMAPPING, aPTs[cPTs]), 0, MM_TAG_PGM, (void **)&pNew);
131 if (VBOX_FAILURE(rc))
132 return rc;
133 pNew->GCPtr = GCPtr;
134 pNew->GCPtrLast = GCPtrLast;
135 pNew->cb = cb;
136 pNew->pszDesc = pszDesc;
137 pNew->pfnRelocate = pfnRelocate;
138 pNew->pvUser = pvUser;
139 pNew->cPTs = cPTs;
140
141 /*
142 * Allocate page tables and insert them into the page directories.
143 * (One 32-bit PT and two PAE PTs.)
144 */
145 uint8_t *pbPTs;
146 rc = MMHyperAlloc(pVM, PAGE_SIZE * 3 * cPTs, PAGE_SIZE, MM_TAG_PGM, (void **)&pbPTs);
147 if (VBOX_FAILURE(rc))
148 {
149 MMHyperFree(pVM, pNew);
150 return VERR_NO_MEMORY;
151 }
152
153 /*
154 * Init the page tables and insert them into the page directories.
155 */
156 Log4(("PGMR3MapPT: GCPtr=%VGv cPTs=%u pbPTs=%p\n", GCPtr, cPTs, pbPTs));
157 for (i = 0; i < cPTs; i++)
158 {
159 /*
160 * 32-bit.
161 */
162 pNew->aPTs[i].pPTR3 = (PVBOXPT)pbPTs;
163 pNew->aPTs[i].pPTGC = MMHyperR3ToGC(pVM, pNew->aPTs[i].pPTR3);
164 pNew->aPTs[i].pPTR0 = MMHyperR3ToR0(pVM, pNew->aPTs[i].pPTR3);
165 pNew->aPTs[i].HCPhysPT = MMR3HyperHCVirt2HCPhys(pVM, pNew->aPTs[i].pPTR3);
166 pbPTs += PAGE_SIZE;
167 Log4(("PGMR3MapPT: i=%d: pPTHC=%p pPTGC=%p HCPhysPT=%RHp\n",
168 i, pNew->aPTs[i].pPTR3, pNew->aPTs[i].pPTGC, pNew->aPTs[i].HCPhysPT));
169
170 /*
171 * PAE.
172 */
173 pNew->aPTs[i].HCPhysPaePT0 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs);
174 pNew->aPTs[i].HCPhysPaePT1 = MMR3HyperHCVirt2HCPhys(pVM, pbPTs + PAGE_SIZE);
175 pNew->aPTs[i].paPaePTsR3 = (PX86PTPAE)pbPTs;
176 pNew->aPTs[i].paPaePTsGC = MMHyperR3ToGC(pVM, pbPTs);
177 pNew->aPTs[i].paPaePTsR0 = MMHyperR3ToR0(pVM, pbPTs);
178 pbPTs += PAGE_SIZE * 2;
179 Log4(("PGMR3MapPT: i=%d: paPaePTsHC=%p paPaePTsGC=%p HCPhysPaePT0=%RHp HCPhysPaePT1=%RHp\n",
180 i, pNew->aPTs[i].paPaePTsR3, pNew->aPTs[i].paPaePTsGC, pNew->aPTs[i].HCPhysPaePT0, pNew->aPTs[i].HCPhysPaePT1));
181 }
182 pgmR3MapSetPDEs(pVM, pNew, iPageDir);
183
184 /*
185 * Insert the new mapping.
186 */
187 pNew->pNextR3 = pCur;
188 pNew->pNextGC = pCur ? MMHyperR3ToGC(pVM, pCur) : 0;
189 pNew->pNextR0 = pCur ? MMHyperR3ToR0(pVM, pCur) : 0;
190 if (pPrev)
191 {
192 pPrev->pNextR3 = pNew;
193 pPrev->pNextGC = MMHyperR3ToGC(pVM, pNew);
194 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pNew);
195 }
196 else
197 {
198 pVM->pgm.s.pMappingsR3 = pNew;
199 pVM->pgm.s.pMappingsGC = MMHyperR3ToGC(pVM, pNew);
200 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pNew);
201 }
202
203 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
204 return VINF_SUCCESS;
205}
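
/*
 * A minimal usage sketch for PGMR3MapPT and PGMR3UnmapPT. The callback
 * signature is inferred from the call sites in this file, and the base
 * address, state and description used here are hypothetical. Until
 * PGMR3MappingsFix() is called the mapping is floating, so the callback
 * has to keep track of where the mapping currently lives.
 *
 * @code
 *  static DECLCALLBACK(bool) exampleRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
 *                                            PGMRELOCATECALL enmMode, void *pvUser)
 *  {
 *      RTGCPTR *pGCPtrBase = (RTGCPTR *)pvUser;    // hypothetical per-mapping state
 *      switch (enmMode)
 *      {
 *          case PGMRELOCATECALL_SUGGEST:
 *              return true;                        // any 4MB aligned location is acceptable
 *          case PGMRELOCATECALL_RELOCATE:
 *              *pGCPtrBase = GCPtrNew;             // remember the new base address
 *              return true;
 *          default:
 *              return false;
 *      }
 *  }
 *
 *  // Request a floating 4MB mapping; PGM may move it through the callback later.
 *  RTGCPTR GCPtrBase = 0xa0000000;                 // hypothetical, 4MB aligned
 *  int rc = PGMR3MapPT(pVM, GCPtrBase, _4M, exampleRelocate, &GCPtrBase, "Example mapping");
 *  ...
 *  rc = PGMR3UnmapPT(pVM, GCPtrBase);              // tear it down at its current address
 * @endcode
 *
 * In real code pvUser would normally point at long-lived per-component data
 * rather than a stack variable as in this sketch.
 */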
206
207
208/**
209 * Removes a page table based mapping.
210 *
211 * @returns VBox status code.
212 * @param pVM VM Handle.
213 * @param GCPtr Virtual Address. (Page table aligned!)
214 */
215PGMR3DECL(int) PGMR3UnmapPT(PVM pVM, RTGCPTR GCPtr)
216{
217 LogFlow(("PGMR3UnmapPT: GCPtr=%#x\n", GCPtr));
218
219 /*
220 * Find it.
221 */
222 PPGMMAPPING pPrev = NULL;
223 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
224 while (pCur)
225 {
226 if (pCur->GCPtr == GCPtr)
227 {
228 /*
229 * Unlink it.
230 */
231 if (pPrev)
232 {
233 pPrev->pNextR3 = pCur->pNextR3;
234 pPrev->pNextGC = pCur->pNextGC;
235 pPrev->pNextR0 = pCur->pNextR0;
236 }
237 else
238 {
239 pVM->pgm.s.pMappingsR3 = pCur->pNextR3;
240 pVM->pgm.s.pMappingsGC = pCur->pNextGC;
241 pVM->pgm.s.pMappingsR0 = pCur->pNextR0;
242 }
243
244 /*
245 * Free the page table memory, clear page directory entries
246 * and free the page tables and node memory.
247 */
248 MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
249 pgmR3MapClearPDEs(&pVM->pgm.s, pCur, pCur->GCPtr >> PGDIR_SHIFT);
250 MMHyperFree(pVM, pCur);
251
252 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
253 return VINF_SUCCESS;
254 }
255
256 /* done? */
257 if (pCur->GCPtr > GCPtr)
258 break;
259
260 /* next */
261 pPrev = pCur;
262 pCur = pCur->pNextR3;
263 }
264
265 AssertMsgFailed(("No mapping for %#x found!\n", GCPtr));
266 return VERR_INVALID_PARAMETER;
267}
268
269
270/**
271 * Gets the size of the current guest mappings if they were to be
272 * put next to one another.
273 *
274 * @returns VBox status code.
275 * @param pVM The VM.
276 * @param pcb Where to store the size.
277 */
278PGMR3DECL(int) PGMR3MappingsSize(PVM pVM, size_t *pcb)
279{
280 size_t cb = 0;
281 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
282 cb += pCur->cb;
283
284 *pcb = cb;
285 Log(("PGMR3MappingsSize: return %d (%#x) bytes\n", cb, cb));
286 return VINF_SUCCESS;
287}
288
289
290/**
291 * Fixes the guest context mappings in a range reserved from the Guest OS.
292 *
293 * @returns VBox status code.
294 * @param pVM The VM.
295 * @param GCPtrBase The address of the reserved range of guest memory.
296 * @param cb The size of the range starting at GCPtrBase.
297 */
298PGMR3DECL(int) PGMR3MappingsFix(PVM pVM, RTGCPTR GCPtrBase, size_t cb)
299{
300 Log(("PGMR3MappingsFix: GCPtrBase=%#x cb=%#x\n", GCPtrBase, cb));
301
302 /*
303 * This is all or nothing at all. So, a tiny bit of paranoia first.
304 */
305 if (GCPtrBase & PAGE_OFFSET_MASK_BIG)
306 {
307 AssertMsgFailed(("GCPtrBase (%#x) has to be aligned on a 4MB address!\n", GCPtrBase));
308 return VERR_INVALID_PARAMETER;
309 }
310 if (!cb || (cb & PAGE_OFFSET_MASK_BIG))
311 {
312 AssertMsgFailed(("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb));
313 return VERR_INVALID_PARAMETER;
314 }
315
316 /*
317 * Before we do anything, we'll do a forced PD sync to try to make sure any
318 * pending relocations caused by these mappings have been resolved.
319 */
320 PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), true);
321
322 /*
323 * Check that it's not conflicting with a core code mapping in the intermediate page table.
324 */
325 unsigned iPDNew = GCPtrBase >> PGDIR_SHIFT;
326 unsigned i = cb >> PGDIR_SHIFT;
327 while (i-- > 0)
328 {
329 if (pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present)
330 {
331 /* Check that it's not one of our mappings. */
332 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
333 while (pCur)
334 {
335 if (iPDNew + i - (pCur->GCPtr >> PGDIR_SHIFT) < (pCur->cb >> PGDIR_SHIFT))
336 break;
337 pCur = pCur->pNextR3;
338 }
339 if (!pCur)
340 {
341 LogRel(("PGMR3MappingsFix: Conflicts with intermediate PDE %#x (GCPtrBase=%VGv cb=%#zx). The guest should retry.\n",
342 iPDNew + i, GCPtrBase, cb));
343 return VERR_PGM_MAPPINGS_FIX_CONFLICT;
344 }
345 }
346 }
347
348 /*
349 * Loop the mappings and check that they all agree on their new locations.
350 */
351 RTGCPTR GCPtrCur = GCPtrBase;
352 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
353 while (pCur)
354 {
355 if (!pCur->pfnRelocate(pVM, pCur->GCPtr, GCPtrCur, PGMRELOCATECALL_SUGGEST, pCur->pvUser))
356 {
357 AssertMsgFailed(("The suggested fixed address %#x was rejected by '%s'!\n", GCPtrCur, pCur->pszDesc));
358 return VERR_PGM_MAPPINGS_FIX_REJECTED;
359 }
360 /* next */
361 GCPtrCur += pCur->cb;
362 pCur = pCur->pNextR3;
363 }
364 if (GCPtrCur > GCPtrBase + cb)
365 {
366 AssertMsgFailed(("cb (%#x) is less than the required range %#x!\n", cb, GCPtrCur - GCPtrBase));
367 return VERR_PGM_MAPPINGS_FIX_TOO_SMALL;
368 }
369
370 /*
371 * Loop the table assigning the mappings to the passed in memory
372 * and call their relocator callback.
373 */
374 GCPtrCur = GCPtrBase;
375 pCur = pVM->pgm.s.pMappingsR3;
376 while (pCur)
377 {
378 unsigned iPDOld = pCur->GCPtr >> PGDIR_SHIFT;
379 iPDNew = GCPtrCur >> PGDIR_SHIFT;
380
381 /*
382 * Relocate the page table(s).
383 */
384 pgmR3MapClearPDEs(&pVM->pgm.s, pCur, iPDOld);
385 pgmR3MapSetPDEs(pVM, pCur, iPDNew);
386
387 /*
388 * Update the entry.
389 */
390 pCur->GCPtr = GCPtrCur;
391 pCur->GCPtrLast = GCPtrCur + pCur->cb - 1;
392
393 /*
394 * Callback to execute the relocation.
395 */
396 pCur->pfnRelocate(pVM, iPDOld << PGDIR_SHIFT, iPDNew << PGDIR_SHIFT, PGMRELOCATECALL_RELOCATE, pCur->pvUser);
397
398 /*
399 * Advance.
400 */
401 GCPtrCur += pCur->cb;
402 pCur = pCur->pNextR3;
403 }
404
405 /*
406 * Turn off CR3 updating monitoring.
407 */
408 int rc2 = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
409 AssertRC(rc2);
410
411 /*
412 * Mark the mappings as fixed and return.
413 */
414 pVM->pgm.s.fMappingsFixed = true;
415 pVM->pgm.s.GCPtrMappingFixed = GCPtrBase;
416 pVM->pgm.s.cbMappingFixed = cb;
417 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
418 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
419 return VINF_SUCCESS;
420}
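
/*
 * Sketch of the intended fix sequence, assuming a hypothetical guest-reserved
 * range GCPtrGuestArea: query the total size of the floating mappings with
 * PGMR3MappingsSize(), have the guest reserve that many bytes of 4MB aligned
 * address space, then pin everything with PGMR3MappingsFix().
 *
 * @code
 *  size_t cb;
 *  int rc = PGMR3MappingsSize(pVM, &cb);           // always a 4MB multiple
 *  if (VBOX_SUCCESS(rc))
 *  {
 *      rc = PGMR3MappingsFix(pVM, GCPtrGuestArea, cb);
 *      if (rc == VERR_PGM_MAPPINGS_FIX_CONFLICT)
 *      {
 *          // the range clashes with an intermediate mapping; the guest
 *          // should reserve a different range and retry (see LogRel above)
 *      }
 *  }
 * @endcode
 */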
421
422
423/**
424 * Unfixes the mappings.
425 * After calling this function, mapping conflict detection will be enabled.
426 *
427 * @returns VBox status code.
428 * @param pVM The VM.
429 */
430PGMR3DECL(int) PGMR3MappingsUnfix(PVM pVM)
431{
432 Log(("PGMR3MappingsUnfix: fMappingsFixed=%d\n", pVM->pgm.s.fMappingsFixed));
433 pVM->pgm.s.fMappingsFixed = false;
434 pVM->pgm.s.GCPtrMappingFixed = 0;
435 pVM->pgm.s.cbMappingFixed = 0;
436 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
437
438 /*
439 * Re-enable the CR3 monitoring.
440 *
441 * Paranoia: We flush the page pool before doing that because Windows
442 * is using the CR3 page both as a PD and a PT, e.g. the pool may
443 * be monitoring it.
444 */
445#ifdef PGMPOOL_WITH_MONITORING
446 pgmPoolFlushAll(pVM);
447#endif
448 int rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
449 AssertRC(rc);
450
451 return VINF_SUCCESS;
452}
453
454
455/**
456 * Map pages into the intermediate context (switcher code).
457 * These pages are mapped at both the given virtual address and at
458 * the physical address (for identity mapping).
459 *
460 * @returns VBox status code.
461 * @param pVM The virtual machine.
462 * @param Addr Intermediate context address of the mapping.
463 * @param HCPhys Start of the range of physical pages. This must be entirely below 4GB!
464 * @param cbPages Number of bytes to map.
465 *
466 * @remark This API shall not be used for anything but mapping the switcher code.
467 */
468PGMR3DECL(int) PGMR3MapIntermediate(PVM pVM, RTUINTPTR Addr, RTHCPHYS HCPhys, unsigned cbPages)
469{
470 LogFlow(("PGMR3MapIntermediate: Addr=%RTptr HCPhys=%VHp cbPages=%#x\n", Addr, HCPhys, cbPages));
471
472 /*
473 * Adjust input.
474 */
475 cbPages += (uint32_t)HCPhys & PAGE_OFFSET_MASK;
476 cbPages = RT_ALIGN(cbPages, PAGE_SIZE);
477 HCPhys &= X86_PTE_PAE_PG_MASK;
478 Addr &= PAGE_BASE_MASK;
479 /* We only care about the first 4GB, because on AMD64 we'll be repeating them all over the address space. */
480 uint32_t uAddress = (uint32_t)Addr;
481
482 /*
483 * Assert input and state.
484 */
485 AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));
486 AssertMsg(pVM->pgm.s.pInterPD, ("Bad init order, paging.\n"));
487 AssertMsg(cbPages <= (512 << PAGE_SHIFT), ("The mapping is too big %d bytes\n", cbPages));
488 AssertMsg(HCPhys < _4G && HCPhys + cbPages < _4G, ("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
489
490 /*
491 * Check for internal conflicts between the virtual address and the physical address.
492 */
493 if ( uAddress != HCPhys
494 && ( uAddress < HCPhys
495 ? HCPhys - uAddress < cbPages
496 : uAddress - HCPhys < cbPages
497 )
498 )
499 {
500 AssertMsgFailed(("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
501 LogRel(("Addr=%RTptr HCPhys=%VHp cbPages=%d\n", Addr, HCPhys, cbPages));
502 return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo new error code */
503 }
504
505 /* The intermediate mapping must not conflict with our default hypervisor address. */
506 size_t cbHyper;
507 RTGCPTR pvHyperGC = MMHyperGetArea(pVM, &cbHyper);
508 if (uAddress < pvHyperGC
509 ? uAddress + cbPages > pvHyperGC
510 : pvHyperGC + cbHyper > uAddress
511 )
512 {
513 AssertMsgFailed(("Addr=%RTptr HyperGC=%VGv cbPages=%zu\n", Addr, pvHyperGC, cbPages));
514 LogRel(("Addr=%RTptr HyperGC=%VGv cbPages=%zu\n", Addr, pvHyperGC, cbPages));
515 return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo new error code */
516 }
517
518 const unsigned cPages = cbPages >> PAGE_SHIFT;
519 int rc = pgmR3MapIntermediateCheckOne(pVM, uAddress, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
520 if (VBOX_FAILURE(rc))
521 return rc;
522 rc = pgmR3MapIntermediateCheckOne(pVM, (uintptr_t)HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
523 if (VBOX_FAILURE(rc))
524 return rc;
525
526 /*
527 * Everything's fine, do the mapping.
528 */
529 pgmR3MapIntermediateDoOne(pVM, uAddress, HCPhys, cPages, pVM->pgm.s.apInterPTs[0], pVM->pgm.s.apInterPaePTs[0]);
530 pgmR3MapIntermediateDoOne(pVM, (uintptr_t)HCPhys, HCPhys, cPages, pVM->pgm.s.apInterPTs[1], pVM->pgm.s.apInterPaePTs[1]);
531
532 return VINF_SUCCESS;
533}
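
/*
 * Sketch of mapping the switcher code into the intermediate context; the
 * names GCPtrSwitcher, HCPhysSwitcher and cbSwitcher are hypothetical. The
 * range is entered twice, at the given address and identity mapped at the
 * physical address, and both the physical range and the size (at most 512
 * pages) must stay below 4GB.
 *
 * @code
 *  int rc = PGMR3MapIntermediate(pVM, GCPtrSwitcher, HCPhysSwitcher, cbSwitcher);
 * @endcode
 */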
534
535
536/**
537 * Validates that there are no conflicts for this mapping into the intermediate context.
538 *
539 * @returns VBox status code.
540 * @param pVM VM handle.
541 * @param uAddress Address of the mapping.
542 * @param cPages Number of pages.
543 * @param pPTDefault Pointer to the default page table for this mapping.
544 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
545 */
546static int pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
547{
548 AssertMsg((uAddress >> X86_PD_SHIFT) + cPages <= 1024, ("64-bit fixme\n"));
549
550 /*
551 * Check that the ranges are available.
552 * (This code doesn't have to be fast.)
553 */
554 while (cPages > 0)
555 {
556 /*
557 * 32-Bit.
558 */
559 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
560 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
561 PX86PT pPT = pPTDefault;
562 if (pVM->pgm.s.pInterPD->a[iPDE].u)
563 {
564 RTHCPHYS HCPhysPT = pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK;
565 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[0]))
566 pPT = pVM->pgm.s.apInterPTs[0];
567 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPTs[1]))
568 pPT = pVM->pgm.s.apInterPTs[1];
569 else
570 {
571 /** @todo this must be handled with a relocation of the conflicting mapping!
572 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
573 AssertMsgFailed(("Conflict between core code and PGMR3Mapping(). uAddress=%VHv\n", uAddress));
574 LogRel(("Conflict between core code and PGMR3Mapping(). uAddress=%VHv\n", uAddress));
575 return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo error codes! */
576 }
577 }
578 if (pPT->a[iPTE].u)
579 {
580 AssertMsgFailed(("Conflict iPTE=%#x iPDE=%#x uAddress=%VHv pPT->a[iPTE].u=%RX32\n", iPTE, iPDE, uAddress, pPT->a[iPTE].u));
581 LogRel(("Conflict iPTE=%#x iPDE=%#x uAddress=%VHv pPT->a[iPTE].u=%RX32\n",
582 iPTE, iPDE, uAddress, pPT->a[iPTE].u));
583 return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo error codes! */
584 }
585
586 /*
587 * PAE.
588 */
589 const unsigned iPDPE= (uAddress >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
590 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
591 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
592 Assert(iPDPE < 4);
593 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
594 PX86PTPAE pPTPae = pPTPaeDefault;
595 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
596 {
597 RTHCPHYS HCPhysPT = pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK;
598 if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[0]))
599 pPTPae = pVM->pgm.s.apInterPaePTs[0];
600 else if (HCPhysPT == MMPage2Phys(pVM, pVM->pgm.s.apInterPaePTs[1]))
601 pPTPae = pVM->pgm.s.apInterPaePTs[1];
602 else
603 {
604 /** @todo this must be handled with a relocation of the conflicting mapping!
605 * Which of course cannot be done because we're in the middle of the initialization. bad design! */
606 AssertMsgFailed(("Conflict between core code and PGMR3Mapping(). uAddress=%VHv\n", uAddress));
607 LogRel(("Conflict between core code and PGMR3Mapping(). uAddress=%VHv\n", uAddress));
608 return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo error codes! */
609 }
610 }
611 if (pPTPae->a[iPTE].u)
612 {
613 AssertMsgFailed(("Conflict iPTE=%#x iPDE=%#x uAddress=%VHv pPTPae->a[iPTE].u=%#RX64\n", iPTE, iPDE, uAddress, pPTPae->a[iPTE].u));
614 LogRel(("Conflict iPTE=%#x iPDE=%#x uAddress=%VHv pPTPae->a[iPTE].u=%#RX64\n",
615 iPTE, iPDE, uAddress, pPTPae->a[iPTE].u));
616 return VERR_PGM_MAPPINGS_FIX_CONFLICT; /** @todo error codes! */
617 }
618
619 /* next */
620 uAddress += PAGE_SIZE;
621 cPages--;
622 }
623
624 return VINF_SUCCESS;
625}
626
627
628
629/**
630 * Sets up the intermediate page tables for a verified mapping.
631 *
632 * @param pVM VM handle.
633 * @param uAddress Address of the mapping.
634 * @param HCPhys The physical address of the page range.
635 * @param cPages Number of pages.
636 * @param pPTDefault Pointer to the default page table for this mapping.
637 * @param pPTPaeDefault Pointer to the default PAE page table for this mapping.
638 */
639static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault)
640{
641 while (cPages > 0)
642 {
643 /*
644 * 32-Bit.
645 */
646 unsigned iPDE = (uAddress >> X86_PD_SHIFT) & X86_PD_MASK;
647 unsigned iPTE = (uAddress >> X86_PT_SHIFT) & X86_PT_MASK;
648 PX86PT pPT;
649 if (pVM->pgm.s.pInterPD->a[iPDE].u)
650 pPT = (PX86PT)MMPagePhys2Page(pVM, pVM->pgm.s.pInterPD->a[iPDE].u & X86_PDE_PG_MASK);
651 else
652 {
653 pVM->pgm.s.pInterPD->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
654 | (uint32_t)MMPage2Phys(pVM, pPTDefault);
655 pPT = pPTDefault;
656 }
657 pPT->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | (uint32_t)HCPhys;
658
659 /*
660 * PAE
661 */
662 const unsigned iPDPE= (uAddress >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK;
663 iPDE = (uAddress >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
664 iPTE = (uAddress >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;
665 Assert(iPDPE < 4);
666 Assert(pVM->pgm.s.apInterPaePDs[iPDPE]);
667 PX86PTPAE pPTPae;
668 if (pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u)
669 pPTPae = (PX86PTPAE)MMPagePhys2Page(pVM, pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u & X86_PDE_PAE_PG_MASK);
670 else
671 {
672 pPTPae = pPTPaeDefault;
673 pVM->pgm.s.apInterPaePDs[iPDPE]->a[iPDE].u = X86_PDE_P | X86_PDE_A | X86_PDE_RW
674 | MMPage2Phys(pVM, pPTPaeDefault);
675 }
676 pPTPae->a[iPTE].u = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D | HCPhys;
677
678 /* next */
679 cPages--;
680 HCPhys += PAGE_SIZE;
681 uAddress += PAGE_SIZE;
682 }
683}
684
685
686/**
687 * Clears all PDEs involved with the mapping.
688 *
689 * @param pPGM Pointer to the PGM instance data.
690 * @param pMap Pointer to the mapping in question.
691 * @param iOldPDE The index of the 32-bit PDE corresponding to the base of the mapping.
692 */
693static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, int iOldPDE)
694{
695 unsigned i = pMap->cPTs;
696 iOldPDE += i;
697 while (i-- > 0)
698 {
699 iOldPDE--;
700
701 /*
702 * 32-bit.
703 */
704 pPGM->pInterPD->a[iOldPDE].u = 0;
705 pPGM->pHC32BitPD->a[iOldPDE].u = 0;
706
707 /*
708 * PAE.
709 */
710 const int iPD = iOldPDE / 256;
711 int iPDE = iOldPDE * 2 % 512;
712 pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
713 pPGM->apHCPaePDs[iPD]->a[iPDE].u = 0;
714 iPDE++;
715 pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
716 pPGM->apHCPaePDs[iPD]->a[iPDE].u = 0;
717 }
718}
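
/*
 * Worked example of the 32-bit to PAE index arithmetic used above and in
 * pgmR3MapSetPDEs below: a 32-bit PDE spans 4MB while a PAE PDE spans 2MB,
 * so each 32-bit PDE corresponds to two consecutive PAE PDEs, and each of
 * the four PAE page directories covers 1GB, i.e. 256 32-bit PDEs.
 *
 * @code
 *  // iOldPDE = 0x2c1 (GCPtr = 0x2c1 << PGDIR_SHIFT = 0xb0400000)
 *  // iPD  = 0x2c1 / 256     = 2      -> third PAE page directory (2GB-3GB)
 *  // iPDE = 0x2c1 * 2 % 512 = 0x182  -> PAE PDEs 0x182 and 0x183 are touched
 * @endcode
 */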
719
720
721/**
722 * Sets all PDEs involved with the mapping.
723 *
724 * @param pVM The VM handle.
725 * @param pMap Pointer to the mapping in question.
726 * @param iNewPDE The index of the 32-bit PDE corresponding to the base of the mapping.
727 */
728static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, int iNewPDE)
729{
730 PPGM pPGM = &pVM->pgm.s;
731
732 /* If mappings are not supposed to be put in the shadow page table, then this function is a nop. */
733 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
734 return;
735
736 /*
737 * Init the page tables and insert them into the page directories.
738 */
739 unsigned i = pMap->cPTs;
740 iNewPDE += i;
741 while (i-- > 0)
742 {
743 iNewPDE--;
744
745 /*
746 * 32-bit.
747 */
748 if (pPGM->pHC32BitPD->a[iNewPDE].n.u1Present)
749 pgmPoolFree(pVM, pPGM->pHC32BitPD->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
750 X86PDE Pde;
751 /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
752 Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
753 pPGM->pInterPD->a[iNewPDE] = Pde;
754 pPGM->pHC32BitPD->a[iNewPDE] = Pde;
755
756 /*
757 * PAE.
758 */
759 const int iPD = iNewPDE / 256;
760 int iPDE = iNewPDE * 2 % 512;
761 if (pPGM->apHCPaePDs[iPD]->a[iPDE].n.u1Present)
762 pgmPoolFree(pVM, pPGM->apHCPaePDs[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
763 X86PDEPAE PdePae0;
764 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
765 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
766 pPGM->apHCPaePDs[iPD]->a[iPDE] = PdePae0;
767
768 iPDE++;
769 if (pPGM->apHCPaePDs[iPD]->a[iPDE].n.u1Present)
770 pgmPoolFree(pVM, pPGM->apHCPaePDs[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
771 X86PDEPAE PdePae1;
772 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
773 pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
774 pPGM->apHCPaePDs[iPD]->a[iPDE] = PdePae1;
775 }
776}
777
778/**
779 * Relocates a mapping to a new address.
780 *
781 * @param pVM VM handle.
782 * @param pMapping The mapping to relocate.
783 * @param iPDOld Old page directory index.
784 * @param iPDNew New page directory index.
785 */
786void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, int iPDOld, int iPDNew)
787{
788 Log(("PGM: Relocating %s from %#x to %#x\n", pMapping->pszDesc, iPDOld << PGDIR_SHIFT, iPDNew << PGDIR_SHIFT));
789 Assert(((unsigned)iPDOld << PGDIR_SHIFT) == pMapping->GCPtr);
790
791 /*
792 * Relocate the page table(s).
793 */
794 pgmR3MapClearPDEs(&pVM->pgm.s, pMapping, iPDOld);
795 pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
796
797 /*
798 * Update and resort the mapping list.
799 */
800
801 /* Find previous mapping for pMapping, put result into pPrevMap. */
802 PPGMMAPPING pPrevMap = NULL;
803 PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3;
804 while (pCur && pCur != pMapping)
805 {
806 /* next */
807 pPrevMap = pCur;
808 pCur = pCur->pNextR3;
809 }
810 Assert(pCur);
811
812 /* Find the first mapping with a GCPtr >= the new address. */
813 RTGCPTR GCPtrNew = iPDNew << PGDIR_SHIFT;
814 PPGMMAPPING pPrev = NULL;
815 pCur = pVM->pgm.s.pMappingsR3;
816 while (pCur && pCur->GCPtr < GCPtrNew)
817 {
818 /* next */
819 pPrev = pCur;
820 pCur = pCur->pNextR3;
821 }
822
823 if (pCur != pMapping && pPrev != pMapping)
824 {
825 /*
826 * Unlink.
827 */
828 if (pPrevMap)
829 {
830 pPrevMap->pNextR3 = pMapping->pNextR3;
831 pPrevMap->pNextGC = pMapping->pNextGC;
832 pPrevMap->pNextR0 = pMapping->pNextR0;
833 }
834 else
835 {
836 pVM->pgm.s.pMappingsR3 = pMapping->pNextR3;
837 pVM->pgm.s.pMappingsGC = pMapping->pNextGC;
838 pVM->pgm.s.pMappingsR0 = pMapping->pNextR0;
839 }
840
841 /*
842 * Link
843 */
844 pMapping->pNextR3 = pCur;
845 if (pPrev)
846 {
847 pMapping->pNextGC = pPrev->pNextGC;
848 pMapping->pNextR0 = pPrev->pNextR0;
849 pPrev->pNextR3 = pMapping;
850 pPrev->pNextGC = MMHyperR3ToGC(pVM, pMapping);
851 pPrev->pNextR0 = MMHyperR3ToR0(pVM, pMapping);
852 }
853 else
854 {
855 pMapping->pNextGC = pVM->pgm.s.pMappingsGC;
856 pMapping->pNextR0 = pVM->pgm.s.pMappingsR0;
857 pVM->pgm.s.pMappingsR3 = pMapping;
858 pVM->pgm.s.pMappingsGC = MMHyperR3ToGC(pVM, pMapping);
859 pVM->pgm.s.pMappingsR0 = MMHyperR3ToR0(pVM, pMapping);
860 }
861 }
862
863 /*
864 * Update the entry.
865 */
866 pMapping->GCPtr = GCPtrNew;
867 pMapping->GCPtrLast = GCPtrNew + pMapping->cb - 1;
868
869 /*
870 * Callback to execute the relocation.
871 */
872 pMapping->pfnRelocate(pVM, iPDOld << PGDIR_SHIFT, iPDNew << PGDIR_SHIFT, PGMRELOCATECALL_RELOCATE, pMapping->pvUser);
873}
874
875
876/**
877 * Resolves a conflict between a page table based GC mapping and
878 * the Guest OS page tables.
879 *
880 * @returns VBox status code.
881 * @param pVM VM Handle.
882 * @param pMapping The mapping which conflicts.
883 * @param pPDSrc The page directory of the guest OS.
884 * @param iPDOld The index to the start of the current mapping.
885 */
886int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PVBOXPD pPDSrc, int iPDOld)
887{
888 STAM_PROFILE_START(&pVM->pgm.s.StatHCResolveConflict, a);
889
890 /*
891 * Scan for free page directory entries.
892 *
893 * Note that we do not support mappings at the very end of the
894 * address space since that will break our GCPtrEnd assumptions.
895 */
896 const unsigned cPTs = pMapping->cPTs;
897 unsigned iPDNew = ELEMENTS(pPDSrc->a) - cPTs; /* (+ 1 - 1) */
898 while (iPDNew-- > 0)
899 {
900 if (pPDSrc->a[iPDNew].n.u1Present)
901 continue;
902 if (cPTs > 1)
903 {
904 bool fOk = true;
905 for (unsigned i = 1; fOk && i < cPTs; i++)
906 if (pPDSrc->a[iPDNew + i].n.u1Present)
907 fOk = false;
908 if (!fOk)
909 continue;
910 }
911
912 /*
913 * Check that it's not conflicting with an intermediate page table mapping.
914 */
915 bool fOk = true;
916 unsigned i = cPTs;
917 while (fOk && i-- > 0)
918 fOk = !pVM->pgm.s.pInterPD->a[iPDNew + i].n.u1Present;
919 if (!fOk)
920 continue;
921 /** @todo AMD64 should check the PAE directories and skip the 32bit stuff. */
922
923 /*
924 * Ask the mapping.
925 */
926 if (pMapping->pfnRelocate(pVM, iPDOld << PGDIR_SHIFT, iPDNew << PGDIR_SHIFT, PGMRELOCATECALL_SUGGEST, pMapping->pvUser))
927 {
928 pgmR3MapRelocate(pVM, pMapping, iPDOld, iPDNew);
929 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
930 return VINF_SUCCESS;
931 }
932 }
933
934 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCResolveConflict, a);
935 AssertMsgFailed(("Failed to relocate page table mapping '%s' from %#x! (cPTs=%d)\n", pMapping->pszDesc, iPDOld << PGDIR_SHIFT, cPTs));
936 return VERR_PGM_NO_HYPERVISOR_ADDRESS;
937}
938
939
940
941/**
942 * Checks guest PD for conflicts with VMM GC mappings.
943 *
944 * @returns true if conflict detected.
945 * @returns false if not.
946 * @param pVM The virtual machine.
947 * @param cr3 Guest context CR3 register.
948 * @param fRawR0 Whether RawR0 is enabled or not.
949 */
950PGMR3DECL(bool) PGMR3MapHasConflicts(PVM pVM, uint32_t cr3, bool fRawR0) /** @todo how many HasConflict constructs do we really need? */
951{
952 /*
953 * Can skip this if mappings are safely fixed.
954 */
955 if (pVM->pgm.s.fMappingsFixed)
956 return false;
957
958 /*
959 * Resolve the page directory.
960 */
961 PVBOXPD pPD = pVM->pgm.s.pGuestPDHC; /** @todo Fix PAE! */
962 Assert(pPD);
963 Assert(pPD == (PVBOXPD)MMPhysGCPhys2HCVirt(pVM, cr3 & X86_CR3_PAGE_MASK, sizeof(*pPD)));
964
965 /*
966 * Iterate mappings.
967 */
968 for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
969 {
970 unsigned iPDE = pCur->GCPtr >> PGDIR_SHIFT;
971 unsigned iPT = pCur->cPTs;
972 while (iPT-- > 0)
973 if ( pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
974 && (fRawR0 || pPD->a[iPDE + iPT].n.u1User))
975 {
976 STAM_COUNTER_INC(&pVM->pgm.s.StatHCDetectedConflicts);
977 #if 1
978 Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s\n"
979 " iPDE=%#x iPT=%#x PDE=%VGp.\n",
980 (iPT + iPDE) << PGDIR_SHIFT, pCur->pszDesc,
981 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
982 #else
983 AssertMsgFailed(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s\n"
984 " iPDE=%#x iPT=%#x PDE=%VGp.\n",
985 (iPT + iPDE) << PGDIR_SHIFT, pCur->pszDesc,
986 iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
987 #endif
988 return true;
989 }
990 }
991
992 return false;
993}
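
/*
 * Sketch of how the CR3 sync path is expected to combine conflict detection
 * and resolution; the surrounding loop and guest PD lookup are simplified,
 * and the pMapping/pPDSrc variables are assumed to come from that context.
 *
 * @code
 *  if (PGMR3MapHasConflicts(pVM, cr3, fRawR0))
 *  {
 *      // for the mapping that overlaps guest PDEs (lookup omitted):
 *      int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc,
 *                                          pMapping->GCPtr >> PGDIR_SHIFT);
 *      // VERR_PGM_NO_HYPERVISOR_ADDRESS means no free spot could be found
 *  }
 * @endcode
 */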
994
995
996/**
997 * Read memory from the guest mappings.
998 *
999 * This will use the page tables associated with the mappings to
1000 * read the memory. This means that not all kinds of memory are readable
1001 * since we don't necessarily know how to convert that physical address
1002 * to a HC virtual one.
1003 *
1004 * @returns VBox status.
1005 * @param pVM VM handle.
1006 * @param pvDst The destination address (HC of course).
1007 * @param GCPtrSrc The source address (GC virtual address).
1008 * @param cb Number of bytes to read.
1009 */
1010PGMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1011{
1012/** @todo remove this simplicity hack */
1013 /*
1014 * Simplicity over speed... Chop the request up into chunks
1015 * which don't cross pages.
1016 */
1017 if (cb + (GCPtrSrc & PAGE_OFFSET_MASK) > PAGE_SIZE)
1018 {
1019 for (;;)
1020 {
1021 unsigned cbRead = RT_MIN(cb, PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK));
1022 int rc = PGMR3MapRead(pVM, pvDst, GCPtrSrc, cbRead);
1023 if (VBOX_FAILURE(rc))
1024 return rc;
1025 cb -= cbRead;
1026 if (!cb)
1027 break;
1028 pvDst = (char *)pvDst + cbRead;
1029 GCPtrSrc += cbRead;
1030 }
1031 return VINF_SUCCESS;
1032 }
1033
1034 /*
1035 * Find the mapping.
1036 */
1037 PPGMMAPPING pCur = CTXALLSUFF(pVM->pgm.s.pMappings);
1038 while (pCur)
1039 {
1040 RTGCUINTPTR off = (RTGCUINTPTR)GCPtrSrc - (RTGCUINTPTR)pCur->GCPtr;
1041 if (off < pCur->cb)
1042 {
1043 if (off + cb > pCur->cb)
1044 {
1045 AssertMsgFailed(("Invalid page range %VGv LB%#x. mapping '%s' %VGv to %VGv\n",
1046 GCPtrSrc, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast));
1047 return VERR_INVALID_PARAMETER;
1048 }
1049
1050 unsigned iPT = off >> PGDIR_SHIFT;
1051 unsigned iPTE = (off >> PAGE_SHIFT) & PTE_MASK;
1052 while (cb > 0 && iPTE < ELEMENTS(CTXALLSUFF(pCur->aPTs[iPT].pPT)->a))
1053 {
1054 if (!CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].n.u1Present)
1055 return VERR_PAGE_NOT_PRESENT;
1056 RTHCPHYS HCPhys = CTXALLSUFF(pCur->aPTs[iPT].paPaePTs)[iPTE / 512].a[iPTE % 512].u & X86_PTE_PAE_PG_MASK;
1057
1058 /*
1059 * Get the virtual page from the physical one.
1060 */
1061 void *pvPage;
1062 int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pvPage);
1063 if (VBOX_FAILURE(rc))
1064 return rc;
1065
1066 memcpy(pvDst, (char *)pvPage + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
1067 return VINF_SUCCESS;
1068 }
1069 }
1070
1071 /* next */
1072 pCur = CTXALLSUFF(pCur->pNext);
1073 }
1074
1075 return VERR_INVALID_POINTER;
1076}
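
/*
 * Minimal usage sketch for PGMR3MapRead; GCPtrInsideMapping is hypothetical
 * and must fall entirely inside one registered mapping. Reads go through the
 * mapping's own PAE page tables, so a page that hasn't been entered yet
 * yields VERR_PAGE_NOT_PRESENT.
 *
 * @code
 *  uint8_t abBuf[64];
 *  int rc = PGMR3MapRead(pVM, abBuf, GCPtrInsideMapping, sizeof(abBuf));
 * @endcode
 */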
1077
1078
1079/**
1080 * Dumps one virtual handler range.
1081 *
1082 * @returns 0
1083 * @param pNode Pointer to a PGMVIRTHANDLER.
1084 * @param pvUser Pointer to command helper functions.
1085 */
1086static DECLCALLBACK(int) pgmVirtHandlerDump(PAVLROGCPTRNODECORE pNode, void *pvUser)
1087{
1088 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1089
1090 switch (pCur->enmType)
1091 {
1092 case PGMVIRTHANDLERTYPE_EIP:
1093 RTLogPrintf("EIP %RGv-%RGv size %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->cb, pCur->pszDesc);
1094 break;
1095 case PGMVIRTHANDLERTYPE_NORMAL:
1096 RTLogPrintf("NORMAL %RGv-%RGv size %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->cb, pCur->pszDesc);
1097 break;
1098 case PGMVIRTHANDLERTYPE_WRITE:
1099 RTLogPrintf("WRITE %RGv-%RGv size %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->cb, pCur->pszDesc);
1100 break;
1101 case PGMVIRTHANDLERTYPE_HYPERVISOR:
1102 RTLogPrintf("WRITEHYP %RGv-%RGv size %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->cb, pCur->pszDesc);
1103 break;
1104 case PGMVIRTHANDLERTYPE_ALL:
1105 RTLogPrintf("ALL %RGv-%RGv size %RGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->cb, pCur->pszDesc);
1106 break;
1107 }
1108 if (pCur->enmType != PGMVIRTHANDLERTYPE_HYPERVISOR)
1109 for (unsigned i = 0; i < pCur->cPages; i++)
1110 RTLogPrintf("Physical page %#05d %VGp\n", i, pCur->aPhysToVirt[i].Core.Key);
1111 return 0;
1112}
1113
1114/**
1115 * Dumps the current mappings to the log
1116 *
1117 * @returns VBox status.
1118 * @param pVM Pointer to the current VM (if any).
1119 *
1120 */
1121PGMR3DECL(void) PGMR3DumpMappings(PVM pVM)
1122{
1123 /*
1124 * Print message about the fixedness of the mappings and dump them.
1125 */
1126 RTLogPrintf(pVM->pgm.s.fMappingsFixed ? "\nThe mappings are FIXED.\n" : "\nThe mappings are FLOATING.\n");
1127
1128 PPGMMAPPING pCur;
1129 for (pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
1130 RTLogPrintf("%VGv - %VGv %s\n", pCur->GCPtr, pCur->GCPtrLast, pCur->pszDesc);
1131
1132/** @todo The handler stuff is totally alien here. This should be converted into an 'info' function. */
1133 RTLogPrintf("\nVirtual handlers:\n");
1134 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesHC->VirtHandlers, true, pgmVirtHandlerDump, pVM);
1135
1136 RTLogPrintf("\n"
1137 "Physical handlers: (PhysHandlers=%d (%#x))\n"
1138 "From - To (incl) HandlerHC UserHC HandlerHC UserHC HandlerGC UserGC Type Description\n",
1139 pVM->pgm.s.pTreesHC->PhysHandlers, pVM->pgm.s.pTreesHC->PhysHandlers);
1140 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.pTreesHC->PhysHandlers, true, pgmR3DumpMappingsPhysicalCB, NULL);
1141}
1142
1143
1144/**
1145 * Dumps one physical handler range.
1146 *
1147 * @returns 0
1148 * @param pNode Pointer to a PGMPHYSHANDLER.
1149 * @param pvUser Ignored.
1150 */
1151static DECLCALLBACK(int) pgmR3DumpMappingsPhysicalCB(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1152{
1153 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode; NOREF(pvUser);
1154 const char *pszType;
1155 switch (pCur->enmType)
1156 {
1157 case PGMPHYSHANDLERTYPE_MMIO: pszType = "MMIO "; break;
1158 case PGMPHYSHANDLERTYPE_PHYSICAL: pszType = "Natural"; break;
1159 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE: pszType = "Write "; break;
1160 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL: pszType = "All "; break;
1161 default: pszType = "????"; break;
1162 }
1163 RTLogPrintf("%VGp - %VGp %VHv %VHv %VHv %VHv %VGv %VGv %s %s\n",
1164 pCur->Core.Key, pCur->Core.KeyLast, pCur->pfnHandlerR3, pCur->pvUserR3, pCur->pfnHandlerR0, pCur->pvUserR0,
1165 pCur->pfnHandlerGC, pCur->pvUserGC, pszType, pCur->pszDesc);
1166 return 0;
1167}
1168