VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp @ 24125

Last change on this file since 24125 was 23121, checked in by vboxsync, 15 years ago

Paging updates:

  • use the dirty page handling after fewer writes
  • don't always invalidate PTEs in pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs; just flipping the X86_PTE_W bit is sufficient (see the sketch below the file info)
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 67.5 KB
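
The second bullet above refers to write-protecting shadow page table entries instead of invalidating them outright. As a rough, illustrative sketch (not part of the file), assuming a shadow PTE is accessible as a plain uint64_t and using the X86_PTE_W bit definition from the VBox x86 headers, the difference looks like this; in the real code the choice is made inside pgmPoolTrackUpdateGCPhys, called from pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs below:

    /* Illustrative only: cheap write-protect vs. full invalidation of a shadow PTE. */
    static void sketchWriteProtectShadowPte(uint64_t *pShwPte)
    {
        *pShwPte &= ~(uint64_t)X86_PTE_W;  /* keep the mapping; writes now fault into the handler */
    }

    static void sketchInvalidateShadowPte(uint64_t *pShwPte)
    {
        *pShwPte = 0;                      /* drop the mapping entirely; the next access must resync */
    }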
1/* $Id: PGMAllHandler.cpp 23121 2009-09-18 11:12:52Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/em.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/dbgf.h>
35#include <VBox/rem.h>
36#include "PGMInternal.h"
37#include <VBox/vm.h>
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/selm.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
52static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
53static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
54
55
56
57/**
58 * Register an access handler for a physical range.
59 *
60 * @returns VBox status code.
61 * @retval VINF_SUCCESS when successfully installed.
62 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
63 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
64 * flagged together with a pool clearing.
65 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
66 * one. A debug assertion is raised.
67 *
68 * @param pVM VM Handle.
69 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
70 * @param GCPhys Start physical address.
71 * @param GCPhysLast Last physical address. (inclusive)
72 * @param pfnHandlerR3 The R3 handler.
73 * @param pvUserR3 User argument to the R3 handler.
74 * @param pfnHandlerR0 The R0 handler.
75 * @param pvUserR0 User argument to the R0 handler.
76 * @param pfnHandlerRC The RC handler.
77 * @param pvUserRC User argument to the RC handler. This can be a value
78 * less than 0x10000 or a (non-null) pointer that is
79 * automatically relocated.
80 * @param pszDesc Pointer to description string. This must not be freed.
81 */
82VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
83 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
84 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
85 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
86 R3PTRTYPE(const char *) pszDesc)
87{
88 Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%RGp GCPhysLast=%RGp pfnHandlerR3=%RHv pvUserR3=%RHv pfnHandlerR0=%RHv pvUserR0=%RHv pfnHandlerGC=%RRv pvUserGC=%RRv pszDesc=%s\n",
89 enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, R3STRING(pszDesc)));
90
91 /*
92 * Validate input.
93 */
94 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
95 switch (enmType)
96 {
97 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
98 break;
99 case PGMPHYSHANDLERTYPE_MMIO:
100 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
101 /* Simplification in PGMPhysRead among other places. */
102 AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
103 AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
104 break;
105 default:
106 AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
107 return VERR_INVALID_PARAMETER;
108 }
109 AssertMsgReturn( (RTRCUINTPTR)pvUserRC < 0x10000
110 || MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
111 ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
112 VERR_INVALID_PARAMETER);
113 AssertMsgReturn( (RTR0UINTPTR)pvUserR0 < 0x10000
114 || MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
115 ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
116 VERR_INVALID_PARAMETER);
117 AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
118 AssertReturn(pfnHandlerR0, VERR_INVALID_PARAMETER);
119 AssertReturn(pfnHandlerRC, VERR_INVALID_PARAMETER);
120
121 /*
122 * We require the range to be within registered ram.
123 * There is no apparent need to support ranges which cover more than one ram range.
124 */
125 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
126 while (pRam && GCPhys > pRam->GCPhysLast)
127 pRam = pRam->CTX_SUFF(pNext);
128 if ( !pRam
129 || GCPhysLast < pRam->GCPhys
130 || GCPhys > pRam->GCPhysLast)
131 {
132#ifdef IN_RING3
133 DBGFR3Info(pVM, "phys", NULL, NULL);
134#endif
135 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
136 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
137 }
138
139 /*
140 * Allocate and initialize the new entry.
141 */
142 PPGMPHYSHANDLER pNew;
143 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
144 if (RT_FAILURE(rc))
145 return rc;
146
147 pNew->Core.Key = GCPhys;
148 pNew->Core.KeyLast = GCPhysLast;
149 pNew->enmType = enmType;
150 pNew->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
151 pNew->pfnHandlerR3 = pfnHandlerR3;
152 pNew->pvUserR3 = pvUserR3;
153 pNew->pfnHandlerR0 = pfnHandlerR0;
154 pNew->pvUserR0 = pvUserR0;
155 pNew->pfnHandlerRC = pfnHandlerRC;
156 pNew->pvUserRC = pvUserRC;
157 pNew->pszDesc = pszDesc;
158
159 pgmLock(pVM);
160
161 /*
162 * Try insert into list.
163 */
164 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
165 {
166 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
167 if (rc == VINF_PGM_SYNC_CR3)
168 rc = VINF_PGM_GCPHYS_ALIASED;
169 pgmUnlock(pVM);
170 PGM_INVL_ALL_VCPU_TLBS(pVM);
171#ifndef IN_RING3
172 REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
173#else
174 REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
175#endif
176 if (rc != VINF_SUCCESS)
177 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
178 return rc;
179 }
180
181 pgmUnlock(pVM);
182
183#if defined(IN_RING3) && defined(VBOX_STRICT)
184 DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
185#endif
186 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
187 MMHyperFree(pVM, pNew);
188 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
189}
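
As a rough usage illustration (not part of the file), a ring-3 caller could register a write handler for a single page roughly as follows. The callback names and user pointers are hypothetical placeholders; callers may also use PGMR3HandlerPhysicalRegister (mentioned later in this file) rather than calling this function directly.

    /* Hypothetical sketch: pfnMyWriteR3/R0/RC and the pvUser values are placeholders. */
    int rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
                                          GCPhys,                   /* first byte of the page */
                                          GCPhys + PAGE_SIZE - 1,   /* last byte, inclusive */
                                          pfnMyWriteR3, pvMyUserR3,
                                          pfnMyWriteR0, pvMyUserR0,
                                          pfnMyWriteRC, pvMyUserRC,
                                          "Example write monitor");
    AssertRC(rc); /* fails with VERR_PGM_HANDLER_PHYSICAL_CONFLICT or _NO_RAM_RANGE */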
190
191
192/**
193 * Sets ram range flags and attempts updating shadow PTs.
194 *
195 * @returns VBox status code.
196 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
197 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs couldn't be updated because
198 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
199 * @param pVM The VM handle.
200 * @param pCur The physical handler.
201 * @param pRam The RAM range.
202 */
203static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
204{
205 /*
206 * Iterate the guest ram pages updating the flags and flushing PT entries
207 * mapping the page.
208 */
209 bool fFlushTLBs = false;
210 int rc = VINF_SUCCESS;
211 const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
212 uint32_t cPages = pCur->cPages;
213 uint32_t i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
214 for (;;)
215 {
216 PPGMPAGE pPage = &pRam->aPages[i];
217 AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage),
218 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
219
220 /* Only do upgrades. */
221 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
222 {
223 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
224
225 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pPage, false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
226 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
227 rc = rc2;
228 }
229
230 /* next */
231 if (--cPages == 0)
232 break;
233 i++;
234 }
235
236 if (fFlushTLBs && rc == VINF_SUCCESS)
237 {
238 PGM_INVL_ALL_VCPU_TLBS(pVM);
239 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
240 }
241 else
242 {
243 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc\n", rc));
244 }
245 return rc;
246}
247
248
249/**
250 * Deregister a physical page access handler.
251 *
252 * @returns VBox status code.
253 * @param pVM VM Handle.
254 * @param GCPhys Start physical address.
255 */
256VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
257{
258 /*
259 * Find the handler.
260 */
261 pgmLock(pVM);
262 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
263 if (pCur)
264 {
265 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
266 pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));
267
268 /*
269 * Clear the page bits and notify the REM about this change.
270 */
271 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
272 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
273 MMHyperFree(pVM, pCur);
274 pgmUnlock(pVM);
275 PGM_INVL_ALL_VCPU_TLBS(pVM);
276 return VINF_SUCCESS;
277 }
278 pgmUnlock(pVM);
279
280 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
281 return VERR_PGM_HANDLER_NOT_FOUND;
282}
283
284
285/**
286 * Shared code with modify.
287 */
288static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
289{
290 RTGCPHYS GCPhysStart = pCur->Core.Key;
291 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
292
293 /*
294 * Page align the range.
295 *
296 * Since we've reset (recalculated) the physical handler state of all pages
297 * we can make use of the page states to figure out whether a page should be
298 * included in the REM notification or not.
299 */
300 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
301 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
302 {
303 Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);
304
305 if (GCPhysStart & PAGE_OFFSET_MASK)
306 {
307 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
308 if ( pPage
309 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
310 {
311 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
312 if ( GCPhys > GCPhysLast
313 || GCPhys < GCPhysStart)
314 return;
315 GCPhysStart = GCPhys;
316 }
317 else
318 GCPhysStart &= X86_PTE_PAE_PG_MASK;
319 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
320 }
321
322 if (GCPhysLast & PAGE_OFFSET_MASK)
323 {
324 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
325 if ( pPage
326 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
327 {
328 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
329 if ( GCPhys < GCPhysStart
330 || GCPhys > GCPhysLast)
331 return;
332 GCPhysLast = GCPhys;
333 }
334 else
335 GCPhysLast |= PAGE_OFFSET_MASK;
336 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
337 }
338 }
339
340 /*
341 * Tell REM.
342 */
343 const bool fRestoreAsRAM = pCur->pfnHandlerR3
344 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
345#ifndef IN_RING3
346 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
347#else
348 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
349#endif
350}
351
352
353/**
354 * pgmHandlerPhysicalResetRamFlags helper that checks for
355 * other handlers on edge pages.
356 */
357DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
358{
359 /*
360 * Look for other handlers.
361 */
362 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
363 for (;;)
364 {
365 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
366 if ( !pCur
367 || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
368 break;
369 unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
370 uState = RT_MAX(uState, uThisState);
371
372 /* next? */
373 RTGCPHYS GCPhysNext = fAbove
374 ? pCur->Core.KeyLast + 1
375 : pCur->Core.Key - 1;
376 if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
377 break;
378 GCPhys = GCPhysNext;
379 }
380
381 /*
382 * Update if we found something that is a higher priority
383 * state than the current.
384 */
385 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
386 {
387 PPGMPAGE pPage;
388 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
389 if ( RT_SUCCESS(rc)
390 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
391 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
392 else
393 AssertRC(rc);
394 }
395}
396
397
398/**
399 * Resets an aliased page.
400 *
401 * @param pVM The VM.
402 * @param pPage The page.
403 * @param GCPhysPage The page address in case it comes in handy.
404 */
405void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
406{
407 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO);
408 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
409
410 /*
411 * Flush any shadow page table references *first*.
412 */
413 bool fFlushTLBs = false;
414 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
415 AssertLogRelRCReturnVoid(rc);
416# ifdef IN_RC
417 if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3)
418 PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
419# else
420 HWACCMFlushTLBOnAllVCpus(pVM);
421# endif
422
423 /*
424 * Make it an MMIO/Zero page.
425 */
426 PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
427 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
428 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
429 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
430 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
431
432 NOREF(GCPhysPage);
433}
434
435
436/**
437 * Resets ram range flags.
438 *
439 * @returns VBox status code.
440 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
441 * @param pVM The VM handle.
442 * @param pCur The physical handler.
443 *
444 * @remark We don't start messing with the shadow page tables, as we've already got code
445 * in Trap0e which deals with out of sync handler flags (originally conceived for
446 * global pages).
447 */
448static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
449{
450 /*
451 * Iterate the guest ram pages updating the state.
452 */
453 RTUINT cPages = pCur->cPages;
454 RTGCPHYS GCPhys = pCur->Core.Key;
455 PPGMRAMRANGE pRamHint = NULL;
456 PPGM pPGM = &pVM->pgm.s;
457 for (;;)
458 {
459 PPGMPAGE pPage;
460 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
461 if (RT_SUCCESS(rc))
462 {
463 /* Reset MMIO2-aliased MMIO pages back to plain MMIO, since this aliasing is our business.
464 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
465 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
466 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys);
467 AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
468 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
469 }
470 else
471 AssertRC(rc);
472
473 /* next */
474 if (--cPages == 0)
475 break;
476 GCPhys += PAGE_SIZE;
477 }
478
479 /*
480 * Check for partial start and end pages.
481 */
482 if (pCur->Core.Key & PAGE_OFFSET_MASK)
483 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
484 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
485 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
486}
487
488
489/**
490 * Modify a physical page access handler.
491 *
492 * Modification can only be done to the range itself, not the type or anything else.
493 *
494 * @returns VBox status code.
495 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
496 * and a new registration must be performed!
497 * @param pVM VM handle.
498 * @param GCPhysCurrent Current location.
499 * @param GCPhys New location.
500 * @param GCPhysLast New last location.
501 */
502VMMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
503{
504 /*
505 * Remove it.
506 */
507 int rc;
508 pgmLock(pVM);
509 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
510 if (pCur)
511 {
512 /*
513 * Clear the ram flags. (We're gonna move or free it!)
514 */
515 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
516 const bool fRestoreAsRAM = pCur->pfnHandlerR3
517 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
518
519 /*
520 * Validate the new range, modify and reinsert.
521 */
522 if (GCPhysLast >= GCPhys)
523 {
524 /*
525 * We require the range to be within registered ram.
526 * There is no apparent need to support ranges which cover more than one ram range.
527 */
528 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
529 while (pRam && GCPhys > pRam->GCPhysLast)
530 pRam = pRam->CTX_SUFF(pNext);
531 if ( pRam
532 && GCPhys <= pRam->GCPhysLast
533 && GCPhysLast >= pRam->GCPhys)
534 {
535 pCur->Core.Key = GCPhys;
536 pCur->Core.KeyLast = GCPhysLast;
537 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
538
539 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
540 {
541 PGMPHYSHANDLERTYPE enmType = pCur->enmType;
542 RTGCPHYS cb = GCPhysLast - GCPhys + 1;
543 bool fHasHCHandler = !!pCur->pfnHandlerR3;
544
545 /*
546 * Set ram flags, flush shadow PT entries and finally tell REM about this.
547 */
548 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
549 pgmUnlock(pVM);
550
551#ifndef IN_RING3
552 REMNotifyHandlerPhysicalModify(pVM, enmType, GCPhysCurrent, GCPhys,
553 cb, fHasHCHandler, fRestoreAsRAM);
554#else
555 REMR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysCurrent, GCPhys,
556 cb, fHasHCHandler, fRestoreAsRAM);
557#endif
558 PGM_INVL_ALL_VCPU_TLBS(pVM);
559 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
560 GCPhysCurrent, GCPhys, GCPhysLast));
561 return VINF_SUCCESS;
562 }
563
564 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
565 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
566 }
567 else
568 {
569 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
570 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
571 }
572 }
573 else
574 {
575 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
576 rc = VERR_INVALID_PARAMETER;
577 }
578
579 /*
580 * Invalid new location, free it.
581 * We've only gotta notify REM and free the memory.
582 */
583 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
584 MMHyperFree(pVM, pCur);
585 }
586 else
587 {
588 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
589 rc = VERR_PGM_HANDLER_NOT_FOUND;
590 }
591
592 pgmUnlock(pVM);
593 return rc;
594}
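
A rough usage illustration (not part of the file; the addresses and size are placeholders): moving a previously registered range to a new guest-physical location of the same size. Per the comment above, any failure other than VERR_PGM_HANDLER_NOT_FOUND leaves the handler deregistered.

    /* Hypothetical sketch: relocate the handler registered at GCPhysOld to GCPhysNew. */
    int rc = PGMHandlerPhysicalModify(pVM, GCPhysOld, GCPhysNew, GCPhysNew + cb - 1);
    if (RT_FAILURE(rc) && rc != VERR_PGM_HANDLER_NOT_FOUND)
    {
        /* the old registration is gone; a new registration must be performed */
    }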
595
596
597/**
598 * Changes the callbacks associated with a physical access handler.
599 *
600 * @returns VBox status code.
601 * @param pVM VM Handle.
602 * @param GCPhys Start physical address.
603 * @param pfnHandlerR3 The R3 handler.
604 * @param pvUserR3 User argument to the R3 handler.
605 * @param pfnHandlerR0 The R0 handler.
606 * @param pvUserR0 User argument to the R0 handler.
607 * @param pfnHandlerRC The RC handler.
608 * @param pvUserRC User argument to the RC handler. Values greater than or
609 * equal to 0x10000 will be relocated automatically.
610 * @param pszDesc Pointer to description string. This must not be freed.
611 */
612VMMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
613 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
614 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
615 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
616 R3PTRTYPE(const char *) pszDesc)
617{
618 /*
619 * Get the handler.
620 */
621 int rc = VINF_SUCCESS;
622 pgmLock(pVM);
623 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
624 if (pCur)
625 {
626 /*
627 * Change callbacks.
628 */
629 pCur->pfnHandlerR3 = pfnHandlerR3;
630 pCur->pvUserR3 = pvUserR3;
631 pCur->pfnHandlerR0 = pfnHandlerR0;
632 pCur->pvUserR0 = pvUserR0;
633 pCur->pfnHandlerRC = pfnHandlerRC;
634 pCur->pvUserRC = pvUserRC;
635 pCur->pszDesc = pszDesc;
636 }
637 else
638 {
639 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
640 rc = VERR_PGM_HANDLER_NOT_FOUND;
641 }
642
643 pgmUnlock(pVM);
644 return rc;
645}
646
647
648/**
649 * Splits a physical access handler in two.
650 *
651 * @returns VBox status code.
652 * @param pVM VM Handle.
653 * @param GCPhys Start physical address of the handler.
654 * @param GCPhysSplit The split address.
655 */
656VMMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
657{
658 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
659
660 /*
661 * Do the allocation without owning the lock.
662 */
663 PPGMPHYSHANDLER pNew;
664 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
665 if (RT_FAILURE(rc))
666 return rc;
667
668 /*
669 * Get the handler.
670 */
671 pgmLock(pVM);
672 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
673 if (RT_LIKELY(pCur))
674 {
675 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
676 {
677 /*
678 * Create new handler node for the 2nd half.
679 */
680 *pNew = *pCur;
681 pNew->Core.Key = GCPhysSplit;
682 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
683
684 pCur->Core.KeyLast = GCPhysSplit - 1;
685 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
686
687 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
688 {
689 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
690 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
691 pgmUnlock(pVM);
692 return VINF_SUCCESS;
693 }
694 AssertMsgFailed(("whu?\n"));
695 rc = VERR_INTERNAL_ERROR;
696 }
697 else
698 {
699 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
700 rc = VERR_INVALID_PARAMETER;
701 }
702 }
703 else
704 {
705 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
706 rc = VERR_PGM_HANDLER_NOT_FOUND;
707 }
708 pgmUnlock(pVM);
709 MMHyperFree(pVM, pNew);
710 return rc;
711}
712
713
714/**
715 * Joins up two adjacent physical access handlers which have the same callbacks.
716 *
717 * @returns VBox status code.
718 * @param pVM VM Handle.
719 * @param GCPhys1 Start physical address of the first handler.
720 * @param GCPhys2 Start physical address of the second handler.
721 */
722VMMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
723{
724 /*
725 * Get the handlers.
726 */
727 int rc;
728 pgmLock(pVM);
729 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
730 if (RT_LIKELY(pCur1))
731 {
732 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
733 if (RT_LIKELY(pCur2))
734 {
735 /*
736 * Make sure that they are adjacent, and that they've got the same callbacks.
737 */
738 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
739 {
740 if (RT_LIKELY( pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
741 && pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
742 && pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3))
743 {
744 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
745 if (RT_LIKELY(pCur3 == pCur2))
746 {
747 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
748 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
749 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
750 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
751 MMHyperFree(pVM, pCur2);
752 pgmUnlock(pVM);
753 return VINF_SUCCESS;
754 }
755
756 Assert(pCur3 == pCur2);
757 rc = VERR_INTERNAL_ERROR;
758 }
759 else
760 {
761 AssertMsgFailed(("mismatching handlers\n"));
762 rc = VERR_ACCESS_DENIED;
763 }
764 }
765 else
766 {
767 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
768 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
769 rc = VERR_INVALID_PARAMETER;
770 }
771 }
772 else
773 {
774 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
775 rc = VERR_PGM_HANDLER_NOT_FOUND;
776 }
777 }
778 else
779 {
780 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
781 rc = VERR_PGM_HANDLER_NOT_FOUND;
782 }
783 pgmUnlock(pVM);
784 return rc;
785
786}
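
A rough illustration of how split and join pair up (not part of the file; the addresses are placeholders): a handler registered for 0xA0000-0xA3FFF can be cut at a page boundary and later glued back together, and since the split copies the handler node, both halves keep identical callbacks as the join requires.

    /* Hypothetical sketch: split an existing handler at 0xA2000, then re-join it. */
    int rc = PGMHandlerPhysicalSplit(pVM, 0xA0000, 0xA2000);  /* -> 0xA0000-0xA1FFF and 0xA2000-0xA3FFF */
    if (RT_SUCCESS(rc))
        rc = PGMHandlerPhysicalJoin(pVM, 0xA0000, 0xA2000);   /* back to one 0xA0000-0xA3FFF range */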
787
788
789/**
790 * Resets any modifications to individual pages in a physical
791 * page access handler region.
792 *
793 * This is used in pair with PGMHandlerPhysicalPageTempOff() or
794 * PGMHandlerPhysicalPageAlias().
795 *
796 * @returns VBox status code.
797 * @param pVM VM Handle
798 * @param GCPhys The start address of the handler regions, i.e. what you
799 * passed to PGMR3HandlerPhysicalRegister(),
800 * PGMHandlerPhysicalRegisterEx() or
801 * PGMHandlerPhysicalModify().
802 */
803VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
804{
805 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
806 pgmLock(pVM);
807
808 /*
809 * Find the handler.
810 */
811 int rc;
812 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
813 if (RT_LIKELY(pCur))
814 {
815 /*
816 * Validate type.
817 */
818 switch (pCur->enmType)
819 {
820 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
821 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
822 case PGMPHYSHANDLERTYPE_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
823 {
824 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
825 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
826 Assert(pRam);
827 Assert(pRam->GCPhys <= pCur->Core.Key);
828 Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
829
830 if (pCur->enmType == PGMPHYSHANDLERTYPE_MMIO)
831 {
832 /*
833 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
834 * This could probably be optimized a bit wrt flushing, but I'm too lazy
835 * to do that now...
836 */
837 PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
838 uint32_t cLeft = pCur->cPages;
839 while (cLeft-- > 0)
840 {
841 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
842 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT));
843 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
844 pPage++;
845 }
846 }
847 else
848 {
849 /*
850 * Set the flags and flush shadow PT entries.
851 */
852 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
853 PGM_INVL_ALL_VCPU_TLBS(pVM);
854 }
855
856 rc = VINF_SUCCESS;
857 break;
858 }
859
860 /*
861 * Invalid.
862 */
863 default:
864 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
865 rc = VERR_INTERNAL_ERROR;
866 break;
867 }
868 }
869 else
870 {
871 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
872 rc = VERR_PGM_HANDLER_NOT_FOUND;
873 }
874
875 pgmUnlock(pVM);
876 return rc;
877}
878
879
880/**
881 * Temporarily turns off the access monitoring of a page within a monitored
882 * physical write/all page access handler region.
883 *
884 * Use this when no further \#PFs are required for that page. Be aware that
885 * a page directory sync might reset the flags, and turn on access monitoring
886 * for the page.
887 *
888 * The caller must do required page table modifications.
889 *
890 * @returns VBox status code.
891 * @param pVM VM Handle
892 * @param GCPhys The start address of the access handler. This
893 * must be a fully page aligned range or we risk
894 * messing up other handlers installed for the
895 * start and end pages.
896 * @param GCPhysPage The physical address of the page to turn off
897 * access monitoring for.
898 */
899VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
900{
901 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhys=%RGp\n", GCPhys));
902
903 pgmLock(pVM);
904 /*
905 * Validate the range.
906 */
907 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
908 if (RT_LIKELY(pCur))
909 {
910 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
911 && GCPhysPage <= pCur->Core.KeyLast))
912 {
913 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
914 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
915
916 AssertReturnStmt( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
917 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
918 pgmUnlock(pVM), VERR_ACCESS_DENIED);
919
920 /*
921 * Change the page status.
922 */
923 PPGMPAGE pPage;
924 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
925 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
926 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
927 pgmUnlock(pVM);
928 return VINF_SUCCESS;
929 }
930 pgmUnlock(pVM);
931 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
932 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
933 return VERR_INVALID_PARAMETER;
934 }
935 pgmUnlock(pVM);
936 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
937 return VERR_PGM_HANDLER_NOT_FOUND;
938}
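
A rough usage illustration (not part of the file): once a monitored page no longer needs to trap accesses, its monitoring can be switched off and later re-armed for the whole range. GCPhysHandler and GCPhysPage are placeholders for the registered start address and a page inside the range.

    /* Hypothetical sketch of the temp-off / reset pairing described above. */
    int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandler, GCPhysPage);
    AssertRC(rc);
    /* ... later, when monitoring is wanted again for every page in the range: */
    rc = PGMHandlerPhysicalReset(pVM, GCPhysHandler);
    AssertRC(rc);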
939
940
941/**
942 * Replaces an MMIO page with an MMIO2 page.
943 *
944 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
945 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
946 * backing, the caller must provide a replacement page. For various reasons the
947 * replacement page must be an MMIO2 page.
948 *
949 * The caller must do required page table modifications. You can get away
950 * without making any modifications since it's an MMIO page; the cost is an extra
951 * \#PF which will then resync the page.
952 *
953 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
954 *
955 * The caller may still get handler callbacks even after this call and must be
956 * able to deal correctly with such calls. The reason for these callbacks is
957 * either that we're executing in the recompiler (which doesn't know about this
958 * arrangement) or that we've been restored from saved state (where we won't
959 * save the change).
960 *
961 * @returns VBox status code.
962 * @param pVM The VM handle
963 * @param GCPhys The start address of the access handler. This
964 * must be a fully page aligned range or we risk
965 * messing up other handlers installed for the
966 * start and end pages.
967 * @param GCPhysPage The physical address of the page to turn off
968 * access monitoring for.
969 * @param GCPhysPageRemap The physical address of the MMIO2 page that
970 * serves as backing memory.
971 *
972 * @remark May cause a page pool flush if used on a page that is already
973 * aliased.
974 *
975 * @note This trick only works reliably if the two pages are never ever
976 * mapped in the same page table. If they are the page pool code will
977 * be confused should either of them be flushed. See the special case
978 * of zero page aliasing mentioned in #3170.
979 *
980 */
981VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
982{
983/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
984
985 pgmLock(pVM);
986 /*
987 * Lookup and validate the range.
988 */
989 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
990 if (RT_LIKELY(pCur))
991 {
992 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
993 && GCPhysPage <= pCur->Core.KeyLast))
994 {
995 AssertReturnStmt(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
996 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
997 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
998
999 /*
1000 * Get and validate the two pages.
1001 */
1002 PPGMPAGE pPageRemap;
1003 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPageRemap, &pPageRemap);
1004 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1005 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1006 ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
1007 pgmUnlock(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1008
1009 PPGMPAGE pPage;
1010 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
1011 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1012 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1013 {
1014 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1015 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1016 VERR_PGM_PHYS_NOT_MMIO2);
1017 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1018 {
1019 pgmUnlock(pVM);
1020 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1021 }
1022
1023 /*
1024 * The page is already mapped as some other page, reset it
1025 * to an MMIO/ZERO page before doing the new mapping.
1026 */
1027 Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1028 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1029 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage);
1030 }
1031 Assert(PGM_PAGE_IS_ZERO(pPage));
1032
1033 /*
1034 * Do the actual remapping here.
1035 * This page now serves as an alias for the backing memory specified.
1036 */
1037 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
1038 GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
1039 PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1040 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1041 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1042 PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1043 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1044 LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
1045
1046 pgmUnlock(pVM);
1047 return VINF_SUCCESS;
1048 }
1049
1050 pgmUnlock(pVM);
1051 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1052 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1053 return VERR_INVALID_PARAMETER;
1054 }
1055
1056 pgmUnlock(pVM);
1057 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1058 return VERR_PGM_HANDLER_NOT_FOUND;
1059}
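
A rough illustration of the aliasing flow described in the comment above (not part of the file; all addresses are placeholders): a page inside an MMIO handler range is pointed at MMIO2 backing, and PGMHandlerPhysicalReset() later restores the plain MMIO/zero page.

    /* Hypothetical sketch: alias one MMIO page to MMIO2 backing, then undo it. */
    int rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysMmioStart, GCPhysMmioPage, GCPhysMmio2Page);
    AssertRC(rc);
    /* ... later, to restore the page to the zero MMIO page and full monitoring: */
    rc = PGMHandlerPhysicalReset(pVM, GCPhysMmioStart);
    AssertRC(rc);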
1060
1061/**
1062 * Replaces an MMIO page with an arbitrary HC page.
1063 *
1064 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1065 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1066 * backing, the caller must provide a replacement page. Unlike PGMHandlerPhysicalPageAlias,
1067 * the replacement here is specified as a host-physical (HC) page.
1068 *
1069 * The caller must do required page table modifications. You can get away
1070 * without making any modifications since it's an MMIO page; the cost is an extra
1071 * \#PF which will then resync the page.
1072 *
1073 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1074 *
1075 * The caller may still get handler callbacks even after this call and must be
1076 * able to deal correctly with such calls. The reason for these callbacks is
1077 * either that we're executing in the recompiler (which doesn't know about this
1078 * arrangement) or that we've been restored from saved state (where we won't
1079 * save the change).
1080 *
1081 * @returns VBox status code.
1082 * @param pVM The VM handle
1083 * @param GCPhys The start address of the access handler. This
1084 * must be a fully page aligned range or we risk
1085 * messing up other handlers installed for the
1086 * start and end pages.
1087 * @param GCPhysPage The physical address of the page to turn off
1088 * access monitoring for.
1089 * @param HCPhysPageRemap The physical address of the HC page that
1090 * serves as backing memory.
1091 *
1092 * @remark May cause a page pool flush if used on a page that is already
1093 * aliased.
1094 */
1095VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1096{
1097/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1098
1099 /*
1100 * Lookup and validate the range.
1101 */
1102 pgmLock(pVM);
1103 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1104 if (RT_LIKELY(pCur))
1105 {
1106 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1107 && GCPhysPage <= pCur->Core.KeyLast))
1108 {
1109 AssertReturnStmt(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
1110 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1111 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1112
1113 /*
1114 * Get and validate the pages.
1115 */
1116 PPGMPAGE pPage;
1117 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
1118 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1119 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1120 {
1121 pgmUnlock(pVM);
1122 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1123 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1124 VERR_PGM_PHYS_NOT_MMIO2);
1125 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1126 }
1127 Assert(PGM_PAGE_IS_ZERO(pPage));
1128
1129 /*
1130 * Do the actual remapping here.
1131 * This page now serves as an alias for the backing memory specified.
1132 */
1133 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n",
1134 GCPhysPage, pPage, HCPhysPageRemap));
1135 PGM_PAGE_SET_HCPHYS(pPage, HCPhysPageRemap);
1136 PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1137 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1138 /** @todo hack alert
1139 * This needs to be done properly. Currently we get away with it as the recompiler directly calls
1140 * IOM read and write functions. Access through PGMPhysRead/Write will crash the process.
1141 */
1142 PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
1143 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1144 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1145 pgmUnlock(pVM);
1146 return VINF_SUCCESS;
1147 }
1148 pgmUnlock(pVM);
1149 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1150 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1151 return VERR_INVALID_PARAMETER;
1152 }
1153 pgmUnlock(pVM);
1154
1155 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1156 return VERR_PGM_HANDLER_NOT_FOUND;
1157}
1158
1159
1160/**
1161 * Checks if a physical range is handled
1162 *
1163 * @returns boolean
1164 * @param pVM VM Handle.
1165 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1166 * @remarks Caller must take the PGM lock...
1167 * @threads EMT.
1168 */
1169VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
1170{
1171 /*
1172 * Find the handler.
1173 */
1174 pgmLock(pVM);
1175 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1176 if (pCur)
1177 {
1178 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1179 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1180 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1181 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
1182 pgmUnlock(pVM);
1183 return true;
1184 }
1185 pgmUnlock(pVM);
1186 return false;
1187}
1188
1189
1190/**
1191 * Checks if it's a disabled all access handler or write access handler at the
1192 * given address.
1193 *
1194 * @returns true if it's an all access handler, false if it's a write access
1195 * handler.
1196 * @param pVM Pointer to the shared VM structure.
1197 * @param GCPhys The address of the page with a disabled handler.
1198 *
1199 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1200 */
1201bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys)
1202{
1203 pgmLock(pVM);
1204 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1205 if (!pCur)
1206 {
1207 pgmUnlock(pVM);
1208 AssertFailed();
1209 return true;
1210 }
1211 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1212 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1213 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO); /* sanity */
1214 /* Only whole pages can be disabled. */
1215 Assert( pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK)
1216 && pCur->Core.KeyLast >= (GCPhys | PAGE_OFFSET_MASK));
1217
1218 bool bRet = pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE;
1219 pgmUnlock(pVM);
1220 return bRet;
1221}
1222
1223
1224/**
1225 * Checks if a particular guest VA is being monitored.
1226 *
1227 * @returns true or false
1228 * @param pVM VM handle.
1229 * @param GCPtr Virtual address.
1230 * @remarks Will acquire the PGM lock.
1231 * @threads Any.
1232 */
1233VMMDECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr)
1234{
1235 pgmLock(pVM);
1236 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtr);
1237 pgmUnlock(pVM);
1238
1239 return pCur != NULL;
1240}
1241
1242
1243/**
1244 * Search for virtual handler with matching physical address
1245 *
1246 * @returns VBox status code
1247 * @param pVM The VM handle.
1248 * @param GCPhys GC physical address to search for.
1249 * @param ppVirt Where to store the pointer to the virtual handler structure.
1250 * @param piPage Where to store the index of the cached physical page.
1251 */
1252int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
1253{
1254 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1255 Assert(ppVirt);
1256
1257 pgmLock(pVM);
1258 PPGMPHYS2VIRTHANDLER pCur;
1259 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, GCPhys);
1260 if (pCur)
1261 {
1262 /* found a match! */
1263 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1264 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
1265 pgmUnlock(pVM);
1266
1267#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1268 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
1269#endif
1270 LogFlow(("PHYS2VIRT: found match for %RGp -> %RGv *piPage=%#x\n", GCPhys, (*ppVirt)->Core.Key, *piPage));
1271 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1272 return VINF_SUCCESS;
1273 }
1274
1275 pgmUnlock(pVM);
1276 *ppVirt = NULL;
1277 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1278 return VERR_PGM_HANDLER_NOT_FOUND;
1279}
1280
1281
1282/**
1283 * Deal with aliases in phys2virt.
1284 *
1285 * As pointed out by the various todos, this currently only deals with
1286 * aliases where the two ranges match 100%.
1287 *
1288 * @param pVM The VM handle.
1289 * @param pPhys2Virt The node we failed to insert.
1290 */
1291static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
1292{
1293 /*
1294 * First find the node which is conflicting with us.
1295 */
1296 /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
1297 /** @todo check if the current head node covers the ground we do. This is highly unlikely
1298 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
1299 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1300#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1301 AssertReleaseMsg(pHead != pPhys2Virt, ("%RGp-%RGp offVirtHandler=%#RX32\n",
1302 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
1303#endif
1304 if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
1305 {
1306 /** @todo do something clever here... */
1307 LogRel(("pgmHandlerVirtualInsertAliased: %RGp-%RGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1308 pPhys2Virt->offNextAlias = 0;
1309 return;
1310 }
1311
1312 /*
1313 * Insert ourselves as the next node.
1314 */
1315 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1316 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
1317 else
1318 {
1319 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1320 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
1321 | PGMPHYS2VIRTHANDLER_IN_TREE;
1322 }
1323 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
1324 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1325 Log(("pgmHandlerVirtualInsertAliased: %RGp-%RGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1326}
1327
1328
1329/**
1330 * Resets one virtual handler range.
1331 *
1332 * This is called by HandlerVirtualUpdate when it has detected some kind of
1333 * problem and has started clearing the virtual handler page states (or
1334 * when there have been registration/deregistrations). For this reason this
1335 * function will only update the page status if it's lower than desired.
1336 *
1337 * @returns 0
1338 * @param pNode Pointer to a PGMVIRTHANDLER.
1339 * @param pvUser The VM handle.
1340 */
1341DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1342{
1343 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1344 PVM pVM = (PVM)pvUser;
1345
1346 Assert(PGMIsLockOwner(pVM));
1347 /*
1348 * Iterate the pages and apply the new state.
1349 */
1350 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1351 PPGMRAMRANGE pRamHint = NULL;
1352 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->Core.Key & PAGE_OFFSET_MASK);
1353 RTGCUINTPTR cbLeft = pCur->cb;
1354 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1355 {
1356 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1357 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
1358 {
1359 /*
1360 * Update the page state wrt virtual handlers.
1361 */
1362 PPGMPAGE pPage;
1363 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);
1364 if ( RT_SUCCESS(rc)
1365 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1366 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
1367 else
1368 AssertRC(rc);
1369
1370 /*
1371 * Need to insert the page in the Phys2Virt lookup tree?
1372 */
1373 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
1374 {
1375#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1376 AssertRelease(!pPhys2Virt->offNextAlias);
1377#endif
1378 unsigned cbPhys = cbLeft;
1379 if (cbPhys > PAGE_SIZE - offPage)
1380 cbPhys = PAGE_SIZE - offPage;
1381 else
1382 Assert(iPage == pCur->cPages - 1);
1383 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
1384 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
1385 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
1386 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
1387#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1388 else
1389 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
1390 ("%RGp-%RGp offNextAlias=%#RX32\n",
1391 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1392#endif
1393 Log2(("PHYS2VIRT: Insert physical range %RGp-%RGp offNextAlias=%#RX32 %s\n",
1394 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1395 }
1396 }
1397 cbLeft -= PAGE_SIZE - offPage;
1398 offPage = 0;
1399 }
1400
1401 return 0;
1402}
1403
1404#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1405
1406/**
1407 * Worker for pgmHandlerVirtualDumpPhysPages.
1408 *
1409 * @returns 0 (continue enumeration).
1410 * @param pNode The virtual handler node.
1411 * @param pvUser User argument, unused.
1412 */
1413static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1414{
1415 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
1416 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1417 Log(("PHYS2VIRT: Range %RGp-%RGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
1418 return 0;
1419}
1420
1421
1422/**
1423 * Assertion / logging helper for dumping all the
1424 * virtual handlers to the log.
1425 *
1426 * @param pVM Pointer to the shared VM structure.
1427 */
1428void pgmHandlerVirtualDumpPhysPages(PVM pVM)
1429{
1430 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, true /* from left */,
1431 pgmHandlerVirtualDumpPhysPagesCallback, 0);
1432}
1433
1434#endif /* VBOX_STRICT || LOG_ENABLED */
1435#ifdef VBOX_STRICT
1436
1437/**
1438 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1439 * and its AVL enumerators.
1440 */
1441typedef struct PGMAHAFIS
1442{
1443 /** The current physical address. */
1444 RTGCPHYS GCPhys;
1445 /** The state we've calculated. */
1446 unsigned uVirtStateFound;
1447 /** The state we're matching up to. */
1448 unsigned uVirtState;
1449 /** Number of errors. */
1450 unsigned cErrors;
1451 /** The VM handle. */
1452 PVM pVM;
1453} PGMAHAFIS, *PPGMAHAFIS;
1454
1455
1456#if 0 /* unused */
1457/**
1458 * Verify virtual handler by matching physical address.
1459 *
1460 * @returns 0
1461 * @param pNode Pointer to a PGMVIRTHANDLER.
1462 * @param pvUser Pointer to user parameter.
1463 */
1464static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
1465{
1466 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1467 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1468
1469 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1470 {
1471 if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
1472 {
1473 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1474 if (pState->uVirtState < uState)
1475 {
1476 error
1477 }
1478
1479 if (pState->uVirtState == uState)
1480 break; //??
1481 }
1482 }
1483 return 0;
1484}
1485#endif /* unused */
1486
1487
1488/**
1489 * Verify a virtual handler (enumeration callback).
1490 *
1491 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
1492 * the virtual handlers, esp. that the physical addresses match up.
1493 *
1494 * @returns 0
1495 * @param pNode Pointer to a PGMVIRTHANDLER.
1496 * @param pvUser Pointer to a PPGMAHAFIS structure.
1497 */
1498static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1499{
1500 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
1501 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1502 PVM pVM = pState->pVM;
1503
1504 /*
1505 * Validate the type and calc state.
1506 */
1507 switch (pVirt->enmType)
1508 {
1509 case PGMVIRTHANDLERTYPE_WRITE:
1510 case PGMVIRTHANDLERTYPE_ALL:
1511 break;
1512 default:
1513 AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
1514 pState->cErrors++;
1515 return 0;
1516 }
1517 const unsigned uState = pgmHandlerVirtualCalcState(pVirt);
1518
1519 /*
1520 * Check key alignment.
1521 */
1522 if ( (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.Key & PAGE_OFFSET_MASK)
1523 && pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
1524 {
1525 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
1526 pVirt->aPhysToVirt[0].Core.Key, pVirt->Core.Key, R3STRING(pVirt->pszDesc)));
1527 pState->cErrors++;
1528 }
1529
1530 if ( (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.KeyLast & PAGE_OFFSET_MASK)
1531 && pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
1532 {
1533 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
1534 pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->Core.KeyLast, R3STRING(pVirt->pszDesc)));
1535 pState->cErrors++;
1536 }
1537
1538 /*
1539 * Check pages for sanity and state.
1540 */
1541 RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->Core.Key;
1542 for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
1543 {
1544 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1545 {
1546 PVMCPU pVCpu = &pVM->aCpus[i];
1547
1548 RTGCPHYS GCPhysGst;
1549 uint64_t fGst;
1550 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
1551 if ( rc == VERR_PAGE_NOT_PRESENT
1552 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1553 {
1554 if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
1555 {
1556 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysNew=~0 iPage=%#x %RGv %s\n",
1557 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1558 pState->cErrors++;
1559 }
1560 continue;
1561 }
1562
1563 AssertRCReturn(rc, 0);
1564 if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
1565 {
1566 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysGst=%RGp iPage=%#x %RGv %s\n",
1567 pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1568 pState->cErrors++;
1569 continue;
1570 }
1571
1572 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
1573 if (!pPage)
1574 {
1575 AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%RGp iPage=%#x %RGv %s\n",
1576 GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1577 pState->cErrors++;
1578 continue;
1579 }
1580
1581 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1582 {
1583 AssertMsgFailed(("virt handler state mismatch. pPage=%R[pgmpage] GCPhysGst=%RGp iPage=%#x %RGv state=%d expected>=%d %s\n",
1584 pPage, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, R3STRING(pVirt->pszDesc)));
1585 pState->cErrors++;
1586 continue;
1587 }
1588 } /* for each VCPU */
1589 } /* for pages in virtual mapping. */
1590
1591 return 0;
1592}
1593
1594
1595/**
1596 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1597 * that the physical addresses associated with virtual handlers are correct.
1598 *
1599 * @returns Number of mismatches.
1600 * @param pVM The VM handle.
1601 */
1602VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
1603{
1604 PPGM pPGM = &pVM->pgm.s;
1605 PGMAHAFIS State;
1606 State.GCPhys = 0;
1607 State.uVirtState = 0;
1608 State.uVirtStateFound = 0;
1609 State.cErrors = 0;
1610 State.pVM = pVM;
1611
1612 Assert(PGMIsLockOwner(pVM));
1613
1614 /*
1615 * Check the RAM flags against the handlers.
1616 */
1617 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); pRam; pRam = pRam->CTX_SUFF(pNext))
1618 {
1619 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1620 for (unsigned iPage = 0; iPage < cPages; iPage++)
1621 {
1622 PGMPAGE const *pPage = &pRam->aPages[iPage];
1623 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1624 {
1625 State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);
1626
1627 /*
1628 * Physical first - calculate the state based on the handlers
1629 * active on the page, then compare.
1630 */
1631 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1632 {
1633 /* the first */
1634 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
1635 if (!pPhys)
1636 {
1637 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
1638 if ( pPhys
1639 && pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
1640 pPhys = NULL;
1641 Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
1642 }
1643 if (pPhys)
1644 {
1645 unsigned uState = pgmHandlerPhysicalCalcState(pPhys);
1646
1647 /* more? */
1648 while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
1649 {
1650 PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
1651 pPhys->Core.KeyLast + 1, true);
1652 if ( !pPhys2
1653 || pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
1654 break;
1655 unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
1656 uState = RT_MAX(uState, uState2);
1657 pPhys = pPhys2;
1658 }
1659
1660 /* compare.*/
1661 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1662 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1663 {
1664 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1665 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
1666 State.cErrors++;
1667 }
1668
1669#ifdef IN_RING3
1670 /* validate that REM is handling it. */
1671 if ( !REMR3IsPageAccessHandled(pVM, State.GCPhys)
1672 /* ignore shadowed ROM for the time being. */
1673 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW)
1674 {
1675 AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
1676 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
1677 State.cErrors++;
1678 }
1679#endif
1680 }
1681 else
1682 {
1683 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1684 State.cErrors++;
1685 }
1686 }
1687
1688 /*
1689 * Virtual handlers.
1690 */
1691 if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
1692 {
1693 State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
1694#if 1
1695 /* locate all the matching physical ranges. */
1696 State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
1697 RTGCPHYS GCPhysKey = State.GCPhys;
1698 for (;;)
1699 {
1700 PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1701 GCPhysKey, true /* above-or-equal */);
1702 if ( !pPhys2Virt
1703 || (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1704 break;
1705
1706 /* the head */
1707 GCPhysKey = pPhys2Virt->Core.KeyLast;
1708 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1709 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1710 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1711
1712 /* any aliases */
1713 while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1714 {
1715 pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1716 pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1717 uState = pgmHandlerVirtualCalcState(pCur);
1718 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1719 }
1720
1721 /* done? */
1722 if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1723 break;
1724 }
1725#else
1726 /* very slow */
1727 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
1728#endif
1729 if (State.uVirtState != State.uVirtStateFound)
1730 {
1731 AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
1732 State.GCPhys, State.uVirtState, State.uVirtStateFound));
1733 State.cErrors++;
1734 }
1735 }
1736 }
1737 } /* foreach page in ram range. */
1738 } /* foreach ram range. */
1739
1740 /*
1741 * Check that the physical addresses of the virtual handlers matches up
1742 * and that they are otherwise sane.
1743 */
1744 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);
1745
1746 /*
1747 * Do the reverse check for physical handlers.
1748 */
1749 /** @todo */
1750
1751 return State.cErrors;
1752}
1753
1754#endif /* VBOX_STRICT */
1755