VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@94800

Last change on this file since 94800 was 94800, checked in by vboxsync, 2 years ago

VMM/IEM,PGM: TLB work, esp. on the data one. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 74.9 KB
1/* $Id: PGMAllHandler.cpp 94800 2022-05-03 21:49:43Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/dbgf.h>
32#ifdef IN_RING0
33# include <VBox/vmm/pdmdev.h>
34#endif
35#include "PGMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include "PGMInline.h"
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
42# include <iprt/asm-amd64-x86.h>
43#endif
44#include <iprt/string.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47#include <VBox/vmm/selm.h>
48
49
50/*********************************************************************************************************************************
51* Global Variables *
52*********************************************************************************************************************************/
53/** Dummy physical access handler type record. */
54CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
55{
56 /* .hType = */ UINT64_C(0x93b7557e1937aaff),
57 /* .enmKind = */ PGMPHYSHANDLERKIND_INVALID,
58 /* .uState = */ PGM_PAGE_HNDL_PHYS_STATE_ALL,
59 /* .fKeepPgmLock = */ true,
60 /* .fRing0DevInsIdx = */ false,
61#ifdef IN_RING0
62 /* .afPadding = */ {false},
63 /* .pfnHandler = */ pgmR0HandlerPhysicalHandlerToRing3,
64 /* .pfnPfHandler = */ pgmR0HandlerPhysicalPfHandlerToRing3,
65#elif defined(IN_RING3)
66 /* .fRing0Enabled = */ false,
67 /* .pfnHandler = */ pgmR3HandlerPhysicalHandlerInvalid,
68#else
69# error "unsupported context"
70#endif
71 /* .pszDesc = */ "dummy"
72};
73
74
75/*********************************************************************************************************************************
76* Internal Functions *
77*********************************************************************************************************************************/
78static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
79 void *pvBitmap, uint32_t offBitmap);
80static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
81static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);
82
83
84#ifndef IN_RING3
85
86/**
87 * @callback_method_impl{FNPGMPHYSHANDLER,
88 * Dummy for forcing ring-3 handling of the access.}
89 */
90DECLCALLBACK(VBOXSTRICTRC)
91pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
92 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
93{
94 RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
95 return VINF_EM_RAW_EMULATE_INSTR;
96}
97
98
99/**
100 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
101 * Dummy for forcing ring-3 handling of the access.}
102 */
103DECLCALLBACK(VBOXSTRICTRC)
104pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
105 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
106{
107 RT_NOREF(pVM, pVCpu, uErrorCode, pRegFrame, pvFault, GCPhysFault, uUser);
108 return VINF_EM_RAW_EMULATE_INSTR;
109}
110
111#endif /* !IN_RING3 */
112
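/*
 * Editor's illustration (not part of the original file): what a real ring-3
 * access handler with the FNPGMPHYSHANDLER shape used above could look like.
 * The function and variable names are made up; uUser typically carries a
 * device instance pointer or index (see pgmHandlerPhysicalExCreate below).
 */
#if 0 /* example only */
static DECLCALLBACK(VBOXSTRICTRC)
exampleDevWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                       PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, pvPhys, pvBuf, enmOrigin);
    if (enmAccessType == PGMACCESSTYPE_WRITE)
        Log(("exampleDevWriteHandler: cbBuf=%#zx GCPhys=%RGp uUser=%#RX64\n", cbBuf, GCPhys, uUser));
    return VINF_PGM_HANDLER_DO_DEFAULT; /* let PGM carry out the actual access */
}
#endif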
113
114/**
115 * Creates a physical access handler, allocation part.
116 *
117 * @returns VBox status code.
118 * @retval VERR_OUT_OF_RESOURCES if no more handlers available.
119 *
120 * @param pVM The cross context VM structure.
121 * @param hType The handler type registration handle.
122 * @param uUser User argument to the handlers (not pointer).
123 * @param pszDesc Description of this handler. If NULL, the type
124 * description will be used instead.
125 * @param ppPhysHandler Where to return the access handler structure on
126 * success.
127 */
128int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
129 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
130{
131 /*
132 * Validate input.
133 */
134 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
135 AssertReturn(pType, VERR_INVALID_HANDLE);
136 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
137 AssertPtr(ppPhysHandler);
138
139 Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
140 uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));
141
142 /*
143 * Allocate and initialize the new entry.
144 */
145 int rc = PGM_LOCK(pVM);
146 AssertRCReturn(rc, rc);
147
148 PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
149 if (pNew)
150 {
151 pNew->Key = NIL_RTGCPHYS;
152 pNew->KeyLast = NIL_RTGCPHYS;
153 pNew->cPages = 0;
154 pNew->cAliasedPages = 0;
155 pNew->cTmpOffPages = 0;
156 pNew->uUser = uUser;
157 pNew->hType = hType;
158 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc
159#ifdef IN_RING3
160 : pType->pszDesc;
161#else
162 : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
163#endif
164
165 PGM_UNLOCK(pVM);
166 *ppPhysHandler = pNew;
167 return VINF_SUCCESS;
168 }
169
170 PGM_UNLOCK(pVM);
171 return VERR_OUT_OF_RESOURCES;
172}
173
174
175/**
176 * Duplicates a physical access handler.
177 *
178 * @returns VBox status code.
179 * @retval VINF_SUCCESS when successfully installed.
180 *
181 * @param pVM The cross context VM structure.
182 * @param pPhysHandlerSrc The source handler to duplicate
183 * @param ppPhysHandler Where to return the access handler structure on
184 * success.
185 */
186int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
187{
188 return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
189 pPhysHandlerSrc->pszDesc, ppPhysHandler);
190}
191
192
193/**
194 * Register an access handler for a physical range.
195 *
196 * @returns VBox status code.
197 * @retval VINF_SUCCESS when successfully installed.
198 * @retval VINF_PGM_GCPHYS_ALIASED may also be returned; see PGMHandlerPhysicalRegister.
199 *
200 * @param pVM The cross context VM structure.
201 * @param pPhysHandler The physical handler.
202 * @param GCPhys Start physical address.
203 * @param GCPhysLast Last physical address. (inclusive)
204 */
205int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
206{
207 /*
208 * Validate input.
209 */
210 AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
211 PGMPHYSHANDLERTYPE const hType = pPhysHandler->hType;
212 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
213 AssertReturn(pType, VERR_INVALID_HANDLE);
214 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
215
216 AssertPtr(pPhysHandler);
217
218 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
219 hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
220 AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
221
222 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
223 Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */
224
225 switch (pType->enmKind)
226 {
227 case PGMPHYSHANDLERKIND_WRITE:
228 break;
229 case PGMPHYSHANDLERKIND_MMIO:
230 case PGMPHYSHANDLERKIND_ALL:
231 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
232 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
233 AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
234 break;
235 default:
236 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
237 return VERR_INVALID_PARAMETER;
238 }
239
240 /*
241 * We require the range to be within registered ram.
242 * There is no apparent need to support ranges which cover more than one ram range.
243 */
244 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
245 if ( !pRam
246 || GCPhysLast > pRam->GCPhysLast)
247 {
248#ifdef IN_RING3
249 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
250#endif
251 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
252 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
253 }
254 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
255 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
256
257 /*
258 * Try insert into list.
259 */
260 pPhysHandler->Key = GCPhys;
261 pPhysHandler->KeyLast = GCPhysLast;
262 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
263
264 int rc = PGM_LOCK(pVM);
265 if (RT_SUCCESS(rc))
266 {
267 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
268 if (RT_SUCCESS(rc))
269 {
270 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
271 if (rc == VINF_PGM_SYNC_CR3)
272 rc = VINF_PGM_GCPHYS_ALIASED;
273
274#if defined(IN_RING3) || defined(IN_RING0)
275 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
276#endif
277 PGM_UNLOCK(pVM);
278
279 if (rc != VINF_SUCCESS)
280 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
281 return rc;
282 }
283 PGM_UNLOCK(pVM);
284 }
285
286 pPhysHandler->Key = NIL_RTGCPHYS;
287 pPhysHandler->KeyLast = NIL_RTGCPHYS;
288
289 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
290
291#if defined(IN_RING3) && defined(VBOX_STRICT)
292 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
293#endif
294 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
295 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
296 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
297}
298
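/*
 * Worked example (editor's illustration) of the cPages formula used in
 * pgmHandlerPhysicalExRegister above, assuming 4 KiB guest pages:
 *   GCPhys = 0xa0000, GCPhysLast = 0xa3fff
 *   => (0xa3fff - (0xa0000 & X86_PTE_PAE_PG_MASK) + 0x1000) >> 12 = 4 pages.
 * Rounding GCPhys down to a page boundary first means a partially covered
 * first page still counts as a whole page.
 */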
299
300/**
301 * Register an access handler for a physical range.
302 *
303 * @returns VBox status code.
304 * @retval VINF_SUCCESS when successfully installed.
305 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
306 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
307 * flagged together with a pool clearing.
308 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
309 * one. A debug assertion is raised.
310 *
311 * @param pVM The cross context VM structure.
312 * @param GCPhys Start physical address.
313 * @param GCPhysLast Last physical address. (inclusive)
314 * @param hType The handler type registration handle.
315 * @param uUser User argument to the handler.
316 * @param pszDesc Description of this handler. If NULL, the type
317 * description will be used instead.
318 */
319VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
320 uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
321{
322#ifdef LOG_ENABLED
323 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
324 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
325 GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
326#endif
327
328 PPGMPHYSHANDLER pNew;
329 int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
330 if (RT_SUCCESS(rc))
331 {
332 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
333 if (RT_SUCCESS(rc))
334 return rc;
335 pgmHandlerPhysicalExDestroy(pVM, pNew);
336 }
337 return rc;
338}
339
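/*
 * Usage sketch (editor's illustration, names assumed): registering a write
 * handler over a device range.  Note that GCPhysLast is inclusive, so the
 * last byte of the range is GCPhys + cb - 1.  hType is assumed to come from
 * the handler type registration API, which lives outside this file.
 */
#if 0 /* example only */
int rc = PGMHandlerPhysicalRegister(pVM, GCPhysMmio, GCPhysMmio + cbMmio - 1 /* inclusive */,
                                    hType, (uint64_t)(uintptr_t)pThis, "Example device");
AssertRCReturn(rc, rc);
#endif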
340
341/**
342 * Sets ram range flags and attempts updating shadow PTs.
343 *
344 * @returns VBox status code.
345 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
346 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
347 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
348 * @param pVM The cross context VM structure.
349 * @param pCur The physical handler.
350 * @param pRam The RAM range.
351 * @param pvBitmap Dirty bitmap. Optional.
352 * @param offBitmap Dirty bitmap offset.
353 */
354static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
355 void *pvBitmap, uint32_t offBitmap)
356{
357 /*
358 * Iterate the guest ram pages updating the flags and flushing PT entries
359 * mapping the page.
360 */
361 bool fFlushTLBs = false;
362 int rc = VINF_SUCCESS;
363 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
364 const unsigned uState = pCurType->uState;
365 uint32_t cPages = pCur->cPages;
366 uint32_t i = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
367 for (;;)
368 {
369 PPGMPAGE pPage = &pRam->aPages[i];
370 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
371 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));
372
373 /* Only do upgrades. */
374 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
375 {
376 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
377
378 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
379 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
380 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
381 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
382 rc = rc2;
383
384#ifdef VBOX_WITH_NATIVE_NEM
385 /* Tell NEM about the protection update. */
386 if (VM_IS_NEM_ENABLED(pVM))
387 {
388 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
389 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
390 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
391 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
392 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
393 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
394 }
395#endif
396 if (pvBitmap)
397 ASMBitSet(pvBitmap, offBitmap);
398 }
399
400 /* next */
401 if (--cPages == 0)
402 break;
403 i++;
404 offBitmap++;
405 }
406
407 if (fFlushTLBs)
408 {
409 PGM_INVL_ALL_VCPU_TLBS(pVM);
410 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
411 }
412 else
413 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
414
415 return rc;
416}
417
418
419/**
420 * Deregister a physical page access handler.
421 *
422 * @returns VBox status code.
423 * @param pVM The cross context VM structure.
424 * @param pPhysHandler The handler to deregister (but not free).
425 */
426int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
427{
428 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
429 pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));
430
431 int rc = PGM_LOCK(pVM);
432 AssertRCReturn(rc, rc);
433
434 RTGCPHYS const GCPhys = pPhysHandler->Key;
435 AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);
436
437 /*
438 * Remove the handler from the tree.
439 */
440
441 PPGMPHYSHANDLER pRemoved;
442 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
443 if (RT_SUCCESS(rc))
444 {
445 if (pRemoved == pPhysHandler)
446 {
447 /*
448 * Clear the page bits, notify the REM about this change and clear
449 * the cache.
450 */
451 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
452 if (VM_IS_NEM_ENABLED(pVM))
453 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
454 pVM->pgm.s.idxLastPhysHandler = 0;
455
456 pPhysHandler->Key = NIL_RTGCPHYS;
457 pPhysHandler->KeyLast = NIL_RTGCPHYS;
458
459 PGM_UNLOCK(pVM);
460
461 return VINF_SUCCESS;
462 }
463
464 /*
465 * Both of the failure conditions here are considered internal processing
466 * errors because they can only be caused by race conditions or corruption.
467 * If we ever need to handle concurrent deregistration, we have to move
468 * the NIL_RTGCPHYS check inside the PGM lock.
469 */
470 pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
471 }
472
473 PGM_UNLOCK(pVM);
474
475 if (RT_FAILURE(rc))
476 AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
477 else
478 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
479 GCPhys, pRemoved, pPhysHandler));
480 return VERR_PGM_HANDLER_IPE_1;
481}
482
483
484/**
485 * Destroys (frees) a physical handler.
486 *
487 * The caller must deregister it before destroying it!
488 *
489 * @returns VBox status code.
490 * @param pVM The cross context VM structure.
491 * @param pHandler The handler to free. NULL if ignored.
492 */
493int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
494{
495 if (pHandler)
496 {
497 AssertPtr(pHandler);
498 AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
499
500 int rc = PGM_LOCK(pVM);
501 if (RT_SUCCESS(rc))
502 {
503 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
504 PGM_UNLOCK(pVM);
505 }
506 return rc;
507 }
508 return VINF_SUCCESS;
509}
510
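/*
 * Lifecycle sketch (editor's illustration) for the internal Ex API above:
 * allocate, register, deregister, then free.  PGMHandlerPhysicalRegister is
 * essentially the first two steps fused together.
 */
#if 0 /* example only */
PPGMPHYSHANDLER pHandler;
int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pHandler);
if (RT_SUCCESS(rc))
{
    rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhysLast);
    if (RT_SUCCESS(rc))
    {
        /* ... the range is now monitored ... */
        rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);
    }
    pgmHandlerPhysicalExDestroy(pVM, pHandler); /* must already be deregistered */
}
#endif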
511
512/**
513 * Deregister a physical page access handler.
514 *
515 * @returns VBox status code.
516 * @param pVM The cross context VM structure.
517 * @param GCPhys Start physical address.
518 */
519VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
520{
521 AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
522
523 /*
524 * Find the handler.
525 */
526 int rc = PGM_LOCK(pVM);
527 AssertRCReturn(rc, rc);
528
529 PPGMPHYSHANDLER pRemoved;
530 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
531 if (RT_SUCCESS(rc))
532 {
533 Assert(pRemoved->Key == GCPhys);
534 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
535 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));
536
537 /*
538 * Clear the page bits, notify the REM about this change and clear
539 * the cache.
540 */
541 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
542 if (VM_IS_NEM_ENABLED(pVM))
543 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
544 pVM->pgm.s.idxLastPhysHandler = 0;
545
546 pRemoved->Key = NIL_RTGCPHYS;
547 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);
548
549 PGM_UNLOCK(pVM);
550 return rc;
551 }
552
553 PGM_UNLOCK(pVM);
554
555 if (rc == VERR_NOT_FOUND)
556 {
557 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
558 rc = VERR_PGM_HANDLER_NOT_FOUND;
559 }
560 return rc;
561}
562
563
564/**
565 * Shared code with PGMHandlerPhysicalModify.
566 */
567static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
568{
569#ifdef VBOX_WITH_NATIVE_NEM
570 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
571 RTGCPHYS GCPhysStart = pCur->Key;
572 RTGCPHYS GCPhysLast = pCur->KeyLast;
573
574 /*
575 * Page align the range.
576 *
577 * Since we've reset (recalculated) the physical handler state of all pages
578 * we can make use of the page states to figure out whether a page should be
579 * included in the REM notification or not.
580 */
581 if ( (pCur->Key & GUEST_PAGE_OFFSET_MASK)
582 || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
583 {
584 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
585
586 if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
587 {
588 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
589 if ( pPage
590 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
591 {
592 RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
593 if ( GCPhys > GCPhysLast
594 || GCPhys < GCPhysStart)
595 return;
596 GCPhysStart = GCPhys;
597 }
598 else
599 GCPhysStart &= X86_PTE_PAE_PG_MASK;
600 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
601 }
602
603 if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
604 {
605 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
606 if ( pPage
607 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
608 {
609 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
610 if ( GCPhys < GCPhysStart
611 || GCPhys > GCPhysLast)
612 return;
613 GCPhysLast = GCPhys;
614 }
615 else
616 GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
617 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
618 }
619 }
620
621 /*
622 * Tell NEM.
623 */
624 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhysStart);
625 RTGCPHYS const cb = GCPhysLast - GCPhysStart + 1;
626 uint8_t u2State = UINT8_MAX;
627 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
628 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
629 if (u2State != UINT8_MAX && pRam)
630 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
631 cb >> GUEST_PAGE_SHIFT, u2State);
632#else
633 RT_NOREF(pVM, pCur);
634#endif
635}
636
637
638/**
639 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
640 * edge pages.
641 */
642DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
643{
644 /*
645 * Look for other handlers.
646 */
647 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
648 for (;;)
649 {
650 PPGMPHYSHANDLER pCur;
651 int rc;
652 if (fAbove)
653 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
654 GCPhys, &pCur);
655 else
656 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
657 GCPhys, &pCur);
658 if (rc == VERR_NOT_FOUND)
659 break;
660 AssertRCBreak(rc);
661 if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
662 break;
663 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
664 uState = RT_MAX(uState, pCurType->uState);
665
666 /* next? */
667 RTGCPHYS GCPhysNext = fAbove
668 ? pCur->KeyLast + 1
669 : pCur->Key - 1;
670 if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
671 break;
672 GCPhys = GCPhysNext;
673 }
674
675 /*
676 * Update if we found something that is a higher priority state than the current.
677 */
678 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
679 {
680 PPGMPAGE pPage;
681 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
682 if ( RT_SUCCESS(rc)
683 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
684 {
685 /* This should normally not be necessary. */
686 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
687 bool fFlushTLBs;
688 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
689 if (RT_SUCCESS(rc) && fFlushTLBs)
690 PGM_INVL_ALL_VCPU_TLBS(pVM);
691 else
692 AssertRC(rc);
693
694#ifdef VBOX_WITH_NATIVE_NEM
695 /* Tell NEM about the protection update. */
696 if (VM_IS_NEM_ENABLED(pVM))
697 {
698 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
699 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
700 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
701 PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
702 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
703 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
704 }
705#endif
706 }
707 else
708 AssertRC(rc);
709 }
710}
711
712
713/**
714 * Resets an aliased page.
715 *
716 * @param pVM The cross context VM structure.
717 * @param pPage The page.
718 * @param GCPhysPage The page address in case it comes in handy.
719 * @param pRam The RAM range the page is associated with (for NEM
720 * notifications).
721 * @param fDoAccounting Whether to perform accounting. (Only set during
722 * reset where pgmR3PhysRamReset doesn't have the
723 * handler structure handy.)
724 */
725void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting)
726{
727 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
728 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
729 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
730#ifdef VBOX_WITH_NATIVE_NEM
731 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
732#endif
733
734 /*
735 * Flush any shadow page table references *first*.
736 */
737 bool fFlushTLBs = false;
738 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
739 AssertLogRelRCReturnVoid(rc);
740 HMFlushTlbOnAllVCpus(pVM);
741
742 /*
743 * Make it an MMIO/Zero page.
744 */
745 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
746 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
747 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
748 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
749 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
750
751 /* Flush its TLB entry. */
752 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
753 /* Not calling IEMTlbInvalidateAllPhysicalAllCpus here as aliased pages are handled like MMIO by the IEM TLB. */
754
755 /*
756 * Do accounting for pgmR3PhysRamReset.
757 */
758 if (fDoAccounting)
759 {
760 PPGMPHYSHANDLER pHandler;
761 rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
762 if (RT_SUCCESS(rc))
763 {
764 Assert(pHandler->cAliasedPages > 0);
765 pHandler->cAliasedPages--;
766 }
767 else
768 AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
769 }
770
771#ifdef VBOX_WITH_NATIVE_NEM
772 /*
773 * Tell NEM about the protection change.
774 */
775 if (VM_IS_NEM_ENABLED(pVM))
776 {
777 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
778 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
779 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
780 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
781 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
782 }
783#else
784 RT_NOREF(pRam);
785#endif
786}
787
788
789/**
790 * Resets ram range flags.
791 *
794 * @param pVM The cross context VM structure.
795 * @param pCur The physical handler.
796 *
797 * @remark We don't start messing with the shadow page tables, as we've
798 * already got code in Trap0e which deals with out of sync handler
799 * flags (originally conceived for global pages).
800 */
801static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
802{
803 /*
804 * Iterate the guest ram pages updating the state.
805 */
806 RTUINT cPages = pCur->cPages;
807 RTGCPHYS GCPhys = pCur->Key;
808 PPGMRAMRANGE pRamHint = NULL;
809 for (;;)
810 {
811 PPGMPAGE pPage;
812 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
813 if (RT_SUCCESS(rc))
814 {
815 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
816 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
817 bool fNemNotifiedAlready = false;
818 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
819 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
820 {
821 Assert(pCur->cAliasedPages > 0);
822 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/);
823 pCur->cAliasedPages--;
824 fNemNotifiedAlready = true;
825 }
826#ifdef VBOX_STRICT
827 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
828 AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
829 ("%RGp %R[pgmpage]\n", GCPhys, pPage));
830#endif
831 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
832
833#ifdef VBOX_WITH_NATIVE_NEM
834 /* Tell NEM about the protection change. */
835 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
836 {
837 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
838 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
839 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
840 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
841 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
842 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
843 }
844#endif
845 RT_NOREF(fNemNotifiedAlready);
846 }
847 else
848 AssertRC(rc);
849
850 /* next */
851 if (--cPages == 0)
852 break;
853 GCPhys += GUEST_PAGE_SIZE;
854 }
855
856 pCur->cAliasedPages = 0;
857 pCur->cTmpOffPages = 0;
858
859 /*
860 * Check for partial start and end pages.
861 */
862 if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
863 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
864 if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
865 pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
866}
867
868
869#if 0 /* unused */
870/**
871 * Modify a physical page access handler.
872 *
873 * Modification can only be done to the range itself, not the type or anything else.
874 *
875 * @returns VBox status code.
876 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
877 * and a new registration must be performed!
878 * @param pVM The cross context VM structure.
879 * @param GCPhysCurrent Current location.
880 * @param GCPhys New location.
881 * @param GCPhysLast New last location.
882 */
883VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
884{
885 /*
886 * Remove it.
887 */
888 int rc;
889 PGM_LOCK_VOID(pVM);
890 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
891 if (pCur)
892 {
893 /*
894 * Clear the ram flags. (We're gonna move or free it!)
895 */
896 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
897 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
898 /** @todo pCurType validation. */
899 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
900 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
901
902 /*
903 * Validate the new range, modify and reinsert.
904 */
905 if (GCPhysLast >= GCPhys)
906 {
907 /*
908 * We require the range to be within registered ram.
909 * There is no apparent need to support ranges which cover more than one ram range.
910 */
911 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
912 if ( pRam
913 && GCPhys <= pRam->GCPhysLast
914 && GCPhysLast >= pRam->GCPhys)
915 {
916 pCur->Core.Key = GCPhys;
917 pCur->Core.KeyLast = GCPhysLast;
918 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;
919
920 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
921 {
922 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
923 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
924
925 /*
926 * Set ram flags, flush shadow PT entries and finally tell REM about this.
927 */
928 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);
929
930 /** @todo NEM: not sure we need this notification... */
931 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
932
933 PGM_UNLOCK(pVM);
934
935 PGM_INVL_ALL_VCPU_TLBS(pVM);
936 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
937 GCPhysCurrent, GCPhys, GCPhysLast));
938 return VINF_SUCCESS;
939 }
940
941 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
942 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
943 }
944 else
945 {
946 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
947 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
948 }
949 }
950 else
951 {
952 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
953 rc = VERR_INVALID_PARAMETER;
954 }
955
956 /*
957 * Invalid new location, flush the cache and free it.
958 * We've only gotta notify REM and free the memory.
959 */
960 if (VM_IS_NEM_ENABLED(pVM))
961 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
962 pVM->pgm.s.pLastPhysHandlerR0 = 0;
963 pVM->pgm.s.pLastPhysHandlerR3 = 0;
964 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
965 MMHyperFree(pVM, pCur);
966 }
967 else
968 {
969 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
970 rc = VERR_PGM_HANDLER_NOT_FOUND;
971 }
972
973 PGM_UNLOCK(pVM);
974 return rc;
975}
976#endif /* unused */
977
978
979/**
980 * Changes the user callback arguments associated with a physical access handler.
981 *
982 * @returns VBox status code.
983 * @param pVM The cross context VM structure.
984 * @param GCPhys Start physical address of the handler.
985 * @param uUser User argument to the handlers.
986 */
987VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
988{
989 /*
990 * Find the handler and make the change.
991 */
992 int rc = PGM_LOCK(pVM);
993 AssertRCReturn(rc, rc);
994
995 PPGMPHYSHANDLER pCur;
996 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
997 if (RT_SUCCESS(rc))
998 {
999 Assert(pCur->Key == GCPhys);
1000 pCur->uUser = uUser;
1001 }
1002 else if (rc == VERR_NOT_FOUND)
1003 {
1004 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1005 rc = VERR_PGM_HANDLER_NOT_FOUND;
1006 }
1007
1008 PGM_UNLOCK(pVM);
1009 return rc;
1010}
1011
1012#if 0 /* unused */
1013
1014/**
1015 * Splits a physical access handler in two.
1016 *
1017 * @returns VBox status code.
1018 * @param pVM The cross context VM structure.
1019 * @param GCPhys Start physical address of the handler.
1020 * @param GCPhysSplit The split address.
1021 */
1022VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
1023{
1024 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
1025
1026 /*
1027 * Do the allocation without owning the lock.
1028 */
1029 PPGMPHYSHANDLER pNew;
1030 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
1031 if (RT_FAILURE(rc))
1032 return rc;
1033
1034 /*
1035 * Get the handler.
1036 */
1037 PGM_LOCK_VOID(pVM);
1038 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1039 if (RT_LIKELY(pCur))
1040 {
1041 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
1042 {
1043 /*
1044 * Create new handler node for the 2nd half.
1045 */
1046 *pNew = *pCur;
1047 pNew->Core.Key = GCPhysSplit;
1048 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1049
1050 pCur->Core.KeyLast = GCPhysSplit - 1;
1051 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1052
1053 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
1054 {
1055 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
1056 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1057 PGM_UNLOCK(pVM);
1058 return VINF_SUCCESS;
1059 }
1060 AssertMsgFailed(("whu?\n"));
1061 rc = VERR_PGM_PHYS_HANDLER_IPE;
1062 }
1063 else
1064 {
1065 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
1066 rc = VERR_INVALID_PARAMETER;
1067 }
1068 }
1069 else
1070 {
1071 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1072 rc = VERR_PGM_HANDLER_NOT_FOUND;
1073 }
1074 PGM_UNLOCK(pVM);
1075 MMHyperFree(pVM, pNew);
1076 return rc;
1077}
1078
1079
1080/**
1081 * Joins up two adjacent physical access handlers which have the same callbacks.
1082 *
1083 * @returns VBox status code.
1084 * @param pVM The cross context VM structure.
1085 * @param GCPhys1 Start physical address of the first handler.
1086 * @param GCPhys2 Start physical address of the second handler.
1087 */
1088VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
1089{
1090 /*
1091 * Get the handlers.
1092 */
1093 int rc;
1094 PGM_LOCK_VOID(pVM);
1095 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
1096 if (RT_LIKELY(pCur1))
1097 {
1098 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1099 if (RT_LIKELY(pCur2))
1100 {
1101 /*
1102 * Make sure that they are adjacent, and that they've got the same callbacks.
1103 */
1104 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
1105 {
1106 if (RT_LIKELY(pCur1->hType == pCur2->hType))
1107 {
1108 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1109 if (RT_LIKELY(pCur3 == pCur2))
1110 {
1111 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
1112 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1113 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
1114 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1115 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1116 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1117 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
1118 MMHyperFree(pVM, pCur2);
1119 PGM_UNLOCK(pVM);
1120 return VINF_SUCCESS;
1121 }
1122
1123 Assert(pCur3 == pCur2);
1124 rc = VERR_PGM_PHYS_HANDLER_IPE;
1125 }
1126 else
1127 {
1128 AssertMsgFailed(("mismatching handlers\n"));
1129 rc = VERR_ACCESS_DENIED;
1130 }
1131 }
1132 else
1133 {
1134 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
1135 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1136 rc = VERR_INVALID_PARAMETER;
1137 }
1138 }
1139 else
1140 {
1141 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
1142 rc = VERR_PGM_HANDLER_NOT_FOUND;
1143 }
1144 }
1145 else
1146 {
1147 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
1148 rc = VERR_PGM_HANDLER_NOT_FOUND;
1149 }
1150 PGM_UNLOCK(pVM);
1151 return rc;
1152
1153}
1154
1155#endif /* unused */
1156
1157/**
1158 * Resets any modifications to individual pages in a physical page access
1159 * handler region.
1160 *
1161 * This is used together with PGMHandlerPhysicalPageTempOff(),
1162 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
1163 *
1164 * @returns VBox status code.
1165 * @param pVM The cross context VM structure.
1166 * @param GCPhys The start address of the handler regions, i.e. what you
1167 * passed to PGMR3HandlerPhysicalRegister(),
1168 * PGMHandlerPhysicalRegisterEx() or
1169 * PGMHandlerPhysicalModify().
1170 */
1171VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
1172{
1173 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
1174 int rc = PGM_LOCK(pVM);
1175 AssertRCReturn(rc, rc);
1176
1177 /*
1178 * Find the handler.
1179 */
1180 PPGMPHYSHANDLER pCur;
1181 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1182 if (RT_SUCCESS(rc))
1183 {
1184 Assert(pCur->Key == GCPhys);
1185
1186 /*
1187 * Validate kind.
1188 */
1189 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1190 switch (pCurType->enmKind)
1191 {
1192 case PGMPHYSHANDLERKIND_WRITE:
1193 case PGMPHYSHANDLERKIND_ALL:
1194 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
1195 {
1196 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
1197 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1198 Assert(pRam);
1199 Assert(pRam->GCPhys <= pCur->Key);
1200 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1201
1202 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
1203 {
1204 /*
1205 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1206 * This could probably be optimized a bit wrt to flushing, but I'm too lazy
1207 * to do that now...
1208 */
1209 if (pCur->cAliasedPages)
1210 {
1211 PPGMPAGE pPage = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1212 RTGCPHYS GCPhysPage = pCur->Key;
1213 uint32_t cLeft = pCur->cPages;
1214 while (cLeft-- > 0)
1215 {
1216 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1217 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1218 {
1219 Assert(pCur->cAliasedPages > 0);
1220 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
1221 --pCur->cAliasedPages;
1222#ifndef VBOX_STRICT
1223 if (pCur->cAliasedPages == 0)
1224 break;
1225#endif
1226 }
1227 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1228 GCPhysPage += GUEST_PAGE_SIZE;
1229 pPage++;
1230 }
1231 Assert(pCur->cAliasedPages == 0);
1232 }
1233 }
1234 else if (pCur->cTmpOffPages > 0)
1235 {
1236 /*
1237 * Set the flags and flush shadow PT entries.
1238 */
1239 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
1240 }
1241
1242 pCur->cAliasedPages = 0;
1243 pCur->cTmpOffPages = 0;
1244
1245 rc = VINF_SUCCESS;
1246 break;
1247 }
1248
1249 /*
1250 * Invalid.
1251 */
1252 default:
1253 AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
1254 rc = VERR_PGM_PHYS_HANDLER_IPE;
1255 break;
1256 }
1257 }
1258 else if (rc == VERR_NOT_FOUND)
1259 {
1260 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1261 rc = VERR_PGM_HANDLER_NOT_FOUND;
1262 }
1263
1264 PGM_UNLOCK(pVM);
1265 return rc;
1266}
1267
1268
1269/**
1270 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
1271 * tracking.
1272 *
1273 * @returns VBox status code.
1274 * @param pVM The cross context VM structure.
1275 * @param GCPhys The start address of the handler region.
1276 * @param pvBitmap Dirty bitmap. Caller has cleared this already, only
1277 * dirty bits will be set. Caller also made sure it's big
1278 * enough.
1279 * @param offBitmap Dirty bitmap offset.
1280 * @remarks Caller must own the PGM critical section.
1281 */
1282DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
1283{
1284 LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
1285 PGM_LOCK_ASSERT_OWNER(pVM);
1286
1287 /*
1288 * Find the handler.
1289 */
1290 PPGMPHYSHANDLER pCur;
1291 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1292 if (RT_SUCCESS(rc))
1293 {
1294 Assert(pCur->Key == GCPhys);
1295
1296 /*
1297 * Validate kind.
1298 */
1299 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1300 if ( pCurType
1301 && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
1302 {
1303 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
1304
1305 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1306 Assert(pRam);
1307 Assert(pRam->GCPhys <= pCur->Key);
1308 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1309
1310 /*
1311 * Set the flags and flush shadow PT entries.
1312 */
1313 if (pCur->cTmpOffPages > 0)
1314 {
1315 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
1316 pCur->cTmpOffPages = 0;
1317 }
1318 else
1319 rc = VINF_SUCCESS;
1320 }
1321 else
1322 {
1323 AssertFailed();
1324 rc = VERR_WRONG_TYPE;
1325 }
1326 }
1327 else if (rc == VERR_NOT_FOUND)
1328 {
1329 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1330 rc = VERR_PGM_HANDLER_NOT_FOUND;
1331 }
1332
1333 return rc;
1334}
1335
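/*
 * Consumption sketch (editor's illustration) for the dirty bitmap filled in
 * by pgmHandlerPhysicalResetMmio2WithBitmap above.  ASMBitTest is the
 * iprt/asm.h counterpart of the ASMBitSet call used when the flags are set;
 * the caller is assumed to have sized and cleared the bitmap beforehand.
 */
#if 0 /* example only */
for (uint32_t iPage = 0; iPage < cPages; iPage++)
    if (ASMBitTest(pvBitmap, offBitmap + iPage))
    {
        /* Page iPage was written to since the last reset: copy it out,
           mark it for transfer, etc. */
    }
#endif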
1336
1337/**
1338 * Temporarily turns off the access monitoring of a page within a monitored
1339 * physical write/all page access handler region.
1340 *
1341 * Use this when no further \#PFs are required for that page. Be aware that
1342 * a page directory sync might reset the flags, and turn on access monitoring
1343 * for the page.
1344 *
1345 * The caller must do required page table modifications.
1346 *
1347 * @returns VBox status code.
1348 * @param pVM The cross context VM structure.
1349 * @param GCPhys The start address of the access handler. This
1350 * must be a fully page aligned range or we risk
1351 * messing up other handlers installed for the
1352 * start and end pages.
1353 * @param GCPhysPage The physical address of the page to turn off
1354 * access monitoring for.
1355 */
1356VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
1357{
1358 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
1359 int rc = PGM_LOCK(pVM);
1360 AssertRCReturn(rc, rc);
1361
1362 /*
1363 * Validate the range.
1364 */
1365 PPGMPHYSHANDLER pCur;
1366 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1367 if (RT_SUCCESS(rc))
1368 {
1369 Assert(pCur->Key == GCPhys);
1370 if (RT_LIKELY( GCPhysPage >= pCur->Key
1371 && GCPhysPage <= pCur->KeyLast))
1372 {
1373 Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
1374 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
1375
1376 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1377 AssertReturnStmt( pCurType
1378 && ( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1379 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
1380 PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1381
1382 /*
1383 * Change the page status.
1384 */
1385 PPGMPAGE pPage;
1386 PPGMRAMRANGE pRam;
1387 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1388 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1389 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1390 {
1391 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1392 pCur->cTmpOffPages++;
1393
1394#ifdef VBOX_WITH_NATIVE_NEM
1395 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1396 if (VM_IS_NEM_ENABLED(pVM))
1397 {
1398 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1399 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1400 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
1401 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1402 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1403 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1404 }
1405#endif
1406 }
1407 PGM_UNLOCK(pVM);
1408 return VINF_SUCCESS;
1409 }
1410 PGM_UNLOCK(pVM);
1411 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1412 return VERR_INVALID_PARAMETER;
1413 }
1414 PGM_UNLOCK(pVM);
1415
1416 if (rc == VERR_NOT_FOUND)
1417 {
1418 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1419 return VERR_PGM_HANDLER_NOT_FOUND;
1420 }
1421 return rc;
1422}
1423
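/*
 * Usage sketch (editor's illustration): the dirty-page tracking pattern the
 * VGA comment above hints at.  After logging the first write to a page, the
 * device switches monitoring off for that page and re-arms the whole region
 * later with PGMHandlerPhysicalReset.
 */
#if 0 /* example only */
/* In the write handler: record the write, then stop taking faults for the page. */
int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysVRam,
                                       GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
AssertRC(rc);
/* Later, once the dirty state has been consumed: re-enable monitoring. */
rc = PGMHandlerPhysicalReset(pVM, GCPhysVRam);
#endif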
1424
1425/**
1426 * Resolves an MMIO2 page.
1427 *
1428 * Caller has taken the PGM lock.
1429 *
1430 * @returns Pointer to the page if valid, NULL otherwise
1431 * @param pVM The cross context VM structure.
1432 * @param pDevIns The device owning it.
1433 * @param hMmio2 The MMIO2 region.
1434 * @param offMmio2Page The offset into the region.
1435 */
1436static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1437{
1438 /* Only works if the handle is in the handle table! */
1439 AssertReturn(hMmio2 != 0, NULL);
1440 hMmio2--;
1441
1442 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1443 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1444 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1445 AssertReturn(pCur, NULL);
1446 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1447
1448 /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
1449 for (;;)
1450 {
1451#ifdef IN_RING3
1452 AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
1453#else
1454 AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1455#endif
1456
1457 /* Does it match the offset? */
1458 if (offMmio2Page < pCur->cbReal)
1459 return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1460
1461 /* Advance if we can. */
1462 AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1463 offMmio2Page -= pCur->cbReal;
1464 hMmio2++;
1465 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1466 pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1467 AssertReturn(pCur, NULL);
1468 }
1469}
1470
1471
1472/**
1473 * Replaces an MMIO page with an MMIO2 page.
1474 *
1475 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1476 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1477 * backing, the caller must provide a replacement page. For various reasons the
1478 * replacement page must be an MMIO2 page.
1479 *
1480 * The caller must do the required page table modifications. You can get away
1481 * without making any modifications since it's an MMIO page; the cost is an extra
1482 * \#PF which will then resync the page.
1483 *
1484 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1485 *
1486 * The caller may still get handler callbacks even after this call and must be
1487 * able to deal correctly with such calls. The reason for these callbacks is
1488 * either that we're executing in the recompiler (which doesn't know about this
1489 * arrangement) or that we've been restored from saved state (where we won't
1490 * save the change).
1491 *
1492 * @returns VBox status code.
1493 * @param pVM The cross context VM structure.
1494 * @param GCPhys The start address of the access handler. This
1495 * must be a fully page aligned range or we risk
1496 * messing up other handlers installed for the
1497 * start and end pages.
1498 * @param GCPhysPage The physical address of the page to turn off
1499 * access monitoring for and replace with the MMIO2
1500 * page.
1501 * @param pDevIns The device instance owning @a hMmio2.
1502 * @param hMmio2 Handle to the MMIO2 region containing the page
1503 * to remap into the MMIO page at @a GCPhysPage.
1504 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1505 * should serve as backing memory.
1506 *
1507 * @remark May cause a page pool flush if used on a page that is already
1508 * aliased.
1509 *
1510 * @note This trick only works reliably if the two pages are never ever
1511 * mapped in the same page table. If they are, the page pool code will
1512 * be confused should either of them be flushed. See the special case
1513 * of zero page aliasing mentioned in #3170.
1514 *
1515 */
1516VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1517 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1518{
1519#ifdef VBOX_WITH_PGM_NEM_MODE
1520 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1521#endif
1522 int rc = PGM_LOCK(pVM);
1523 AssertRCReturn(rc, rc);
1524
1525 /*
1526 * Resolve the MMIO2 reference.
1527 */
1528 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1529 if (RT_LIKELY(pPageRemap))
1530 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1531 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1532 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1533 else
1534 {
1535 PGM_UNLOCK(pVM);
1536 return VERR_OUT_OF_RANGE;
1537 }
1538
1539 /*
1540 * Lookup and validate the range.
1541 */
1542 PPGMPHYSHANDLER pCur;
1543 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1544 if (RT_SUCCESS(rc))
1545 {
1546 Assert(pCur->Key == GCPhys);
1547 if (RT_LIKELY( GCPhysPage >= pCur->Key
1548 && GCPhysPage <= pCur->KeyLast))
1549 {
1550 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1551 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1552 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1553 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1554 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1555
1556 /*
1557 * Validate the page.
1558 */
1559 PPGMPAGE pPage;
1560 PPGMRAMRANGE pRam;
1561 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1562 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1563 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1564 {
1565 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1566 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1567 VERR_PGM_PHYS_NOT_MMIO2);
1568 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1569 {
1570 PGM_UNLOCK(pVM);
1571 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1572 }
1573
1574 /*
1575 * The page is already mapped as some other page, reset it
1576 * to an MMIO/ZERO page before doing the new mapping.
1577 */
1578 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]); %RHp -> %RHp\n",
1579 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1580 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
1581 pCur->cAliasedPages--;
1582 }
1583 Assert(PGM_PAGE_IS_ZERO(pPage));
1584
1585 /*
1586 * Do the actual remapping here.
1587 * This page now serves as an alias for the backing memory specified.
1588 */
1589 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1590 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1591 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1592 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1593 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1594 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1595 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1596 pCur->cAliasedPages++;
1597 Assert(pCur->cAliasedPages <= pCur->cPages);
1598
1599 /* Flush its TLB entry. */
1600 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1601 /* Not calling IEMTlbInvalidateAllPhysicalAllCpus here as aliased pages are handled like MMIO by the IEM TLB. */
1602
1603#ifdef VBOX_WITH_NATIVE_NEM
1604 /* Tell NEM about the backing and protection change. */
1605 if (VM_IS_NEM_ENABLED(pVM))
1606 {
1607 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1608 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1609 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1610 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1611 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1612 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1613 }
1614#endif
1615 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1616 PGM_UNLOCK(pVM);
1617 return VINF_SUCCESS;
1618 }
1619
1620 PGM_UNLOCK(pVM);
1621 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1622 return VERR_INVALID_PARAMETER;
1623 }
1624
1625 PGM_UNLOCK(pVM);
1626 if (rc == VERR_NOT_FOUND)
1627 {
1628        AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1629 return VERR_PGM_HANDLER_NOT_FOUND;
1630 }
1631 return rc;
1632}
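
/*
 * Editor's note: a minimal usage sketch, not part of the original file, of how
 * a device that registered a PGMPHYSHANDLERKIND_MMIO range might call the
 * function above to alias one of its MMIO pages to a page of an MMIO2 region.
 * The wrapper name and the chosen offsets are hypothetical; the parameters
 * match the ones used in the function body above (pVM, GCPhys, GCPhysPage,
 * hMmio2, offMmio2PageRemap).
 */
#if 0 /* illustrative sketch, not built */
static int devSampleAliasMmioPage(PVMCC pVM, RTGCPHYS GCPhysMmioBase, PGMMMIO2HANDLE hMmio2)
{
    /* Alias the second page of the MMIO range to the first page of the MMIO2 region. */
    RTGCPHYS const GCPhysPage        = GCPhysMmioBase + GUEST_PAGE_SIZE;
    RTGCPHYS const offMmio2PageRemap = 0;
    int rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmioBase, GCPhysPage, hMmio2, offMmio2PageRemap);
    if (rc == VINF_PGM_HANDLER_ALREADY_ALIASED)
        rc = VINF_SUCCESS; /* Already backed by that very MMIO2 page; nothing to do. */

    /* When done, PGMHandlerPhysicalReset(pVM, GCPhysMmioBase) restores the
       whole range to MMIO/ZERO pages and re-arms the access handler. */
    return rc;
}
#endif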
1633
1634
1635/**
1636 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1637 *
1638 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1639 * need to be a known MMIO2 page and that only shadow paging may access the
1640 * page. The latter distinction is important because the only use for this
1641 * feature is mapping the special APIC access page that VT-x uses to detect
1642 * APIC MMIO operations; the page is shared between all guest CPUs and is
1643 * never actually written to. At least at the moment.
1644 *
1645 * The caller must do the required page table modifications. You can get away
1646 * without making any modifications since it's an MMIO page; the cost is an
1647 * extra \#PF which will then resync the page.
1648 *
1649 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1650 *
1651 *
1652 * @returns VBox status code.
1653 * @param pVM The cross context VM structure.
1654 * @param GCPhys The start address of the access handler. This
1655 * must be a fully page aligned range or we risk
1656 * messing up other handlers installed for the
1657 * start and end pages.
1658 * @param GCPhysPage The physical address of the page to turn off
1659 * access monitoring for.
1660 * @param HCPhysPageRemap The physical address of the HC page that
1661 * serves as backing memory.
1662 *
1663 * @remark May cause a page pool flush if used on a page that is already
1664 * aliased.
1665 */
1666VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1667{
1668/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1669#ifdef VBOX_WITH_PGM_NEM_MODE
1670 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1671#endif
1672 int rc = PGM_LOCK(pVM);
1673 AssertRCReturn(rc, rc);
1674
1675 /*
1676 * Lookup and validate the range.
1677 */
1678 PPGMPHYSHANDLER pCur;
1679 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1680 if (RT_SUCCESS(rc))
1681 {
1682 Assert(pCur->Key == GCPhys);
1683 if (RT_LIKELY( GCPhysPage >= pCur->Key
1684 && GCPhysPage <= pCur->KeyLast))
1685 {
1686 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1687 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1688 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1689 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1690 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1691
1692 /*
1693 * Get and validate the pages.
1694 */
1695 PPGMPAGE pPage;
1696 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1697 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1698 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1699 {
1700 PGM_UNLOCK(pVM);
1701 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1702 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1703 VERR_PGM_PHYS_NOT_MMIO2);
1704 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1705 }
1706 Assert(PGM_PAGE_IS_ZERO(pPage));
1707
1708 /*
1709 * Do the actual remapping here.
1710 * This page now serves as an alias for the backing memory
1711 * specified as far as shadow paging is concerned.
1712 */
1713 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1714 GCPhysPage, pPage, HCPhysPageRemap));
1715 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1716 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1717 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1718 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1719 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1720 pCur->cAliasedPages++;
1721 Assert(pCur->cAliasedPages <= pCur->cPages);
1722
1723 /* Flush its TLB entry. */
1724 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1725 /* Not calling IEMTlbInvalidateAllPhysicalAllCpus here as aliased pages are handled like MMIO by the IEM TLB. */
1726
1727#ifdef VBOX_WITH_NATIVE_NEM
1728 /* Tell NEM about the backing and protection change. */
1729 if (VM_IS_NEM_ENABLED(pVM))
1730 {
1731 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1732 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1733 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1734 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1735 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1736 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1737 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1738 }
1739#endif
1740 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1741 PGM_UNLOCK(pVM);
1742 return VINF_SUCCESS;
1743 }
1744 PGM_UNLOCK(pVM);
1745        AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1746 return VERR_INVALID_PARAMETER;
1747 }
1748 PGM_UNLOCK(pVM);
1749
1750 if (rc == VERR_NOT_FOUND)
1751 {
1752        AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1753 return VERR_PGM_HANDLER_NOT_FOUND;
1754 }
1755 return rc;
1756}
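
/*
 * Editor's note: a usage sketch, not part of the original file, along the
 * lines of the one consumer the doc comment above names -- mapping the VT-x
 * APIC-access page.  The function and variable names here are hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static int hmSampleMapApicAccessPage(PVMCC pVM, RTGCPHYS GCPhysApicBase, RTHCPHYS HCPhysApicAccessPage)
{
    /* GCPhysApicBase must be the start of the registered, fully page aligned
       MMIO handler range; here the aliased page is the first page of it. */
    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase, HCPhysApicAccessPage);
    AssertRCReturn(rc, rc);

    /* No page table updates are strictly required: the next #PF on the page
       resyncs it (see the doc comment above).  Call
       PGMHandlerPhysicalReset(pVM, GCPhysApicBase) to undo the aliasing. */
    return VINF_SUCCESS;
}
#endif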
1757
1758
1759/**
1760 * Checks if a physical range is handled.
1761 *
1762 * @returns true if handled, false if not.
1763 * @param pVM The cross context VM structure.
1764 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1765 * @remarks Caller must take the PGM lock...
1766 * @thread EMT.
1767 */
1768VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1769{
1770 /*
1771 * Find the handler.
1772 */
1773 PGM_LOCK_VOID(pVM);
1774 PPGMPHYSHANDLER pCur;
1775 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1776 if (RT_SUCCESS(rc))
1777 {
1778#ifdef VBOX_STRICT
1779 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
1780 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1781 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1782 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1783 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
1784#endif
1785 PGM_UNLOCK(pVM);
1786 return true;
1787 }
1788 PGM_UNLOCK(pVM);
1789 return false;
1790}
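
/*
 * Editor's note: a trivial usage sketch, not part of the original file; the
 * wrapper name is hypothetical.  Note the remark above about taking the PGM
 * lock if the answer must remain valid after the call returns.
 */
#if 0 /* illustrative sketch, not built */
static void devSampleLogHandledState(PVMCC pVM, RTGCPHYS GCPhys)
{
    if (PGMHandlerPhysicalIsRegistered(pVM, GCPhys))
        Log(("devSample: %RGp is covered by a physical access handler\n", GCPhys));
}
#endif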
1791
1792
1793/**
1794 * Checks if it's a disabled all-access handler or a write-access handler at
1795 * the given address.
1796 *
1797 * @returns true if it's an all-access handler, false if it's a write-access
1798 *          handler.
1799 * @param pVM The cross context VM structure.
1800 * @param GCPhys The address of the page with a disabled handler.
1801 *
1802 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1803 */
1804bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
1805{
1806 PGM_LOCK_VOID(pVM);
1807 PPGMPHYSHANDLER pCur;
1808 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1809 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
1810
1811 /* Only whole pages can be disabled. */
1812 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
1813 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
1814
1815 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1816 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1817 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1818 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
1819 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
1820 PGM_UNLOCK(pVM);
1821 return fRet;
1822}
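
/*
 * Editor's note: a sketch, not part of the original file, of the kind of
 * decision the caller named in the remark above (PGMR3PhysTlbGCPhys2Ptr)
 * makes with this result.  The real logic is more involved; this only shows
 * the meaning of the return value.
 */
#if 0 /* illustrative sketch, not built */
    /* With the PGM lock held and a page whose handler is disabled: */
    if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
        Log(("disabled ALL/MMIO handler at %RGp: direct access is OK only while disabled\n", GCPhys));
    else
        Log(("disabled WRITE handler at %RGp: reads would go direct even when re-enabled\n", GCPhys));
#endif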
1823
1824#ifdef VBOX_STRICT
1825
1826/**
1827 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1828 * and its AVL enumerators.
1829 */
1830typedef struct PGMAHAFIS
1831{
1832 /** The current physical address. */
1833 RTGCPHYS GCPhys;
1834 /** Number of errors. */
1835 unsigned cErrors;
1836 /** Pointer to the VM. */
1837 PVM pVM;
1838} PGMAHAFIS, *PPGMAHAFIS;
1839
1840
1841/**
1842 * Asserts that the physical access handler state encoded in the ram-range
1843 * page flags matches the registered physical access handlers.
1844 *
1845 * @returns Number of mismatches.
1846 * @param pVM The cross context VM structure.
1847 */
1848VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
1849{
1850 PPGM pPGM = &pVM->pgm.s;
1851 PGMAHAFIS State;
1852 State.GCPhys = 0;
1853 State.cErrors = 0;
1854 State.pVM = pVM;
1855
1856 PGM_LOCK_ASSERT_OWNER(pVM);
1857
1858 /*
1859 * Check the RAM flags against the handlers.
1860 */
1861 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
1862 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
1863 {
1864 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
1865 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1866 {
1867 PGMPAGE const *pPage = &pRam->aPages[iPage];
1868 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1869 {
1870 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
1871
1872 /*
1873 * Physical first - calculate the state based on the handlers
1874 * active on the page, then compare.
1875 */
1876 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1877 {
1878 /* the first */
1879 PPGMPHYSHANDLER pPhys;
1880 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
1881 if (rc == VERR_NOT_FOUND)
1882 {
1883 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1884 State.GCPhys, &pPhys);
1885 if (RT_SUCCESS(rc))
1886 {
1887 Assert(pPhys->Key >= State.GCPhys);
1888 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
1889 pPhys = NULL;
1890 }
1891 else
1892 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1893 }
1894 else
1895 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1896
1897 if (pPhys)
1898 {
1899 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
1900 unsigned uState = pPhysType->uState;
1901
1902 /* more? */
1903 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1904 {
1905 PPGMPHYSHANDLER pPhys2;
1906 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1907 pPhys->KeyLast + 1, &pPhys2);
1908 if (rc == VERR_NOT_FOUND)
1909 break;
1910 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
1911 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1912 break;
1913 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
1914 uState = RT_MAX(uState, pPhysType2->uState);
1915 pPhys = pPhys2;
1916 }
1917
1918                        /* Compare. */
1919 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1920 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1921 {
1922 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1923 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
1924 State.cErrors++;
1925 }
1926 }
1927 else
1928 {
1929 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1930 State.cErrors++;
1931 }
1932 }
1933 }
1934 } /* foreach page in ram range. */
1935 } /* foreach ram range. */
1936
1937 /*
1938 * Do the reverse check for physical handlers.
1939 */
1940 /** @todo */
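#if 0 /* Editor's sketch, not part of the original file: one way the reverse
         check could look, reusing only the tree methods already used above
         (lookupMatchingOrAbove) plus pgmPhysGetPage, which is assumed
         available as in PGMInline.h.  The exact state comparison, allowing a
         page to carry a higher state than this handler's or to be explicitly
         disabled (e.g. aliased MMIO pages), is an assumption. */
    RTGCPHYS GCPhysKey = 0;
    for (;;)
    {
        PPGMPHYSHANDLER pPhys;
        int rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhysKey, &pPhys);
        if (rc == VERR_NOT_FOUND)
            break;
        AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhysKey=%RGp\n", rc, GCPhysKey), 999);
        PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);

        /* Each page covered by the handler must carry at least its state. */
        uint64_t cLeft  = ((pPhys->KeyLast - pPhys->Key) >> GUEST_PAGE_SHIFT) + 1;
        RTGCPHYS GCPhys = pPhys->Key;
        while (cLeft-- > 0)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < pType->uState
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            {
                AssertMsgFailed(("phys handler vs page state mismatch. GCPhys=%RGp state=%d expected>=%d %s\n",
                                 GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pType->uState, pType->pszDesc));
                State.cErrors++;
            }
            GCPhys += GUEST_PAGE_SIZE;
        }

        if (pPhys->KeyLast >= ~(RTGCPHYS)0 - GUEST_PAGE_SIZE)
            break;
        GCPhysKey = pPhys->KeyLast + 1;
    }
#endif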
1941
1942 return State.cErrors;
1943}
1944
1945#endif /* VBOX_STRICT */
1946