VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@93718

Last change on this file since 93718 was 93716, checked in by vboxsync, 3 years ago

VMM/PGM: Moved the physical handler allocation off the hyper heap and into its own slab, changing it to use the 'hardened' AVL tree code. bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 74.5 KB
1/* $Id: PGMAllHandler.cpp 93716 2022-02-14 10:36:21Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/dbgf.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/dbgf.h>
32#ifdef IN_RING0
33# include <VBox/vmm/pdmdev.h>
34#endif
35#include "PGMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include "PGMInline.h"
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm-amd64-x86.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/vmm/selm.h>
46
47
48/*********************************************************************************************************************************
49* Global Variables *
50*********************************************************************************************************************************/
51/** Dummy physical access handler type record. */
52CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
53{
54 /* .hType = */ UINT64_C(0x93b7557e1937aaff),
55 /* .enmKind = */ PGMPHYSHANDLERKIND_INVALID,
56 /* .uState = */ PGM_PAGE_HNDL_PHYS_STATE_ALL,
57 /* .fKeepPgmLock = */ true,
58 /* .fRing0DevInsIdx = */ false,
59#ifdef IN_RING0
60 /* .afPadding = */ {false},
61 /* .pfnHandler = */ pgmR0HandlerPhysicalHandlerToRing3,
62 /* .pfnPfHandler = */ pgmR0HandlerPhysicalPfHandlerToRing3,
63#elif defined(IN_RING3)
64 /* .fRing0Enabled = */ false,
65 /* .pfnHandler = */ pgmR3HandlerPhysicalHandlerInvalid,
66#else
67# error "unsupported context"
68#endif
69 /* .pszDesc = */ "dummy"
70};
71
72
73/*********************************************************************************************************************************
74* Internal Functions *
75*********************************************************************************************************************************/
76static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
77 void *pvBitmap, uint32_t offBitmap);
78static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
79static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);
80
81
82#ifndef IN_RING3
83
84/**
85 * @callback_method_impl{FNPGMPHYSHANDLER,
86 * Dummy for forcing ring-3 handling of the access.}
87 */
88DECLCALLBACK(VBOXSTRICTRC)
89pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
90 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
91{
92 RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
93 return VINF_EM_RAW_EMULATE_INSTR;
94}
95
96
97/**
98 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
99 * Dummy for forcing ring-3 handling of the access.}
100 */
101DECLCALLBACK(VBOXSTRICTRC)
102pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
103 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
104{
105 RT_NOREF(pVM, pVCpu, uErrorCode, pRegFrame, pvFault, GCPhysFault, uUser);
106 return VINF_EM_RAW_EMULATE_INSTR;
107}
108
109#endif /* !IN_RING3 */
110
111
112/**
113 * Creates a physical access handler, allocation part.
114 *
115 * @returns VBox status code.
116 * @retval VERR_OUT_OF_RESOURCES if no more handlers available.
117 *
118 * @param pVM The cross context VM structure.
119 * @param hType The handler type registration handle.
120 * @param uUser User argument to the handlers (not pointer).
121 * @param pszDesc Description of this handler. If NULL, the type
122 * description will be used instead.
123 * @param ppPhysHandler Where to return the access handler structure on
124 * success.
125 */
126int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
127 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
128{
129 /*
130 * Validate input.
131 */
132 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
133 AssertReturn(pType, VERR_INVALID_HANDLE);
134 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
135 AssertPtr(ppPhysHandler);
136
137 Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
138 uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));
139
140 /*
141 * Allocate and initialize the new entry.
142 */
143 int rc = PGM_LOCK(pVM);
144 AssertRCReturn(rc, rc);
145
146 PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
147 if (pNew)
148 {
149 pNew->Key = NIL_RTGCPHYS;
150 pNew->KeyLast = NIL_RTGCPHYS;
151 pNew->cPages = 0;
152 pNew->cAliasedPages = 0;
153 pNew->cTmpOffPages = 0;
154 pNew->uUser = uUser;
155 pNew->hType = hType;
156 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc
157#ifdef IN_RING3
158 : pType->pszDesc;
159#else
160 : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
161#endif
162
163 PGM_UNLOCK(pVM);
164 *ppPhysHandler = pNew;
165 return VINF_SUCCESS;
166 }
167
168 PGM_UNLOCK(pVM);
169 return VERR_OUT_OF_RESOURCES;
170}
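
/*
 * Illustrative usage sketch for the Ex API (hMyType, uMyUser, GCPhysFirst and
 * cbRegion are hypothetical). Allocation and registration are separate steps,
 * mirroring what PGMHandlerPhysicalRegister() does in one call:
 *
 *     PPGMPHYSHANDLER pNew;
 *     int rc = pgmHandlerPhysicalExCreate(pVM, hMyType, uMyUser, "example", &pNew);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhysFirst, GCPhysFirst + cbRegion - 1);
 *         if (RT_FAILURE(rc))
 *             pgmHandlerPhysicalExDestroy(pVM, pNew); // free the never-registered node
 *     }
 */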
171
172
173/**
174 * Duplicates a physical access handler.
175 *
176 * @returns VBox status code.
177 * @retval VINF_SUCCESS when successfully installed.
178 *
179 * @param pVM The cross context VM structure.
180 * @param pPhysHandlerSrc The source handler to duplicate.
181 * @param ppPhysHandler Where to return the access handler structure on
182 * success.
183 */
184int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
185{
186 return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
187 pPhysHandlerSrc->pszDesc, ppPhysHandler);
188}
189
190
191/**
192 * Registers an access handler for a physical range.
193 *
194 * @returns VBox status code.
195 * @retval VINF_SUCCESS when successfully installed.
196 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully updated.
197 *
198 * @param pVM The cross context VM structure.
199 * @param pPhysHandler The physical handler.
200 * @param GCPhys Start physical address.
201 * @param GCPhysLast Last physical address. (inclusive)
202 */
203int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
204{
205 /*
206 * Validate input.
207 */
208 AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
209 PGMPHYSHANDLERTYPE const hType = pPhysHandler->hType;
210 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
211 AssertReturn(pType, VERR_INVALID_HANDLE);
212 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
213
214 AssertPtr(pPhysHandler);
215
216 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
217 hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
218 AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
219
220 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%RGp >= %RGp)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
221 Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */
222
223 switch (pType->enmKind)
224 {
225 case PGMPHYSHANDLERKIND_WRITE:
226 break;
227 case PGMPHYSHANDLERKIND_MMIO:
228 case PGMPHYSHANDLERKIND_ALL:
229 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
230 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
231 AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
232 break;
233 default:
234 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
235 return VERR_INVALID_PARAMETER;
236 }
237
238 /*
239 * We require the range to be within registered ram.
240 * There is no apparent need to support ranges which cover more than one ram range.
241 */
242 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
243 if ( !pRam
244 || GCPhysLast > pRam->GCPhysLast)
245 {
246#ifdef IN_RING3
247 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
248#endif
249 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
250 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
251 }
252 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
253 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
254
255 /*
256 * Try insert into list.
257 */
258 pPhysHandler->Key = GCPhys;
259 pPhysHandler->KeyLast = GCPhysLast;
260 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
261
262 int rc = PGM_LOCK(pVM);
263 if (RT_SUCCESS(rc))
264 {
265 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
266 if (RT_SUCCESS(rc))
267 {
268 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
269 if (rc == VINF_PGM_SYNC_CR3)
270 rc = VINF_PGM_GCPHYS_ALIASED;
271
272#if defined(IN_RING3) || defined(IN_RING0)
273 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
274#endif
275 PGM_UNLOCK(pVM);
276
277 if (rc != VINF_SUCCESS)
278 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
279 return rc;
280 }
281 PGM_UNLOCK(pVM);
282 }
283
284 pPhysHandler->Key = NIL_RTGCPHYS;
285 pPhysHandler->KeyLast = NIL_RTGCPHYS;
286
287 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
288
289#if defined(IN_RING3) && defined(VBOX_STRICT)
290 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
291#endif
292 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
293 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
294 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
295}
296
297
298/**
299 * Registers an access handler for a physical range.
300 *
301 * @returns VBox status code.
302 * @retval VINF_SUCCESS when successfully installed.
303 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully updated
304 * because the guest page is aliased and/or mapped by multiple PTs. A CR3
305 * sync has been flagged together with a pool clearing.
306 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
307 * one. A debug assertion is raised.
308 *
309 * @param pVM The cross context VM structure.
310 * @param GCPhys Start physical address.
311 * @param GCPhysLast Last physical address. (inclusive)
312 * @param hType The handler type registration handle.
313 * @param uUser User argument to the handler.
314 * @param pszDesc Description of this handler. If NULL, the type
315 * description will be used instead.
316 */
317VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
318 uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
319{
320#ifdef LOG_ENABLED
321 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
322 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
323 GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
324#endif
325
326 PPGMPHYSHANDLER pNew;
327 int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
328 if (RT_SUCCESS(rc))
329 {
330 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
331 if (RT_SUCCESS(rc))
332 return rc;
333 pgmHandlerPhysicalExDestroy(pVM, pNew);
334 }
335 return rc;
336}
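
/*
 * Minimal registration sketch (hMyType, uMyUser and GCPhysMmio are
 * hypothetical). Note that GCPhysLast is inclusive and that the MMIO/ALL
 * kinds require full, page-aligned pages:
 *
 *     Assert(!(GCPhysMmio & GUEST_PAGE_OFFSET_MASK));
 *     rc = PGMHandlerPhysicalRegister(pVM, GCPhysMmio, GCPhysMmio + _4K - 1,
 *                                     hMyType, uMyUser, "my-mmio");
 *     AssertRCReturn(rc, rc);
 */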
337
338
339/**
340 * Sets ram range flags and attempts updating shadow PTs.
341 *
342 * @returns VBox status code.
343 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
344 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs could not be fully updated
345 * because the guest page is aliased and/or mapped by multiple PTs. FFs are set.
346 * @param pVM The cross context VM structure.
347 * @param pCur The physical handler.
348 * @param pRam The RAM range.
349 * @param pvBitmap Dirty bitmap. Optional.
350 * @param offBitmap Dirty bitmap offset.
351 */
352static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
353 void *pvBitmap, uint32_t offBitmap)
354{
355 /*
356 * Iterate the guest ram pages updating the flags and flushing PT entries
357 * mapping the page.
358 */
359 bool fFlushTLBs = false;
360 int rc = VINF_SUCCESS;
361 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
362 const unsigned uState = pCurType->uState;
363 uint32_t cPages = pCur->cPages;
364 uint32_t i = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
365 for (;;)
366 {
367 PPGMPAGE pPage = &pRam->aPages[i];
368 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
369 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));
370
371 /* Only do upgrades. */
372 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
373 {
374 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
375
376 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
377 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
378 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
379 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
380 rc = rc2;
381
382#ifdef VBOX_WITH_NATIVE_NEM
383 /* Tell NEM about the protection update. */
384 if (VM_IS_NEM_ENABLED(pVM))
385 {
386 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
387 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
388 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
389 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
390 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
391 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
392 }
393#endif
394 if (pvBitmap)
395 ASMBitSet(pvBitmap, offBitmap);
396 }
397
398 /* next */
399 if (--cPages == 0)
400 break;
401 i++;
402 offBitmap++;
403 }
404
405 if (fFlushTLBs)
406 {
407 PGM_INVL_ALL_VCPU_TLBS(pVM);
408 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
409 }
410 else
411 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
412
413 return rc;
414}
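
/*
 * Worked example for the cPages value set up by the registration code
 * (assuming 4 KiB guest pages, i.e. GUEST_PAGE_SHIFT == 12): a handler
 * covering GCPhys=0xA0000 .. GCPhysLast=0xA1FFF gets
 *
 *     cPages = (0xA1FFF - 0xA0000 + 0x1000) >> 12 = 2
 *
 * so the loop above visits exactly the two PGMPAGE entries backing the range,
 * upgrading their handler state and flushing shadow PTEs as needed.
 */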
415
416
417/**
418 * Deregister a physical page access handler.
419 *
420 * @returns VBox status code.
421 * @param pVM The cross context VM structure.
422 * @param pPhysHandler The handler to deregister (but not free).
423 */
424int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
425{
426 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
427 pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));
428
429 int rc = PGM_LOCK(pVM);
430 AssertRCReturn(rc, rc);
431
432 RTGCPHYS const GCPhys = pPhysHandler->Key;
433 AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);
434
435 /*
436 * Remove the handler from the tree.
437 */
438
439 PPGMPHYSHANDLER pRemoved;
440 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
441 if (RT_SUCCESS(rc))
442 {
443 if (pRemoved == pPhysHandler)
444 {
445 /*
446 * Clear the page bits, notify the REM about this change and clear
447 * the cache.
448 */
449 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
450 if (VM_IS_NEM_ENABLED(pVM))
451 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
452 pVM->pgm.s.idxLastPhysHandler = 0;
453
454 pPhysHandler->Key = NIL_RTGCPHYS;
455 pPhysHandler->KeyLast = NIL_RTGCPHYS;
456
457 PGM_UNLOCK(pVM);
458
459 return VINF_SUCCESS;
460 }
461
462 /*
463 * Both of the failure conditions here are considered internal processing
464 * errors because they can only be caused by race conditions or corruption.
465 * If we ever need to handle concurrent deregistration, we have to move
466 * the NIL_RTGCPHYS check inside the PGM lock.
467 */
468 pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
469 }
470
471 PGM_UNLOCK(pVM);
472
473 if (RT_FAILURE(rc))
474 AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
475 else
476 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
477 GCPhys, pRemoved, pPhysHandler));
478 return VERR_PGM_HANDLER_IPE_1;
479}
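
/*
 * Teardown sketch (pMyHandler is hypothetical): deregistration and
 * destruction are separate steps, and pgmHandlerPhysicalExDestroy() insists
 * on Key == NIL_RTGCPHYS, i.e. on the handler having been deregistered first:
 *
 *     int rc = pgmHandlerPhysicalExDeregister(pVM, pMyHandler);
 *     AssertRC(rc);
 *     rc = pgmHandlerPhysicalExDestroy(pVM, pMyHandler);
 *     AssertRC(rc);
 */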
480
481
482/**
483 * Destroys (frees) a physical handler.
484 *
485 * The caller must deregister it before destroying it!
486 *
487 * @returns VBox status code.
488 * @param pVM The cross context VM structure.
489 * @param pHandler The handler to free. NULL is ignored.
490 */
491int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
492{
493 if (pHandler)
494 {
495 AssertPtr(pHandler);
496 AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
497
498 int rc = PGM_LOCK(pVM);
499 if (RT_SUCCESS(rc))
500 {
501 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
502 PGM_UNLOCK(pVM);
503 }
504 return rc;
505 }
506 return VINF_SUCCESS;
507}
508
509
510/**
511 * Deregister a physical page access handler.
512 *
513 * @returns VBox status code.
514 * @param pVM The cross context VM structure.
515 * @param GCPhys Start physical address.
516 */
517VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
518{
519 AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
520
521 /*
522 * Find the handler.
523 */
524 int rc = PGM_LOCK(pVM);
525 AssertRCReturn(rc, rc);
526
527 PPGMPHYSHANDLER pRemoved;
528 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
529 if (RT_SUCCESS(rc))
530 {
531 Assert(pRemoved->Key == GCPhys);
532 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
533 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));
534
535 /*
536 * Clear the page bits, notify the REM about this change and clear
537 * the cache.
538 */
539 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
540 if (VM_IS_NEM_ENABLED(pVM))
541 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
542 pVM->pgm.s.idxLastPhysHandler = 0;
543
544 pRemoved->Key = NIL_RTGCPHYS;
545 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);
546
547 PGM_UNLOCK(pVM);
548 return rc;
549 }
550
551 PGM_UNLOCK(pVM);
552
553 if (rc == VERR_NOT_FOUND)
554 {
555 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
556 rc = VERR_PGM_HANDLER_NOT_FOUND;
557 }
558 return rc;
559}
560
561
562/**
563 * Shared code with modify.
564 */
565static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
566{
567#ifdef VBOX_WITH_NATIVE_NEM
568 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
569 RTGCPHYS GCPhysStart = pCur->Key;
570 RTGCPHYS GCPhysLast = pCur->KeyLast;
571
572 /*
573 * Page align the range.
574 *
575 * Since we've reset (recalculated) the physical handler state of all pages
576 * we can make use of the page states to figure out whether a page should be
577 * included in the REM notification or not.
578 */
579 if ( (pCur->Key & GUEST_PAGE_OFFSET_MASK)
580 || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
581 {
582 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
583
584 if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
585 {
586 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
587 if ( pPage
588 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
589 {
590 RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
591 if ( GCPhys > GCPhysLast
592 || GCPhys < GCPhysStart)
593 return;
594 GCPhysStart = GCPhys;
595 }
596 else
597 GCPhysStart &= X86_PTE_PAE_PG_MASK;
598 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
599 }
600
601 if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
602 {
603 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
604 if ( pPage
605 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
606 {
607 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
608 if ( GCPhys < GCPhysStart
609 || GCPhys > GCPhysLast)
610 return;
611 GCPhysLast = GCPhys;
612 }
613 else
614 GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
615 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
616 }
617 }
618
619 /*
620 * Tell NEM.
621 */
622 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhysStart);
623 RTGCPHYS const cb = GCPhysLast - GCPhysStart + 1;
624 uint8_t u2State = UINT8_MAX;
625 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
626 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
627 if (u2State != UINT8_MAX && pRam)
628 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
629 cb >> GUEST_PAGE_SHIFT, u2State);
630#else
631 RT_NOREF(pVM, pCur);
632#endif
633}
634
635
636/**
637 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
638 * edge pages.
639 */
640DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
641{
642 /*
643 * Look for other handlers.
644 */
645 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
646 for (;;)
647 {
648 PPGMPHYSHANDLER pCur;
649 int rc;
650 if (fAbove)
651 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
652 GCPhys, &pCur);
653 else
654 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
655 GCPhys, &pCur);
656 if (rc == VERR_NOT_FOUND)
657 break;
658 AssertRCBreak(rc);
659 if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
660 break;
661 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
662 uState = RT_MAX(uState, pCurType->uState);
663
664 /* next? */
665 RTGCPHYS GCPhysNext = fAbove
666 ? pCur->KeyLast + 1
667 : pCur->Key - 1;
668 if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
669 break;
670 GCPhys = GCPhysNext;
671 }
672
673 /*
674 * Update if we found something that is a higher priority state than the current.
675 */
676 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
677 {
678 PPGMPAGE pPage;
679 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
680 if ( RT_SUCCESS(rc)
681 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
682 {
683 /* This should normally not be necessary. */
684 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
685 bool fFlushTLBs;
686 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
687 if (RT_SUCCESS(rc) && fFlushTLBs)
688 PGM_INVL_ALL_VCPU_TLBS(pVM);
689 else
690 AssertRC(rc);
691
692#ifdef VBOX_WITH_NATIVE_NEM
693 /* Tell NEM about the protection update. */
694 if (VM_IS_NEM_ENABLED(pVM))
695 {
696 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
697 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
698 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
699 PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
700 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
701 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
702 }
703#endif
704 }
705 else
706 AssertRC(rc);
707 }
708}
709
710
711/**
712 * Resets an aliased page.
713 *
714 * @param pVM The cross context VM structure.
715 * @param pPage The page.
716 * @param GCPhysPage The page address in case it comes in handy.
717 * @param pRam The RAM range the page is associated with (for NEM
718 * notifications).
719 * @param fDoAccounting Whether to perform accounting. (Only set during
720 * reset where pgmR3PhysRamReset doesn't have the
721 * handler structure handy.)
722 */
723void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting)
724{
725 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
726 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
727 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
728#ifdef VBOX_WITH_NATIVE_NEM
729 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
730#endif
731
732 /*
733 * Flush any shadow page table references *first*.
734 */
735 bool fFlushTLBs = false;
736 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
737 AssertLogRelRCReturnVoid(rc);
738 HMFlushTlbOnAllVCpus(pVM);
739
740 /*
741 * Make it an MMIO/Zero page.
742 */
743 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
744 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
745 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
746 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
747 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
748
749 /* Flush its TLB entry. */
750 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
751
752 /*
753 * Do accounting for pgmR3PhysRamReset.
754 */
755 if (fDoAccounting)
756 {
757 PPGMPHYSHANDLER pHandler;
758 rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
759 if (RT_SUCCESS(rc))
760 {
761 Assert(pHandler->cAliasedPages > 0);
762 pHandler->cAliasedPages--;
763 }
764 else
765 AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
766 }
767
768#ifdef VBOX_WITH_NATIVE_NEM
769 /*
770 * Tell NEM about the protection change.
771 */
772 if (VM_IS_NEM_ENABLED(pVM))
773 {
774 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
775 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
776 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
777 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
778 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
779 }
780#else
781 RT_NOREF(pRam);
782#endif
783}
784
785
786/**
787 * Resets ram range flags.
788 *
789 * @note Returns nothing; page lookup failures are only asserted on (see the
790 * AssertRC in the loop below).
791 * @param pVM The cross context VM structure.
792 * @param pCur The physical handler.
793 *
794 * @remark We don't start messing with the shadow page tables, as we've
795 * already got code in Trap0e which deals with out of sync handler
796 * flags (originally conceived for global pages).
797 */
798static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
799{
800 /*
801 * Iterate the guest ram pages updating the state.
802 */
803 RTUINT cPages = pCur->cPages;
804 RTGCPHYS GCPhys = pCur->Key;
805 PPGMRAMRANGE pRamHint = NULL;
806 for (;;)
807 {
808 PPGMPAGE pPage;
809 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
810 if (RT_SUCCESS(rc))
811 {
812 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
813 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
814 bool fNemNotifiedAlready = false;
815 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
816 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
817 {
818 Assert(pCur->cAliasedPages > 0);
819 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/);
820 pCur->cAliasedPages--;
821 fNemNotifiedAlready = true;
822 }
823#ifdef VBOX_STRICT
824 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
825 AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
826 ("%RGp %R[pgmpage]\n", GCPhys, pPage));
827#endif
828 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
829
830#ifdef VBOX_WITH_NATIVE_NEM
831 /* Tell NEM about the protection change. */
832 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
833 {
834 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
835 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
836 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
837 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
838 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
839 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
840 }
841#endif
842 RT_NOREF(fNemNotifiedAlready);
843 }
844 else
845 AssertRC(rc);
846
847 /* next */
848 if (--cPages == 0)
849 break;
850 GCPhys += GUEST_PAGE_SIZE;
851 }
852
853 pCur->cAliasedPages = 0;
854 pCur->cTmpOffPages = 0;
855
856 /*
857 * Check for partial start and end pages.
858 */
859 if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
860 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
861 if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
862 pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
863}
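
/*
 * Example of why the partial-page recalculation above is needed (hypothetical
 * addresses, 4 KiB pages assumed): if handler A covers 0x1000-0x17FF and
 * handler B covers 0x1800-0x1FFF, they share the page at 0x1000. Resetting A
 * drops that page to PGM_PAGE_HNDL_PHYS_STATE_NONE, so
 * pgmHandlerPhysicalRecalcPageState() walks the neighbouring handlers and
 * restores the page to B's uState.
 */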
864
865
866#if 0 /* unused */
867/**
868 * Modify a physical page access handler.
869 *
870 * Modification can only be done to the range it self, not the type or anything else.
871 *
872 * @returns VBox status code.
873 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
874 * and a new registration must be performed!
875 * @param pVM The cross context VM structure.
876 * @param GCPhysCurrent Current location.
877 * @param GCPhys New location.
878 * @param GCPhysLast New last location.
879 */
880VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
881{
882 /*
883 * Remove it.
884 */
885 int rc;
886 PGM_LOCK_VOID(pVM);
887 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
888 if (pCur)
889 {
890 /*
891 * Clear the ram flags. (We're gonna move or free it!)
892 */
893 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
894 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
895 /** @todo pCurType validation */
896 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
897 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
898
899 /*
900 * Validate the new range, modify and reinsert.
901 */
902 if (GCPhysLast >= GCPhys)
903 {
904 /*
905 * We require the range to be within registered ram.
906 * There is no apparent need to support ranges which cover more than one ram range.
907 */
908 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
909 if ( pRam
910 && GCPhys <= pRam->GCPhysLast
911 && GCPhysLast >= pRam->GCPhys)
912 {
913 pCur->Core.Key = GCPhys;
914 pCur->Core.KeyLast = GCPhysLast;
915 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
916
917 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
918 {
919 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
920 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
921
922 /*
923 * Set ram flags, flush shadow PT entries and finally tell REM about this.
924 */
925 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);
926
927 /** @todo NEM: not sure we need this notification... */
928 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
929
930 PGM_UNLOCK(pVM);
931
932 PGM_INVL_ALL_VCPU_TLBS(pVM);
933 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
934 GCPhysCurrent, GCPhys, GCPhysLast));
935 return VINF_SUCCESS;
936 }
937
938 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
939 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
940 }
941 else
942 {
943 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
944 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
945 }
946 }
947 else
948 {
949 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
950 rc = VERR_INVALID_PARAMETER;
951 }
952
953 /*
954 * Invalid new location, flush the cache and free it.
955 * We've only gotta notify REM and free the memory.
956 */
957 if (VM_IS_NEM_ENABLED(pVM))
958 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
959 pVM->pgm.s.pLastPhysHandlerR0 = 0;
960 pVM->pgm.s.pLastPhysHandlerR3 = 0;
961 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
962 MMHyperFree(pVM, pCur);
963 }
964 else
965 {
966 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
967 rc = VERR_PGM_HANDLER_NOT_FOUND;
968 }
969
970 PGM_UNLOCK(pVM);
971 return rc;
972}
973#endif /* unused */
974
975
976/**
977 * Changes the user callback arguments associated with a physical access handler.
978 *
979 * @returns VBox status code.
980 * @param pVM The cross context VM structure.
981 * @param GCPhys Start physical address of the handler.
982 * @param uUser User argument to the handlers.
983 */
984VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
985{
986 /*
987 * Find the handler and make the change.
988 */
989 int rc = PGM_LOCK(pVM);
990 AssertRCReturn(rc, rc);
991
992 PPGMPHYSHANDLER pCur;
993 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
994 if (RT_SUCCESS(rc))
995 {
996 Assert(pCur->Key == GCPhys);
997 pCur->uUser = uUser;
998 }
999 else if (rc == VERR_NOT_FOUND)
1000 {
1001 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1002 rc = VERR_PGM_HANDLER_NOT_FOUND;
1003 }
1004
1005 PGM_UNLOCK(pVM);
1006 return rc;
1007}
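
/*
 * Usage sketch (GCPhysMmio and uNewUser are hypothetical): retarget the
 * handler's user argument without deregistering and re-registering:
 *
 *     rc = PGMHandlerPhysicalChangeUserArg(pVM, GCPhysMmio, uNewUser);
 *     AssertRC(rc);
 */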
1008
1009#if 0 /* unused */
1010
1011/**
1012 * Splits a physical access handler in two.
1013 *
1014 * @returns VBox status code.
1015 * @param pVM The cross context VM structure.
1016 * @param GCPhys Start physical address of the handler.
1017 * @param GCPhysSplit The split address.
1018 */
1019VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
1020{
1021 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
1022
1023 /*
1024 * Do the allocation without owning the lock.
1025 */
1026 PPGMPHYSHANDLER pNew;
1027 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
1028 if (RT_FAILURE(rc))
1029 return rc;
1030
1031 /*
1032 * Get the handler.
1033 */
1034 PGM_LOCK_VOID(pVM);
1035 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1036 if (RT_LIKELY(pCur))
1037 {
1038 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
1039 {
1040 /*
1041 * Create new handler node for the 2nd half.
1042 */
1043 *pNew = *pCur;
1044 pNew->Core.Key = GCPhysSplit;
1045 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1046
1047 pCur->Core.KeyLast = GCPhysSplit - 1;
1048 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1049
1050 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
1051 {
1052 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
1053 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1054 PGM_UNLOCK(pVM);
1055 return VINF_SUCCESS;
1056 }
1057 AssertMsgFailed(("whu?\n"));
1058 rc = VERR_PGM_PHYS_HANDLER_IPE;
1059 }
1060 else
1061 {
1062 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
1063 rc = VERR_INVALID_PARAMETER;
1064 }
1065 }
1066 else
1067 {
1068 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1069 rc = VERR_PGM_HANDLER_NOT_FOUND;
1070 }
1071 PGM_UNLOCK(pVM);
1072 MMHyperFree(pVM, pNew);
1073 return rc;
1074}
1075
1076
1077/**
1078 * Joins up two adjacent physical access handlers which have the same callbacks.
1079 *
1080 * @returns VBox status code.
1081 * @param pVM The cross context VM structure.
1082 * @param GCPhys1 Start physical address of the first handler.
1083 * @param GCPhys2 Start physical address of the second handler.
1084 */
1085VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
1086{
1087 /*
1088 * Get the handlers.
1089 */
1090 int rc;
1091 PGM_LOCK_VOID(pVM);
1092 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
1093 if (RT_LIKELY(pCur1))
1094 {
1095 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1096 if (RT_LIKELY(pCur2))
1097 {
1098 /*
1099 * Make sure that they are adjacent, and that they've got the same callbacks.
1100 */
1101 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
1102 {
1103 if (RT_LIKELY(pCur1->hType == pCur2->hType))
1104 {
1105 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1106 if (RT_LIKELY(pCur3 == pCur2))
1107 {
1108 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
1109 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1110 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
1111 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1112 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1113 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1114 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
1115 MMHyperFree(pVM, pCur2);
1116 PGM_UNLOCK(pVM);
1117 return VINF_SUCCESS;
1118 }
1119
1120 Assert(pCur3 == pCur2);
1121 rc = VERR_PGM_PHYS_HANDLER_IPE;
1122 }
1123 else
1124 {
1125 AssertMsgFailed(("mismatching handlers\n"));
1126 rc = VERR_ACCESS_DENIED;
1127 }
1128 }
1129 else
1130 {
1131 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
1132 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1133 rc = VERR_INVALID_PARAMETER;
1134 }
1135 }
1136 else
1137 {
1138 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
1139 rc = VERR_PGM_HANDLER_NOT_FOUND;
1140 }
1141 }
1142 else
1143 {
1144 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
1145 rc = VERR_PGM_HANDLER_NOT_FOUND;
1146 }
1147 PGM_UNLOCK(pVM);
1148 return rc;
1149
1150}
1151
1152#endif /* unused */
1153
1154/**
1155 * Resets any modifications to individual pages in a physical page access
1156 * handler region.
1157 *
1158 * This is used in tandem with PGMHandlerPhysicalPageTempOff(),
1159 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
1160 *
1161 * @returns VBox status code.
1162 * @param pVM The cross context VM structure.
1163 * @param GCPhys The start address of the handler regions, i.e. what you
1164 * passed to PGMR3HandlerPhysicalRegister(),
1165 * PGMHandlerPhysicalRegisterEx() or
1166 * PGMHandlerPhysicalModify().
1167 */
1168VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
1169{
1170 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
1171 int rc = PGM_LOCK(pVM);
1172 AssertRCReturn(rc, rc);
1173
1174 /*
1175 * Find the handler.
1176 */
1177 PPGMPHYSHANDLER pCur;
1178 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1179 if (RT_SUCCESS(rc))
1180 {
1181 Assert(pCur->Key == GCPhys);
1182
1183 /*
1184 * Validate kind.
1185 */
1186 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1187 switch (pCurType->enmKind)
1188 {
1189 case PGMPHYSHANDLERKIND_WRITE:
1190 case PGMPHYSHANDLERKIND_ALL:
1191 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
1192 {
1193 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
1194 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1195 Assert(pRam);
1196 Assert(pRam->GCPhys <= pCur->Key);
1197 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1198
1199 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
1200 {
1201 /*
1202 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1203 * This could probably be optimized a bit wrt flushing, but I'm too lazy
1204 * to do that now...
1205 */
1206 if (pCur->cAliasedPages)
1207 {
1208 PPGMPAGE pPage = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1209 RTGCPHYS GCPhysPage = pCur->Key;
1210 uint32_t cLeft = pCur->cPages;
1211 while (cLeft-- > 0)
1212 {
1213 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1214 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1215 {
1216 Assert(pCur->cAliasedPages > 0);
1217 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
1218 --pCur->cAliasedPages;
1219#ifndef VBOX_STRICT
1220 if (pCur->cAliasedPages == 0)
1221 break;
1222#endif
1223 }
1224 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1225 GCPhysPage += GUEST_PAGE_SIZE;
1226 pPage++;
1227 }
1228 Assert(pCur->cAliasedPages == 0);
1229 }
1230 }
1231 else if (pCur->cTmpOffPages > 0)
1232 {
1233 /*
1234 * Set the flags and flush shadow PT entries.
1235 */
1236 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
1237 }
1238
1239 pCur->cAliasedPages = 0;
1240 pCur->cTmpOffPages = 0;
1241
1242 rc = VINF_SUCCESS;
1243 break;
1244 }
1245
1246 /*
1247 * Invalid.
1248 */
1249 default:
1250 AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
1251 rc = VERR_PGM_PHYS_HANDLER_IPE;
1252 break;
1253 }
1254 }
1255 else if (rc == VERR_NOT_FOUND)
1256 {
1257 AssertMsgFailed(("Didn't find MMIO Range starting at %RGp\n", GCPhys));
1258 rc = VERR_PGM_HANDLER_NOT_FOUND;
1259 }
1260
1261 PGM_UNLOCK(pVM);
1262 return rc;
1263}
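
/*
 * Dirty-tracking sketch (GCPhysVRam and GCPhysPage are hypothetical): a
 * caller turns monitoring off for pages it expects further writes to, then
 * re-arms the whole region in one go:
 *
 *     rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysVRam, GCPhysPage);
 *     // ... the page is now written to without faulting ...
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysVRam); // re-enables all pages
 */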
1264
1265
1266/**
1267 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
1268 * tracking.
1269 *
1270 * @returns VBox status code.
1271 * @param pVM The cross context VM structure.
1272 * @param GCPhys The start address of the handler region.
1273 * @param pvBitmap Dirty bitmap. Caller has cleared this already, only
1274 * dirty bits will be set. Caller also made sure it's big
1275 * enough.
1276 * @param offBitmap Dirty bitmap offset.
1277 * @remarks Caller must own the PGM critical section.
1278 */
1279DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
1280{
1281 LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
1282 PGM_LOCK_ASSERT_OWNER(pVM);
1283
1284 /*
1285 * Find the handler.
1286 */
1287 PPGMPHYSHANDLER pCur;
1288 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1289 if (RT_SUCCESS(rc))
1290 {
1291 Assert(pCur->Key == GCPhys);
1292
1293 /*
1294 * Validate kind.
1295 */
1296 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1297 if ( pCurType
1298 && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
1299 {
1300 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
1301
1302 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1303 Assert(pRam);
1304 Assert(pRam->GCPhys <= pCur->Key);
1305 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1306
1307 /*
1308 * Set the flags and flush shadow PT entries.
1309 */
1310 if (pCur->cTmpOffPages > 0)
1311 {
1312 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
1313 pCur->cTmpOffPages = 0;
1314 }
1315 else
1316 rc = VINF_SUCCESS;
1317 }
1318 else
1319 {
1320 AssertFailed();
1321 rc = VERR_WRONG_TYPE;
1322 }
1323 }
1324 else if (rc == VERR_NOT_FOUND)
1325 {
1326 AssertMsgFailed(("Didn't find MMIO Range starting at %RGp\n", GCPhys));
1327 rc = VERR_PGM_HANDLER_NOT_FOUND;
1328 }
1329
1330 return rc;
1331}
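
/*
 * Bitmap sketch for callers of the above (sizes hypothetical): one bit per
 * guest page in the handler range, only ever set by
 * pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs() via ASMBitSet():
 *
 *     uint64_t bmDirty[(256 + 63) / 64] = {0};  // e.g. a 1 MiB region = 256 pages
 *     rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhys, bmDirty, 0); // offBitmap=0
 */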
1332
1333
1334/**
1335 * Temporarily turns off the access monitoring of a page within a monitored
1336 * physical write/all page access handler region.
1337 *
1338 * Use this when no further \#PFs are required for that page. Be aware that
1339 * a page directory sync might reset the flags, and turn on access monitoring
1340 * for the page.
1341 *
1342 * The caller must do required page table modifications.
1343 *
1344 * @returns VBox status code.
1345 * @param pVM The cross context VM structure.
1346 * @param GCPhys The start address of the access handler. This
1347 * must be a fully page aligned range or we risk
1348 * messing up other handlers installed for the
1349 * start and end pages.
1350 * @param GCPhysPage The physical address of the page to turn off
1351 * access monitoring for.
1352 */
1353VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
1354{
1355 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
1356 int rc = PGM_LOCK(pVM);
1357 AssertRCReturn(rc, rc);
1358
1359 /*
1360 * Validate the range.
1361 */
1362 PPGMPHYSHANDLER pCur;
1363 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1364 if (RT_SUCCESS(rc))
1365 {
1366 Assert(pCur->Key == GCPhys);
1367 if (RT_LIKELY( GCPhysPage >= pCur->Key
1368 && GCPhysPage <= pCur->KeyLast))
1369 {
1370 Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
1371 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
1372
1373 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1374 AssertReturnStmt( pCurType
1375 && ( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1376 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
1377 PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1378
1379 /*
1380 * Change the page status.
1381 */
1382 PPGMPAGE pPage;
1383 PPGMRAMRANGE pRam;
1384 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1385 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1386 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1387 {
1388 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1389 pCur->cTmpOffPages++;
1390
1391#ifdef VBOX_WITH_NATIVE_NEM
1392 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1393 if (VM_IS_NEM_ENABLED(pVM))
1394 {
1395 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1396 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1397 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
1398 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1399 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1400 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1401 }
1402#endif
1403 }
1404 PGM_UNLOCK(pVM);
1405 return VINF_SUCCESS;
1406 }
1407 PGM_UNLOCK(pVM);
1408 AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1409 return VERR_INVALID_PARAMETER;
1410 }
1411 PGM_UNLOCK(pVM);
1412
1413 if (rc == VERR_NOT_FOUND)
1414 {
1415 AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1416 return VERR_PGM_HANDLER_NOT_FOUND;
1417 }
1418 return rc;
1419}
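
/*
 * Typical pattern sketch (markPageDirty and GCPhysMyRegion are hypothetical):
 * a write handler that only needs to see the first write to a page records it
 * and then turns monitoring off so subsequent writes run at full speed:
 *
 *     static DECLCALLBACK(VBOXSTRICTRC)
 *     myWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys,
 *                    void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
 *                    PGMACCESSORIGIN enmOrigin, uint64_t uUser)
 *     {
 *         markPageDirty(uUser, GCPhys);                     // hypothetical helper
 *         PGMHandlerPhysicalPageTempOff(pVM, GCPhysMyRegion,
 *                                       GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
 *         return VINF_PGM_HANDLER_DO_DEFAULT;               // let PGM do the write
 *     }
 */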
1420
1421
1422/**
1423 * Resolves an MMIO2 page.
1424 *
1425 * Caller has taken the PGM lock.
1426 *
1427 * @returns Pointer to the page if valid, NULL otherwise
1428 * @param pVM The cross context VM structure.
1429 * @param pDevIns The device owning it.
1430 * @param hMmio2 The MMIO2 region.
1431 * @param offMmio2Page The offset into the region.
1432 */
1433static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1434{
1435 /* Only works if the handle is in the handle table! */
1436 AssertReturn(hMmio2 != 0, NULL);
1437 hMmio2--;
1438
1439 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1440 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1441 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1442 AssertReturn(pCur, NULL);
1443 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1444
1445 /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
1446 for (;;)
1447 {
1448#ifdef IN_RING3
1449 AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
1450#else
1451 AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1452#endif
1453
1454 /* Does it match the offset? */
1455 if (offMmio2Page < pCur->cbReal)
1456 return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1457
1458 /* Advance if we can. */
1459 AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1460 offMmio2Page -= pCur->cbReal;
1461 hMmio2++;
1462 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1463 pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1464 AssertReturn(pCur, NULL);
1465 }
1466}
1467
1468
1469/**
1470 * Replaces an MMIO page with an MMIO2 page.
1471 *
1472 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1473 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1474 * backing, the caller must provide a replacement page. For various reasons the
1475 * replacement page must be an MMIO2 page.
1476 *
1477 * The caller must do required page table modifications. You can get away
1478 * without making any modifications since it's an MMIO page, the cost is an extra
1479 * \#PF which will then resync the page.
1480 *
1481 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1482 *
1483 * The caller may still get handler callbacks even after this call and must be
1484 * able to deal correctly with such calls. The reason for these callbacks are
1485 * either that we're executing in the recompiler (which doesn't know about this
1486 * arrangement) or that we've been restored from saved state (where we won't
1487 * save the change).
1488 *
1489 * @returns VBox status code.
1490 * @param pVM The cross context VM structure.
1491 * @param GCPhys The start address of the access handler. This
1492 * must be a fully page aligned range or we risk
1493 * messing up other handlers installed for the
1494 * start and end pages.
1495 * @param GCPhysPage The physical address of the page to turn off
1496 * access monitoring for and replace with the MMIO2
1497 * page.
1498 * @param pDevIns The device instance owning @a hMmio2.
1499 * @param hMmio2 Handle to the MMIO2 region containing the page
1500 * to remap in the MMIO page at @a GCPhys.
1501 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1502 * should serve as backing memory.
1503 *
1504 * @remark May cause a page pool flush if used on a page that is already
1505 * aliased.
1506 *
1507 * @note This trick does only work reliably if the two pages are never ever
1508 * mapped in the same page table. If they are the page pool code will
1509 * be confused should either of them be flushed. See the special case
1510 * of zero page aliasing mentioned in #3170.
1511 *
1512 */
1513VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1514 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1515{
1516#ifdef VBOX_WITH_PGM_NEM_MODE
1517 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1518#endif
1519 int rc = PGM_LOCK(pVM);
1520 AssertRCReturn(rc, rc);
1521
1522 /*
1523 * Resolve the MMIO2 reference.
1524 */
1525 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1526 if (RT_LIKELY(pPageRemap))
1527 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1528 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1529 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1530 else
1531 {
1532 PGM_UNLOCK(pVM);
1533 return VERR_OUT_OF_RANGE;
1534 }
1535
1536 /*
1537 * Lookup and validate the range.
1538 */
1539 PPGMPHYSHANDLER pCur;
1540 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1541 if (RT_SUCCESS(rc))
1542 {
1543 Assert(pCur->Key == GCPhys);
1544 if (RT_LIKELY( GCPhysPage >= pCur->Key
1545 && GCPhysPage <= pCur->KeyLast))
1546 {
1547 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1548 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1549 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1550 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1551 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1552
1553 /*
1554 * Validate the page.
1555 */
1556 PPGMPAGE pPage;
1557 PPGMRAMRANGE pRam;
1558 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1559 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1560 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1561 {
1562 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1563 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1564 VERR_PGM_PHYS_NOT_MMIO2);
1565 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1566 {
1567 PGM_UNLOCK(pVM);
1568 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1569 }
1570
1571 /*
1572 * The page is already mapped as some other page, reset it
1573 * to an MMIO/ZERO page before doing the new mapping.
1574 */
1575 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1576 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1577 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
1578 pCur->cAliasedPages--;
1579 }
1580 Assert(PGM_PAGE_IS_ZERO(pPage));
1581
1582 /*
1583 * Do the actual remapping here.
1584 * This page now serves as an alias for the backing memory specified.
1585 */
1586 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1587 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1588 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1589 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1590 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1591 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1592 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1593 pCur->cAliasedPages++;
1594 Assert(pCur->cAliasedPages <= pCur->cPages);
1595
1596 /* Flush its TLB entry. */
1597 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1598
1599#ifdef VBOX_WITH_NATIVE_NEM
1600 /* Tell NEM about the backing and protection change. */
1601 if (VM_IS_NEM_ENABLED(pVM))
1602 {
1603 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1604 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1605 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1606 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1607 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1608 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1609 }
1610#endif
1611 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1612 PGM_UNLOCK(pVM);
1613 return VINF_SUCCESS;
1614 }
1615
1616 PGM_UNLOCK(pVM);
1617 AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1618 return VERR_INVALID_PARAMETER;
1619 }
1620
1621 PGM_UNLOCK(pVM);
1622 if (rc == VERR_NOT_FOUND)
1623 {
1624 AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1625 return VERR_PGM_HANDLER_NOT_FOUND;
1626 }
1627 return rc;
1628}
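
/*
 * Remapping sketch (hMmio2, offPage and GCPhysMmio are hypothetical): alias
 * one page of a monitored MMIO range to MMIO2 backing, and later restore it:
 *
 *     rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmio, GCPhysMmio + offPage,
 *                                           pDevIns, hMmio2, offPage);
 *     // ... guest accesses to that page are then served by the MMIO2 backing ...
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysMmio);  // restores the MMIO page(s)
 */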
1629
1630
1631/**
1632 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1633 *
1634 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1635 * need to be a known MMIO2 page and that only shadow paging may access the
1636 * page. The latter distinction is important because the only use for this
1637 * feature is for mapping the special APIC access page that VT-x uses to detect
1638 * APIC MMIO operations: the page is shared between all guest CPUs and is
1639 * actually never written to. At least at the moment.
1640 *
1641 * The caller must do the required page table modifications. You can get away
1642 * without making any modifications since it's an MMIO page; the cost is an
1643 * extra \#PF which will then resync the page.
1644 *
1645 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1646 *
1647 *
1648 * @returns VBox status code.
1649 * @param pVM The cross context VM structure.
1650 * @param GCPhys The start address of the access handler. The
1651 * handler range must be fully page aligned, or we
1652 * risk messing up other handlers installed for the
1653 * start and end pages.
1654 * @param GCPhysPage The physical address of the page to turn off
1655 * access monitoring for.
1656 * @param HCPhysPageRemap The physical address of the HC page that
1657 * serves as backing memory.
1658 *
1659 * @remark May cause a page pool flush if used on a page that is already
1660 * aliased.
1661 */
1662VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1663{
1664/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1665#ifdef VBOX_WITH_PGM_NEM_MODE
1666 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1667#endif
1668 int rc = PGM_LOCK(pVM);
1669 AssertRCReturn(rc, rc);
1670
1671 /*
1672 * Lookup and validate the range.
1673 */
1674 PPGMPHYSHANDLER pCur;
1675 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1676 if (RT_SUCCESS(rc))
1677 {
1678 Assert(pCur->Key == GCPhys);
1679 if (RT_LIKELY( GCPhysPage >= pCur->Key
1680 && GCPhysPage <= pCur->KeyLast))
1681 {
1682 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1683 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1684 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1685 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1686 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1687
1688 /*
1689 * Get and validate the page.
1690 */
1691 PPGMPAGE pPage;
1692 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1693 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1694 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1695 {
1696 PGM_UNLOCK(pVM);
1697 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1698 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1699 VERR_PGM_PHYS_NOT_MMIO2);
1700 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1701 }
1702 Assert(PGM_PAGE_IS_ZERO(pPage));
1703
1704 /*
1705 * Do the actual remapping here.
1706 * This page now serves as an alias for the backing memory
1707 * specified as far as shadow paging is concerned.
1708 */
1709 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1710 GCPhysPage, pPage, HCPhysPageRemap));
1711 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1712 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1713 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1714 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1715 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1716 pCur->cAliasedPages++;
1717 Assert(pCur->cAliasedPages <= pCur->cPages);
1718
1719 /* Flush its TLB entry. */
1720 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1721
1722#ifdef VBOX_WITH_NATIVE_NEM
1723 /* Tell NEM about the backing and protection change. */
1724 if (VM_IS_NEM_ENABLED(pVM))
1725 {
1726 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1727 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1728 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1729 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1730 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1731 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1732 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1733 }
1734#endif
1735 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1736 PGM_UNLOCK(pVM);
1737 return VINF_SUCCESS;
1738 }
1739 PGM_UNLOCK(pVM);
1740 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1741 return VERR_INVALID_PARAMETER;
1742 }
1743 PGM_UNLOCK(pVM);
1744
1745 if (rc == VERR_NOT_FOUND)
1746 {
1747 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1748 return VERR_PGM_HANDLER_NOT_FOUND;
1749 }
1750 return rc;
1751}
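
/*
 * Usage sketch (illustrative only): the intended user is the VT-x APIC access
 * page setup described above.  GCPhysApicAccess and HCPhysApicAccess are
 * hypothetical names for this example, not actual VMM variables.
 *
 * @code
 *      // An MMIO handler is assumed to already cover GCPhysApicAccess.
 *      int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicAccess,   // handler start
 *                                             GCPhysApicAccess,        // page to alias
 *                                             HCPhysApicAccess);
 *      AssertRCReturn(rc, rc);
 *      // PGMHandlerPhysicalReset(pVM, GCPhysApicAccess) undoes the aliasing.
 * @endcode
 */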
1752
1753
1754/**
1755 * Checks if a physical range is handled.
1756 *
1757 * @returns true if the range is handled, false if not.
1758 * @param pVM The cross context VM structure.
1759 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1760 * @remarks Caller must hold the PGM lock.
1761 * @thread EMT.
1762 */
1763VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1764{
1765 /*
1766 * Find the handler.
1767 */
1768 PGM_LOCK_VOID(pVM);
1769 PPGMPHYSHANDLER pCur;
1770 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1771 if (RT_SUCCESS(rc))
1772 {
1773#ifdef VBOX_STRICT
1774 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
1775 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1776 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1777 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1778 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
1779#endif
1780 PGM_UNLOCK(pVM);
1781 return true;
1782 }
1783 PGM_UNLOCK(pVM);
1784 return false;
1785}
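
/*
 * Usage sketch (illustrative): a cheap probe before operating on a handler
 * range; GCPhysRegion is a hypothetical address for the example.
 *
 * @code
 *      if (!PGMHandlerPhysicalIsRegistered(pVM, GCPhysRegion))
 *          return VERR_PGM_HANDLER_NOT_FOUND;      // nothing registered here
 * @endcode
 */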
1786
1787
1788/**
1789 * Checks whether there is a disabled all-access handler or a write-access
1790 * handler at the given address.
1791 *
1792 * @returns true if it's an all-access handler, false if it's a write-access
1793 * handler.
1794 * @param pVM The cross context VM structure.
1795 * @param GCPhys The address of the page with a disabled handler.
1796 *
1797 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1798 */
1799bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
1800{
1801 PGM_LOCK_VOID(pVM);
1802 PPGMPHYSHANDLER pCur;
1803 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1804 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
1805
1806 /* Only whole pages can be disabled. */
1807 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
1808 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
1809
1810 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1811 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1812 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1813 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
1814 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
1815 PGM_UNLOCK(pVM);
1816 return fRet;
1817}
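
/*
 * Usage sketch (illustrative and simplified, not the actual caller): the
 * documented user maps reads through a page whose WRITE handler is disabled,
 * but keeps taking the slow path when an ALL handler is present.
 *
 * @code
 *      if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
 *          return VERR_PGM_PHYS_TLB_CATCH_ALL;     // reads and writes both intercepted
 *      // Otherwise only writes are intercepted; a read-only mapping is fine.
 * @endcode
 */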
1818
1819#ifdef VBOX_STRICT
1820
1821/**
1822 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1823 * and its AVL enumerators.
1824 */
1825typedef struct PGMAHAFIS
1826{
1827 /** The current physical address. */
1828 RTGCPHYS GCPhys;
1829 /** Number of errors. */
1830 unsigned cErrors;
1831 /** Pointer to the VM. */
1832 PVM pVM;
1833} PGMAHAFIS, *PPGMAHAFIS;
1834
1835
1836/**
1837 * Asserts that the handler state of the pages (the ram range flags) is in
1838 * sync with the registered physical access handlers.
1839 *
1840 * @returns Number of mismatches.
1841 * @param pVM The cross context VM structure.
1842 */
1843VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
1844{
1845 PPGM pPGM = &pVM->pgm.s;
1846 PGMAHAFIS State;
1847 State.GCPhys = 0;
1848 State.cErrors = 0;
1849 State.pVM = pVM;
1850
1851 PGM_LOCK_ASSERT_OWNER(pVM);
1852
1853 /*
1854 * Check the RAM flags against the handlers.
1855 */
1856 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
1857 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
1858 {
1859 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
1860 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1861 {
1862 PGMPAGE const *pPage = &pRam->aPages[iPage];
1863 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1864 {
1865 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
1866
1867 /*
1868 * Physical first - calculate the state based on the handlers
1869 * active on the page, then compare.
1870 */
1871 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1872 {
1873 /* the first */
1874 PPGMPHYSHANDLER pPhys;
1875 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
1876 if (rc == VERR_NOT_FOUND)
1877 {
1878 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1879 State.GCPhys, &pPhys);
1880 if (RT_SUCCESS(rc))
1881 {
1882 Assert(pPhys->Key >= State.GCPhys);
1883 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
1884 pPhys = NULL;
1885 }
1886 else
1887 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1888 }
1889 else
1890 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1891
1892 if (pPhys)
1893 {
1894 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
1895 unsigned uState = pPhysType->uState;
1896
1897 /* more? */
1898 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1899 {
1900 PPGMPHYSHANDLER pPhys2;
1901 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1902 pPhys->KeyLast + 1, &pPhys2);
1903 if (rc == VERR_NOT_FOUND)
1904 break;
1905 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
1906 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1907 break;
1908 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
1909 uState = RT_MAX(uState, pPhysType2->uState);
1910 pPhys = pPhys2;
1911 }
1912
1913 /* Compare. */
1914 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1915 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1916 {
1917 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1918 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
1919 State.cErrors++;
1920 }
1921 }
1922 else
1923 {
1924 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1925 State.cErrors++;
1926 }
1927 }
1928 }
1929 } /* foreach page in ram range. */
1930 } /* foreach ram range. */
1931
1932 /*
1933 * Do the reverse check for physical handlers.
1934 */
1935 /** @todo */
1936
1937 return State.cErrors;
1938}
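
/*
 * Usage sketch (illustrative): strict builds can run the check from a debug
 * path while owning the PGM lock, e.g.:
 *
 * @code
 *      PGM_LOCK_VOID(pVM);
 *      unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
 *      PGM_UNLOCK(pVM);
 *      AssertMsg(!cErrors, ("%u handler/flag mismatches\n", cErrors));
 * @endcode
 */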
1939
1940#endif /* VBOX_STRICT */
1941