source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp @ r93666
Last change (r93666, checked in by vboxsync): VMM/PGM: Fixed regression from r149804 when pszDesc is NULL in ring-0. bugref:10094

/* $Id: PGMAllHandler.cpp 93666 2022-02-09 14:31:33Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/dbgf.h>
#ifdef IN_RING0
# include <VBox/vmm/pdmdev.h>
#endif
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Dummy physical access handler type record. */
CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
{
    /* .hType = */              UINT64_C(0x93b7557e1937aaff),
    /* .enmKind = */            PGMPHYSHANDLERKIND_INVALID,
    /* .uState = */             PGM_PAGE_HNDL_PHYS_STATE_ALL,
    /* .fKeepPgmLock = */       true,
    /* .fRing0DevInsIdx = */    false,
#ifdef IN_RING0
    /* .afPadding = */          {false},
    /* .pfnHandler = */         pgmR0HandlerPhysicalHandlerToRing3,
    /* .pfnPfHandler = */       pgmR0HandlerPhysicalPfHandlerToRing3,
#elif defined(IN_RING3)
    /* .fRing0Enabled = */      false,
    /* .pfnHandler = */         pgmR3HandlerPhysicalHandlerInvalid,
#else
# error "unsupported context"
#endif
    /* .pszDesc = */            "dummy"
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                           void *pvBitmap, uint32_t offBitmap);
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);


#ifndef IN_RING3

/**
 * @callback_method_impl{FNPGMPHYSHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                     RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, uErrorCode, pRegFrame, pvFault, GCPhysFault, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}

#endif /* !IN_RING3 */


/**
 * Creates a physical access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not all be updated because
 *          the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
 *          flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             The cross context VM structure.
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handlers (not pointer).
 * @param   pszDesc         Description of this handler. If NULL, the type
 *                          description will be used instead.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                               R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
{
    /*
     * Validate input.
     */
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
    AssertPtr(ppPhysHandler);

    Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));

    /*
     * Allocate and initialize the new entry.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_SUCCESS(rc))
    {
        pNew->Core.Key      = NIL_RTGCPHYS;
        pNew->Core.KeyLast  = NIL_RTGCPHYS;
        pNew->cPages        = 0;
        pNew->cAliasedPages = 0;
        pNew->cTmpOffPages  = 0;
        pNew->uUser         = uUser;
        pNew->hType         = hType;
        pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc
#ifdef IN_RING3
                            : pType->pszDesc;
#else
                            : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
#endif
        *ppPhysHandler = pNew;
        return VINF_SUCCESS;
    }

    return rc;
}


/**
 * Duplicates a physical access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandlerSrc The source handler to duplicate.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
{
    return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
                                      pPhysHandlerSrc->pszDesc, ppPhysHandler);
}


/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 */
int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Validate input.
     */
    PGMPHYSHANDLERTYPE const      hType = pPhysHandler->hType;
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);

    AssertPtr(pPhysHandler);

    Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
         hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
    AssertReturn(pPhysHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    switch (pType->enmKind)
    {
        case PGMPHYSHANDLERKIND_WRITE:
            break;
        case PGMPHYSHANDLERKIND_MMIO:
        case PGMPHYSHANDLERKIND_ALL:
            /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
            AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
            AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
            break;
        default:
            AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Core.Key     = GCPhys;
    pPhysHandler->Core.KeyLast = GCPhysLast;
    pPhysHandler->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

    PGM_LOCK_VOID(pVM);
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pPhysHandler->Core))
    {
        int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
        if (rc == VINF_PGM_SYNC_CR3)
            rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
        NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
#endif
        PGM_UNLOCK(pVM);

        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }
    PGM_UNLOCK(pVM);

    pPhysHandler->Core.Key     = NIL_RTGCPHYS;
    pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
                     GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not all be updated because
 *          the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
 *          flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handler.
 * @param   pszDesc         Description of this handler. If NULL, the type
 *                          description will be used instead.
 */
VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
                                        uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
{
#ifdef LOG_ENABLED
    PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
#endif

    PPGMPHYSHANDLER pNew;
    int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
        if (RT_SUCCESS(rc))
            return rc;
        pgmHandlerPhysicalExDestroy(pVM, pNew);
    }
    return rc;
}
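
/*
 * Illustrative sketch (not part of the original source): how a ring-3 caller
 * might register a write handler over a single guest page.  The type handle
 * hExampleType and the uUser cookie are hypothetical and assumed to come from
 * the caller's own PGMR3HandlerPhysicalTypeRegister() call.
 */
#if 0 /* example only */
static int exampleRegisterOnePageWriteHandler(PVMCC pVM, PGMPHYSHANDLERTYPE hExampleType,
                                              RTGCPHYS GCPhysPage, uint64_t uUser)
{
    /* GCPhysLast is inclusive, so a single guest page spans
       GCPhysPage .. GCPhysPage + GUEST_PAGE_SIZE - 1. */
    return PGMHandlerPhysicalRegister(pVM, GCPhysPage, GCPhysPage + GUEST_PAGE_SIZE - 1,
                                      hExampleType, uUser, "example write handler");
}
#endif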


/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could not all be updated because
 *          the guest page is aliased and/or mapped by multiple PTs. FFs set.
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The physical handler.
 * @param   pRam        The RAM range.
 * @param   pvBitmap    Dirty bitmap. Optional.
 * @param   offBitmap   Dirty bitmap offset.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                          void *pvBitmap, uint32_t offBitmap)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool                    fFlushTLBs = false;
    int                     rc         = VINF_SUCCESS;
    PCPGMPHYSHANDLERTYPEINT pCurType   = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    const unsigned          uState     = pCurType->uState;
    uint32_t                cPages     = pCur->cPages;
    uint32_t                i          = (pCur->Core.Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
    for (;;)
    {
        PPGMPAGE pPage = &pRam->aPages[i];
        AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));

        /* Only do upgrades. */
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);

            const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
            int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
                                               false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
                rc = rc2;

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            if (pvBitmap)
                ASMBitSet(pvBitmap, offBitmap);
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
        offBitmap++;
    }

    if (fFlushTLBs)
    {
        PGM_INVL_ALL_VCPU_TLBS(pVM);
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n",
             rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));

    return rc;
}


/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The handler to deregister (but not free).
 */
int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
{
    LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
             pPhysHandler->Core.Key, pPhysHandler->Core.KeyLast, R3STRING(pPhysHandler->pszDesc)));
    AssertReturn(pPhysHandler->Core.Key != NIL_RTGCPHYS, VERR_PGM_HANDLER_NOT_FOUND);

    /*
     * Remove the handler from the tree.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
                                                                    pPhysHandler->Core.Key);
    if (pRemoved == pPhysHandler)
    {
        /*
         * Clear the page bits, notify the REM about this change and clear
         * the cache.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;

        pPhysHandler->Core.Key     = NIL_RTGCPHYS;
        pPhysHandler->Core.KeyLast = NIL_RTGCPHYS;

        PGM_UNLOCK(pVM);

        return VINF_SUCCESS;
    }

    /*
     * Both of the failure conditions here are considered internal processing
     * errors because they can only be caused by race conditions or corruption.
     * If we ever need to handle concurrent deregistration, we have to move
     * the NIL_RTGCPHYS check inside the PGM lock.
     */
    if (pRemoved)
        RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pRemoved->Core);

    PGM_UNLOCK(pVM);

    if (!pRemoved)
        AssertMsgFailed(("Didn't find range starting at %RGp in the tree!\n", pPhysHandler->Core.Key));
    else
        AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
                         pPhysHandler->Core.Key, pRemoved, pPhysHandler));
    return VERR_PGM_HANDLER_IPE_1;
}


/**
 * Destroys (frees) a physical handler.
 *
 * The caller must deregister it before destroying it!
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHandler    The handler to free. NULL is ignored.
 */
int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
{
    if (pHandler)
    {
        AssertPtr(pHandler);
        AssertReturn(pHandler->Core.Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
        MMHyperFree(pVM, pHandler);
    }
    return VINF_SUCCESS;
}
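
/*
 * Illustrative sketch (not part of the original source): the intended life
 * cycle of the Ex API.  Create and register on setup; on teardown, deregister
 * before destroying, since pgmHandlerPhysicalExDestroy asserts VERR_WRONG_ORDER
 * for a still-registered handler.  hExampleType is a hypothetical type handle.
 */
#if 0 /* example only */
static int exampleHandlerLifeCycle(PVMCC pVM, PGMPHYSHANDLERTYPE hExampleType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    PPGMPHYSHANDLER pHandler;
    int rc = pgmHandlerPhysicalExCreate(pVM, hExampleType, 0 /*uUser*/, NIL_RTR3PTR /*pszDesc: use type desc*/, &pHandler);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhysLast);
        if (RT_SUCCESS(rc))
        {
            /* ... the handler is live here ... */
            rc = pgmHandlerPhysicalExDeregister(pVM, pHandler); /* removes it from the AVL tree */
        }
        pgmHandlerPhysicalExDestroy(pVM, pHandler);             /* frees it; must be deregistered first */
    }
    return rc;
}
#endif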


/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address.
 */
VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pRemoved = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pRemoved)
    {
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
                 pRemoved->Core.Key, pRemoved->Core.KeyLast, R3STRING(pRemoved->pszDesc)));

        /*
         * Clear the page bits, notify the REM about this change and clear
         * the cache.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;

        PGM_UNLOCK(pVM);

        pRemoved->Core.Key = NIL_RTGCPHYS;
        pgmHandlerPhysicalExDestroy(pVM, pRemoved);
        return VINF_SUCCESS;
    }

    PGM_UNLOCK(pVM);

    AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}


/**
 * Shared code with modify.
 */
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
#ifdef VBOX_WITH_NATIVE_NEM
    PCPGMPHYSHANDLERTYPEINT pCurType    = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    RTGCPHYS                GCPhysStart = pCur->Core.Key;
    RTGCPHYS                GCPhysLast  = pCur->Core.KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (   (pCur->Core.Key & GUEST_PAGE_OFFSET_MASK)
        || ((pCur->Core.KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
    {
        Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);

        if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (   GCPhys > GCPhysLast
                    || GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (   GCPhys < GCPhysStart
                    || GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell NEM.
     */
    PPGMRAMRANGE const pRam    = pgmPhysGetRange(pVM, GCPhysStart);
    RTGCPHYS const     cb      = GCPhysLast - GCPhysStart + 1;
    uint8_t            u2State = UINT8_MAX;
    NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
                                         pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
    if (u2State != UINT8_MAX && pRam)
        pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
                                   cb >> GUEST_PAGE_SHIFT, u2State);
#else
    RT_NOREF(pVM, pCur);
#endif
}


/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
 * edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
        if (   !pCur
            || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        uState = RT_MAX(uState, pCurType->uState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->Core.KeyLast + 1
                            : pCur->Core.Key - 1;
        if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority state than the current.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
        if (   RT_SUCCESS(rc)
            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            /* This should normally not be necessary. */
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
            bool fFlushTLBs;
            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
            if (RT_SUCCESS(rc) && fFlushTLBs)
                PGM_INVL_ALL_VCPU_TLBS(pVM);
            else
                AssertRC(rc);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
        }
        else
            AssertRC(rc);
    }
}


/**
 * Resets an aliased page.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPage           The page.
 * @param   GCPhysPage      The page address in case it comes in handy.
 * @param   pRam            The RAM range the page is associated with (for NEM
 *                          notifications).
 * @param   fDoAccounting   Whether to perform accounting. (Only set during
 *                          reset where pgmR3PhysRamReset doesn't have the
 *                          handler structure handy.)
 */
void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam, bool fDoAccounting)
{
    Assert(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#ifdef VBOX_WITH_NATIVE_NEM
    RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
#endif

    /*
     * Flush any shadow page table references *first*.
     */
    bool fFlushTLBs = false;
    int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
    AssertLogRelRCReturnVoid(rc);
    HMFlushTlbOnAllVCpus(pVM);

    /*
     * Make it an MMIO/Zero page.
     */
    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

    /* Flush its TLB entry. */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

    /*
     * Do accounting for pgmR3PhysRamReset.
     */
    if (fDoAccounting)
    {
        PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysPage);
        if (RT_LIKELY(pHandler))
        {
            Assert(pHandler->cAliasedPages > 0);
            pHandler->cAliasedPages--;
        }
        else
            AssertFailed();
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Tell NEM about the protection change.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                   NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(pRam);
#endif
}


/**
 * Resets ram range flags.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @param   pVM     The cross context VM structure.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've
 *          already got code in Trap0e which deals with out of sync handler
 *          flags (originally conceived for global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT       cPages   = pCur->cPages;
    RTGCPHYS     GCPhys   = pCur->Core.Key;
    PPGMRAMRANGE pRamHint = NULL;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
        {
            /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
            bool fNemNotifiedAlready = false;
            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
            {
                Assert(pCur->cAliasedPages > 0);
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/);
                pCur->cAliasedPages--;
                fNemNotifiedAlready = true;
            }
#ifdef VBOX_STRICT
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
                      ("%RGp %R[pgmpage]\n", GCPhys, pPage));
#endif
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection change. */
            if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            RT_NOREF(fNemNotifiedAlready);
        }
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += GUEST_PAGE_SIZE;
    }

    pCur->cAliasedPages = 0;
    pCur->cTmpOffPages  = 0;

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Core.Key & GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
}


#if 0 /* unused */
/**
 * Modify a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             The cross context VM structure.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
 */
VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        /** @todo pCurType validation. */
        bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
                                && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            if (   pRam
                && GCPhys <= pRam->GCPhysLast
                && GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key     = GCPhys;
                pCur->Core.KeyLast = GCPhysLast;
                pCur->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    RTGCPHYS const           cb      = GCPhysLast - GCPhys + 1;
                    PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;

                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

                    /** @todo NEM: not sure we need this notification... */
                    NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);

                    PGM_UNLOCK(pVM);

                    PGM_INVL_ALL_VCPU_TLBS(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, flush the cache and free it.
         * We've only gotta notify REM and free the memory.
         */
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;
        PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
#endif /* unused */


/**
 * Changes the user callback arguments associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  Start physical address of the handler.
 * @param   uUser   User argument to the handlers.
 */
VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
{
    /*
     * Find the handler and make the change.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        pCur->uUser = uUser;
        rc = VINF_SUCCESS;
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
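
/*
 * Illustrative sketch (not part of the original source): uUser is an opaque
 * 64-bit cookie, so a caller that keyed it to a pointer can retarget the
 * handler after relocating its state.  pNewState is hypothetical.
 */
#if 0 /* example only */
static int exampleRetargetHandler(PVMCC pVM, RTGCPHYS GCPhysHandlerStart, void *pNewState)
{
    /* How uUser is interpreted is entirely up to the handler callbacks. */
    return PGMHandlerPhysicalChangeUserArg(pVM, GCPhysHandlerStart, (uintptr_t)pNewState);
}
#endif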

#if 0 /* unused */

/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address.
 */
VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages   = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages       = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                PGM_UNLOCK(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_PGM_PHYS_HANDLER_IPE;
        }
        else
        {
            AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}


/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
    if (RT_LIKELY(pCur1))
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
        if (RT_LIKELY(pCur2))
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
            {
                if (RT_LIKELY(pCur1->hType == pCur2->hType))
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (RT_LIKELY(pCur3 == pCur2))
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages       = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pVM->pgm.s.pLastPhysHandlerR0 = 0;
                        pVM->pgm.s.pLastPhysHandlerR3 = 0;
                        PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
                        MMHyperFree(pVM, pCur2);
                        PGM_UNLOCK(pVM);
                        return VINF_SUCCESS;
                    }

                    Assert(pCur3 == pCur2);
                    rc = VERR_PGM_PHYS_HANDLER_IPE;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    return rc;
}

#endif /* unused */

/**
 * Resets any modifications to individual pages in a physical page access
 * handler region.
 *
 * This is used in conjunction with PGMHandlerPhysicalPageTempOff(),
 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The start address of the handler region, i.e. what you
 *                  passed to PGMR3HandlerPhysicalRegister(),
 *                  PGMHandlerPhysicalRegisterEx() or
 *                  PGMHandlerPhysicalModify().
 */
VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
{
    LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
    PGM_LOCK_VOID(pVM);

    /*
     * Find the handler.
     */
    int rc;
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        switch (pCurType->enmKind)
        {
            case PGMPHYSHANDLERKIND_WRITE:
            case PGMPHYSHANDLERKIND_ALL:
            case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
            {
                STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
                Assert(pRam);
                Assert(pRam->GCPhys     <= pCur->Core.Key);
                Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);

                if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
                {
                    /*
                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
                     * This could probably be optimized a bit wrt flushing, but I'm too lazy
                     * to do that now...
                     */
                    if (pCur->cAliasedPages)
                    {
                        PPGMPAGE pPage      = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
                        RTGCPHYS GCPhysPage = pCur->Core.Key;
                        uint32_t cLeft      = pCur->cPages;
                        while (cLeft-- > 0)
                        {
                            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
                            {
                                Assert(pCur->cAliasedPages > 0);
                                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
                                --pCur->cAliasedPages;
#ifndef VBOX_STRICT
                                if (pCur->cAliasedPages == 0)
                                    break;
#endif
                            }
                            Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
                            GCPhysPage += GUEST_PAGE_SIZE;
                            pPage++;
                        }
                        Assert(pCur->cAliasedPages == 0);
                    }
                }
                else if (pCur->cTmpOffPages > 0)
                {
                    /*
                     * Set the flags and flush shadow PT entries.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
                }

                pCur->cAliasedPages = 0;
                pCur->cTmpOffPages  = 0;

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            default:
                AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
                rc = VERR_PGM_PHYS_HANDLER_IPE;
                break;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
 * tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region.
 * @param   pvBitmap    Dirty bitmap. Caller has cleared this already, only
 *                      dirty bits will be set. Caller also made sure it's big
 *                      enough.
 * @param   offBitmap   Dirty bitmap offset.
 * @remarks Caller must own the PGM critical section.
 */
DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
{
    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Find the handler.
     */
    int rc;
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
        if (   pCurType
            && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));

            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            Assert(pRam);
            Assert(pRam->GCPhys     <= pCur->Core.Key);
            Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);

            /*
             * Set the flags and flush shadow PT entries.
             */
            if (pCur->cTmpOffPages > 0)
            {
                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
                pCur->cTmpOffPages = 0;
            }
            else
                rc = VINF_SUCCESS;
        }
        else
        {
            AssertFailed();
            rc = VERR_WRONG_TYPE;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    return rc;
}


/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page. Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The start address of the access handler. This
 *                          must be a fully page aligned range or we risk
 *                          messing up other handlers installed for the
 *                          start and end pages.
 * @param   GCPhysPage      The physical address of the page to turn off
 *                          access monitoring for.
 */
VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
    PGM_LOCK_VOID(pVM);

    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(   GCPhysPage >= pCur->Core.Key
                      && GCPhysPage <= pCur->Core.KeyLast))
        {
            Assert(!(pCur->Core.Key & GUEST_PAGE_OFFSET_MASK));
            Assert((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);

            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertReturnStmt(   pCurType
                             && (   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
                                 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
                             PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE     pPage;
            PPGMRAMRANGE pRam;
            int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            {
                PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
                pCur->cTmpOffPages++;

#ifdef VBOX_WITH_NATIVE_NEM
                /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
                if (VM_IS_NEM_ENABLED(pVM))
                {
                    uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                    PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                    NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                                   pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                    PGM_PAGE_SET_NEM_STATE(pPage, u2State);
                }
#endif
            }
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);
    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
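
/*
 * Illustrative sketch (not part of the original source): the dirty-tracking
 * pattern the doc comment above alludes to.  Once a write to a page has been
 * logged, monitoring for that page is switched off until the whole region is
 * re-armed with PGMHandlerPhysicalReset().  Error handling is omitted and the
 * addresses are hypothetical.
 */
#if 0 /* example only */
static void exampleDirtyPageTracking(PVMCC pVM, RTGCPHYS GCPhysRegion, RTGCPHYS GCPhysPage)
{
    /* In the write callback: no further \#PFs are needed for this page. */
    PGMHandlerPhysicalPageTempOff(pVM, GCPhysRegion, GCPhysPage);

    /* Later, after harvesting the dirty state: re-enable monitoring for all
       temporarily-off pages in the region in one go. */
    PGMHandlerPhysicalReset(pVM, GCPhysRegion);
}
#endif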


/**
 * Resolves an MMIO2 page.
 *
 * Caller has taken the PGM lock.
 *
 * @returns Pointer to the page if valid, NULL otherwise.
 * @param   pVM             The cross context VM structure.
 * @param   pDevIns         The device owning it.
 * @param   hMmio2          The MMIO2 region.
 * @param   offMmio2Page    The offset into the region.
 */
static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
{
    /* Only works if the handle is in the handle table! */
    AssertReturn(hMmio2 != 0, NULL);
    hMmio2--;

    /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
    PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
    AssertReturn(pCur, NULL);
    AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);

    /* Loop thru the sub-ranges till we find the one covering offMmio2. */
    for (;;)
    {
#ifdef IN_RING3
        AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
#else
        AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
#endif

        /* Does it match the offset? */
        if (offMmio2Page < pCur->cbReal)
            return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];

        /* Advance if we can. */
        AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
        offMmio2Page -= pCur->cbReal;
        hMmio2++;
        AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
        pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
        AssertReturn(pCur, NULL);
    }
}


/**
 * Replaces an MMIO page with an MMIO2 page.
 *
 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
 * backing, the caller must provide a replacement page. For various reasons the
 * replacement page must be an MMIO2 page.
 *
 * The caller must do the required page table modifications. You can get away
 * without making any modifications since it's an MMIO page; the cost is an extra
 * \#PF which will then resync the page.
 *
 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
 *
 * The caller may still get handler callbacks even after this call and must be
 * able to deal correctly with such calls. The reason for these callbacks is
 * either that we're executing in the recompiler (which doesn't know about this
 * arrangement) or that we've been restored from saved state (where we won't
 * save the change).
 *
 * @returns VBox status code.
 * @param   pVM                 The cross context VM structure.
 * @param   GCPhys              The start address of the access handler. This
 *                              must be a fully page aligned range or we risk
 *                              messing up other handlers installed for the
 *                              start and end pages.
 * @param   GCPhysPage          The physical address of the page to turn off
 *                              access monitoring for and replace with the MMIO2
 *                              page.
 * @param   pDevIns             The device instance owning @a hMmio2.
 * @param   hMmio2              Handle to the MMIO2 region containing the page
 *                              to remap in the MMIO page at @a GCPhys.
 * @param   offMmio2PageRemap   The offset into @a hMmio2 of the MMIO2 page that
 *                              should serve as backing memory.
 *
 * @remark  May cause a page pool flush if used on a page that is already
 *          aliased.
 *
 * @note    This trick only works reliably if the two pages are never ever
 *          mapped in the same page table. If they are, the page pool code will
 *          be confused should either of them be flushed. See the special case
 *          of zero page aliasing mentioned in #3170.
 *
 */
VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
                                              PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
{
#ifdef VBOX_WITH_PGM_NEM_MODE
    AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
#endif
    PGM_LOCK_VOID(pVM);

    /*
     * Resolve the MMIO2 reference.
     */
    PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
    if (RT_LIKELY(pPageRemap))
        AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
                            ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
                            PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
    else
    {
        PGM_UNLOCK(pVM);
        return VERR_OUT_OF_RANGE;
    }

    /*
     * Lookup and validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(   GCPhysPage >= pCur->Core.Key
                      && GCPhysPage <= pCur->Core.KeyLast))
        {
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
            AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
            AssertReturnStmt(!(pCur->Core.Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
            AssertReturnStmt((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
                             PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);

            /*
             * Validate the page.
             */
            PPGMPAGE     pPage;
            PPGMRAMRANGE pRam;
            int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
            {
                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
                                VERR_PGM_PHYS_NOT_MMIO2);
                if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
                {
                    PGM_UNLOCK(pVM);
                    return VINF_PGM_HANDLER_ALREADY_ALIASED;
                }

                /*
                 * The page is already mapped as some other page, reset it
                 * to an MMIO/ZERO page before doing the new mapping.
                 */
                Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
                     GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam, false /*fDoAccounting*/);
                pCur->cAliasedPages--;
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));

            /*
             * Do the actual remapping here.
             * This page now serves as an alias for the backing memory specified.
             */
            LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
                     GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
            PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
            PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
            PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
            pCur->cAliasedPages++;
            Assert(pCur->cAliasedPages <= pCur->cPages);

            /* Flush its TLB entry. */
            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the backing and protection change. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
                                           PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                           pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
                                           PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }

        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    PGM_UNLOCK(pVM);
    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
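
/*
 * Illustrative sketch (not part of the original source): aliasing one page of
 * an MMIO range to the MMIO2 page at the same offset, in the spirit of
 * IOMMMIOMapMMIO2Page.  GCPhysMmioStart, offPage and hMmio2 are hypothetical
 * values owned by the caller.
 */
#if 0 /* example only */
static int exampleAliasMmioPage(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
                                RTGCPHYS GCPhysMmioStart, RTGCPHYS offPage)
{
    /* Accesses to the aliased page then hit the MMIO2 backing directly until
       PGMHandlerPhysicalReset() flips the page back to MMIO/ZERO. */
    return PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmioStart, GCPhysMmioStart + offPage,
                                            pDevIns, hMmio2, offPage);
}
#endif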


/**
 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
 *
 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
 * need to be a known MMIO2 page and that only shadow paging may access the
 * page. The latter distinction is important because the only use for this
 * feature is for mapping the special APIC access page that VT-x uses to detect
 * APIC MMIO operations; the page is shared between all guest CPUs and actually
 * not written to. At least at the moment.
 *
 * The caller must do the required page table modifications. You can get away
 * without making any modifications since it's an MMIO page; the cost is an extra
 * \#PF which will then resync the page.
1582 *
1583 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1584 *
1585 *
1586 * @returns VBox status code.
1587 * @param pVM The cross context VM structure.
1588 * @param GCPhys The start address of the access handler. This
1589 * must be a fully page aligned range or we risk
1590 * messing up other handlers installed for the
1591 * start and end pages.
1592 * @param GCPhysPage The physical address of the page to turn off
1593 * access monitoring for.
1594 * @param HCPhysPageRemap The physical address of the HC page that
1595 * serves as backing memory.
1596 *
1597 * @remark May cause a page pool flush if used on a page that is already
1598 * aliased.
1599 */
VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
{
///    Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
#ifdef VBOX_WITH_PGM_NEM_MODE
    AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
#endif
    PGM_LOCK_VOID(pVM);

    /*
     * Lookup and validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(   GCPhysPage >= pCur->Core.Key
                      && GCPhysPage <= pCur->Core.KeyLast))
        {
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
            AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
            AssertReturnStmt(!(pCur->Core.Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
            AssertReturnStmt((pCur->Core.KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
                             PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);

            /*
             * Get and validate the pages.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
            {
                PGM_UNLOCK(pVM);
                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
                                VERR_PGM_PHYS_NOT_MMIO2);
                return VINF_PGM_HANDLER_ALREADY_ALIASED;
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));

            /*
             * Do the actual remapping here.
             * This page now serves as an alias for the backing memory
             * specified as far as shadow paging is concerned.
             */
            LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
                     GCPhysPage, pPage, HCPhysPageRemap));
            PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
            PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
            PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
            PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
            pCur->cAliasedPages++;
            Assert(pCur->cAliasedPages <= pCur->cPages);

            /* Flush its TLB entry. */
            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the backing and protection change. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
                uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
                                           PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                           pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
                                           PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}

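
/*
 * A minimal usage sketch for PGMHandlerPhysicalPageAliasHC (illustration only,
 * not part of the original source).  It shows how VT-x APIC virtualization
 * code might back the guest's APIC MMIO page with a dedicated host page.  The
 * names exampleMapApicAccessPage, GCPhysApicBase and HCPhysApicAccess, and the
 * assumption that an MMIO handler already covers the range, are hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleMapApicAccessPage(PVMCC pVM, RTGCPHYS GCPhysApicBase, RTHCPHYS HCPhysApicAccess)
{
    /* A PGMPHYSHANDLERKIND_MMIO handler must already be registered for the
       APIC range.  GCPhys and GCPhysPage coincide here because the APIC
       access page is a single page at the start of the handler range. */
    int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase, HCPhysApicAccess);
    if (RT_SUCCESS(rc))
    {
        /* Shadow page tables pick up the alias on the next #PF resync; call
           PGMHandlerPhysicalReset(pVM, GCPhysApicBase) to restore the MMIO page. */
    }
    return rc;
}
#endif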

/**
 * Checks if a physical range is handled.
 *
 * @returns boolean.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 * @remarks Caller must take the PGM lock...
 * @thread  EMT.
 */
VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
    if (pCur)
    {
#ifdef VBOX_STRICT
        Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        Assert(   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
               || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
               || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
#endif
        PGM_UNLOCK(pVM);
        return true;
    }
    PGM_UNLOCK(pVM);
    return false;
}

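/*
 * A small usage sketch (illustration only, not from the original source):
 * probing whether a physical address is covered by a registered handler
 * before touching it directly.  The function name and device-emulation
 * context are hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleRoutedWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
{
    if (PGMHandlerPhysicalIsRegistered(pVM, GCPhys))
    {
        /* Some handler (write, all or MMIO) covers this page - go through
           PGMPhysWrite so the handler is invoked. */
        return VBOXSTRICTRC_VAL(PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite, PGMACCESSORIGIN_DEVICE));
    }
    /* Plain RAM as far as handlers are concerned; direct access is fine. */
    return VINF_SUCCESS;
}
#endif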

/**
 * Checks if it's a disabled all access handler or write access handler at the
 * given address.
 *
 * @returns true if it's an all access handler, false if it's a write access
 *          handler.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The address of the page with a disabled handler.
 *
 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
 */
bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
    if (!pCur)
    {
        PGM_UNLOCK(pVM);
        AssertFailed();
        return true;
    }

    /* Only whole pages can be disabled. */
    Assert(   pCur->Core.Key     <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
           && pCur->Core.KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));

    PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    Assert(   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
           || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
           || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
    bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
    PGM_UNLOCK(pVM);
    return fRet;
}

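/*
 * A minimal sketch of the intended use (illustration only): the TLB code
 * deciding whether a page with a disabled handler may still be read
 * directly.  The status codes shown are assumptions about the caller's
 * conventions, not taken from this file.
 */
#if 0 /* illustrative sketch, not compiled */
/* Inside a PGMR3PhysTlbGCPhys2Ptr-like lookup, rc tells the caller how it
   may access a page whose handler is currently disabled: */
    if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
        rc = VERR_PGM_PHYS_TLB_CATCH_ALL;   /* all access: reads must trap too */
    else
        rc = VINF_PGM_PHYS_TLB_CATCH_WRITE; /* write only: reads can go direct */
#endif
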
#ifdef VBOX_STRICT

/**
 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
 * and its AVL enumerators.
 */
typedef struct PGMAHAFIS
{
    /** The current physical address. */
    RTGCPHYS    GCPhys;
    /** Number of errors. */
    unsigned    cErrors;
    /** Pointer to the VM. */
    PVM         pVM;
} PGMAHAFIS, *PPGMAHAFIS;


/**
 * Asserts that the handlers+guest-page-tables == ramrange-flags and
 * that the physical addresses associated with virtual handlers are correct.
 *
 * @returns Number of mismatches.
 * @param   pVM     The cross context VM structure.
 */
VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
{
    PPGM        pPGM = &pVM->pgm.s;
    PGMAHAFIS   State;
    State.GCPhys  = 0;
    State.cErrors = 0;
    State.pVM     = pVM;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Check the RAM flags against the handlers.
     */
    for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
    {
        const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            PGMPAGE const *pPage = &pRam->aPages[iPage];
            if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            {
                State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);

                /*
                 * Physical first - calculate the state based on the handlers
                 * active on the page, then compare.
                 */
                if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                {
                    /* The first handler overlapping the page. */
                    PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
                    if (!pPhys)
                    {
                        pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
                        if (   pPhys
                            && pPhys->Core.Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
                            pPhys = NULL;
                        Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
                    }
                    if (pPhys)
                    {
                        PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
                        unsigned uState = pPhysType->uState;

                        /* More handlers may cover the same guest page; aggregate the strictest state. */
                        while (pPhys->Core.KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
                        {
                            PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
                                                                                              pPhys->Core.KeyLast + 1, true);
                            if (   !pPhys2
                                || pPhys2->Core.Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
                                break;
                            PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
                            uState = RT_MAX(uState, pPhysType2->uState);
                            pPhys = pPhys2;
                        }

                        /* Compare the aggregated handler state with the page's flags. */
                        if (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
                            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                        {
                            AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
                            State.cErrors++;
                        }
                    }
                    else
                    {
                        AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
                        State.cErrors++;
                    }
                }
            }
        } /* foreach page in ram range. */
    } /* foreach ram range. */

    /*
     * Do the reverse check for physical handlers.
     */
    /** @todo */

    return State.cErrors;
}

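/*
 * How a strict-build consistency check might be wired up (illustration only;
 * the call site is hypothetical).  The caller must own the PGM lock, and a
 * non-zero return indicates handler vs page-flag mismatches.
 */
# if 0 /* illustrative sketch, not compiled */
    PGM_LOCK_ASSERT_OWNER(pVM);
    unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
    AssertMsg(cErrors == 0, ("%u handler vs page-flag mismatches\n", cErrors));
# endif
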
#endif /* VBOX_STRICT */
