VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 1272

Last change on this file since 1272 was 138, checked in by vboxsync, 18 years ago

64-bit

  • Property svn:keywords set to Id
File size: 40.2 KB
1/* $Id: PGMAllHandler.cpp 138 2007-01-18 14:59:18Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/em.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
36#include "PGMInternal.h"
37#include <VBox/vm.h>
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/selm.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PPGMPHYSHANDLER pCur);
52static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
53static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
54static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
55
56
57
58/**
59 * Register an access handler for a physical range.
60 *
61 * @returns VBox status code.
62 * @retval VINF_SUCCESS when successfully installed.
63 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
64 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
65 * flagged together with a pool clearing.
66 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
67 * one. A debug assertion is raised.
68 *
69 * @param pVM VM Handle.
70 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
71 * @param GCPhys Start physical address.
72 * @param GCPhysLast Last physical address. (inclusive)
73 * @param pfnHandlerR3 The R3 handler.
74 * @param pvUserR3 User argument to the R3 handler.
75 * @param pfnHandlerR0 The R0 handler.
76 * @param pvUserR0 User argument to the R0 handler.
77 * @param pfnHandlerGC The GC handler.
78 * @param pvUserGC User argument to the GC handler.
79 * This must be a GC pointer because it will be relocated!
80 * @param pszDesc Pointer to description string. This must not be freed.
81 */
82PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
83 HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTHCPTR pvUserR3,
84 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTHCPTR pvUserR0,
85 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
86 HCPTRTYPE(const char *) pszDesc)
87{
88 Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%VGp GCPhysLast=%VGp pfnHandlerR3=%VHv pvUserR3=%VHv pfnHandlerR0=%VHv pvUserR0=%VHv pfnHandlerGC=%VGv pvUserGC=%VGv pszDesc=%s\n",
89 enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerGC, pvUserGC, HCSTRING(pszDesc)));
90
91 /*
92 * Validate input.
93 */
94 if (GCPhys >= GCPhysLast)
95 {
96 AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
97 return VERR_INVALID_PARAMETER;
98 }
99 switch (enmType)
100 {
101 case PGMPHYSHANDLERTYPE_MMIO:
102 case PGMPHYSHANDLERTYPE_PHYSICAL:
103 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
104 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
105 break;
106 default:
107 AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
108 return VERR_INVALID_PARAMETER;
109 }
110 if (!pfnHandlerGC)
111 {
112 AssertMsgFailed(("!pfnHandlerGC\n"));
113 return VERR_INVALID_PARAMETER;
114 }
115 if ( (RTGCUINTPTR)pvUserGC >= 0x10000
116 && MMHyperHC2GC(pVM, MMHyperGC2HC(pVM, pvUserGC)) != pvUserGC)
117 {
118 AssertMsgFailed(("Not GC pointer! pvUserGC=%VGv\n", pvUserGC));
119 return VERR_INVALID_PARAMETER;
120 }
121
122 /*
123 * We require the range to be within registered ram.
124 * There is no apparent need to support ranges which cover more than one ram range.
125 */
126 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
127 while (pRam && GCPhys > pRam->GCPhysLast)
128 pRam = CTXSUFF(pRam->pNext);
129 if ( !pRam
130 || GCPhysLast < pRam->GCPhys
131 || GCPhys > pRam->GCPhysLast)
132 {
133#ifdef IN_RING3
134 /*
135 * If this is an MMIO registration, we'll just add a range for it.
136 */
137 if ( enmType == PGMPHYSHANDLERTYPE_MMIO
138 && ( !pRam
139 || GCPhysLast < pRam->GCPhys)
140 )
141 {
142 size_t cb = GCPhysLast - GCPhys + 1;
143 Assert(cb == RT_ALIGN_Z(cb, PAGE_SIZE));
144 int rc = PGMR3PhysRegister(pVM, NULL, GCPhys, cb, MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL, pszDesc);
145 if (VBOX_FAILURE(rc))
146 return rc;
147
148 /* search again. */
149 pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
150 while (pRam && GCPhys > pRam->GCPhysLast)
151 pRam = CTXSUFF(pRam->pNext);
152 }
153
154 if ( !pRam
155 || GCPhysLast < pRam->GCPhys
156 || GCPhys > pRam->GCPhysLast)
157#endif /* IN_RING3 */
158 {
159#ifdef IN_RING3
160 DBGFR3Info(pVM, "phys", NULL, NULL);
161#endif
162 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
163 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
164 }
165 }
166
167 /*
168 * Allocate and initialize the new entry.
169 */
170 PPGMPHYSHANDLER pNew;
171 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
172 if (VBOX_FAILURE(rc))
173 return rc;
174
175 pNew->Core.Key = GCPhys;
176 pNew->Core.KeyLast = GCPhysLast;
177 pNew->enmType = enmType;
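 /* Number of guest pages touched: from the page containing GCPhys up to and
    including the page containing the (inclusive) GCPhysLast. */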
178 pNew->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
179 pNew->pfnHandlerR3 = pfnHandlerR3;
180 pNew->pvUserR3 = pvUserR3;
181 pNew->pfnHandlerR0 = pfnHandlerR0;
182 pNew->pvUserR0 = pvUserR0;
183 pNew->pfnHandlerGC = pfnHandlerGC;
184 pNew->pvUserGC = pvUserGC;
185 pNew->pszDesc = pszDesc;
186
187 pgmLock(pVM);
188
189 /*
190 * Try insert into list.
191 */
192 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
193 {
194 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
195 if (rc == VINF_PGM_GCPHYS_ALIASED)
196 {
197 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
198 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
199 }
200 pVM->pgm.s.fPhysCacheFlushPending = true;
201#ifndef IN_RING3
202 REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
203#else
204 REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
205#endif
206 pgmUnlock(pVM);
207 if (rc != VINF_SUCCESS)
208 Log(("PGMHandlerPhysicalRegisterEx: returns %Vrc (%VGp-%VGp)\n", rc, GCPhys, GCPhysLast));
209 return rc;
210 }
211 pgmUnlock(pVM);
212
213#if defined(IN_RING3) && defined(VBOX_STRICT)
214 DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
215#endif
216 AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
217 MMHyperFree(pVM, pNew);
218 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
219}
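
/*
 * Usage sketch, illustrative only: the device, callback and address names below are
 * hypothetical and not part of this file. It shows a caller installing a write handler
 * over a single guest page; note that the GC handler and the GC user pointer are
 * mandatory, and that GCPhysLast is the inclusive last byte of the range.
 *
 *      rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                        GCPhysFrameBuf, GCPhysFrameBuf + PAGE_SIZE - 1,
 *                                        devFbWriteHandlerR3, pDevIns,
 *                                        devFbWriteHandlerR0, pDevIns,
 *                                        pfnDevFbWriteHandlerGC, pvUserGC,
 *                                        "Frame buffer write monitoring");
 *      AssertRC(rc);   (VINF_PGM_GCPHYS_ALIASED is a success status; the required
 *                       CR3 sync and pool clearing have already been flagged.)
 */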
220
221
222/**
223 * Sets ram range flags and attempts updating shadow PTs.
224 *
225 * @returns VBox status code.
226 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
227 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
228 * the guest page is aliased and/or mapped by multiple PTs.
229 * @param pVM The VM handle.
230 * @param pCur The physical handler.
 * @param pRam The RAM range which the handler range falls within.
231 */
232static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
233{
234 /*
235 * Iterate the guest ram pages updating the flags and flushing PT entries
236 * mapping the page.
237 */
238 bool fFlushTLBs = false;
239#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
240 int rc = VINF_SUCCESS;
241#else
242 const int rc = VINF_PGM_GCPHYS_ALIASED;
243#endif
244 const unsigned fFlags = pgmHandlerPhysicalCalcFlags(pCur); Assert(!(fFlags & X86_PTE_PAE_PG_MASK));
245 RTUINT cPages = pCur->cPages;
246 RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
247 for (;;)
248 {
249 /* Physical chunk in dynamically allocated range not present? */
250 if (RT_UNLIKELY(!(pRam->aHCPhys[i] & X86_PTE_PAE_PG_MASK)))
251 {
252 RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
253#ifdef IN_RING3
254 int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
255#else
256 int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
257#endif
258 if (rc2 != VINF_SUCCESS)
259 return rc2;
260 }
261
262 if ((pRam->aHCPhys[i] & fFlags) != fFlags)
263 {
264 pRam->aHCPhys[i] |= fFlags;
265
266 Assert(pRam->aHCPhys[i] & X86_PTE_PAE_PG_MASK);
267
268#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
269 /* This code also makes ASSUMPTIONS about the cRefs and stuff. */
270 Assert(MM_RAM_FLAGS_IDX_SHIFT < MM_RAM_FLAGS_CREFS_SHIFT);
271 const uint16_t u16 = pRam->aHCPhys[i] >> MM_RAM_FLAGS_IDX_SHIFT;
272 if (u16)
273 {
274 if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
275 pgmPoolTrackFlushGCPhysPT(pVM,
276 &pRam->aHCPhys[i],
277 u16 & MM_RAM_FLAGS_IDX_MASK,
278 u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
279 else if (u16 != ((MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | MM_RAM_FLAGS_IDX_OVERFLOWED))
280 pgmPoolTrackFlushGCPhysPTs(pVM, &pRam->aHCPhys[i], u16 & MM_RAM_FLAGS_IDX_MASK);
281 else
282 rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, &pRam->aHCPhys[i]);
283 fFlushTLBs = true;
284 }
285#elif defined(PGMPOOL_WITH_CACHE)
286 rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, &pRam->aHCPhys[i]);
287 fFlushTLBs = true;
288#endif
289 }
290
291 /* next */
292 if (--cPages == 0)
293 break;
294 i++;
295 }
296
297 if (fFlushTLBs && rc == VINF_SUCCESS)
298 {
299 PGM_INVL_GUEST_TLBS();
300 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
301 }
302 else
303 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Vrc\n", rc));
304 return rc;
305}
306
307
308/**
309 * Deregister a physical page access handler.
310 *
311 * @returns VBox status code.
312 * @param pVM VM Handle.
313 * @param GCPhys Start physical address.
314 */
315PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
316{
317 /*
318 * Find the handler.
319 */
320 pgmLock(pVM);
321 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
322 if (pCur)
323 {
324 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %#VGp-%#VGp %s\n",
325 pCur->Core.Key, pCur->Core.KeyLast, HCSTRING(pCur->pszDesc)));
326
327 /*
328 * Clear the page bits and notify the REM about this change.
329 */
330 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
331 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
332 pgmUnlock(pVM);
333 MMHyperFree(pVM, pCur);
334 return VINF_SUCCESS;
335 }
336 pgmUnlock(pVM);
337
338 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
339 return VERR_PGM_HANDLER_NOT_FOUND;
340}
341
342
343/**
344 * Worker shared by deregistration and modification: page aligns the range and notifies REM.
345 */
346static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
347{
348 RTGCPHYS GCPhysStart = pCur->Core.Key;
349 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
350
351 /*
352 * Page align the range.
353 */
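 /* Illustrative numbers only: a handler covering 10200h-1f0ffh is announced to REM as
    10000h-1ffffh when no other handler shares the partial first and last pages; if the
    partial first page is still flagged (shared with another handler), the start is
    instead rounded up to 11000h so that page is left untouched. */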
354 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
355 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
356 {
357 if (GCPhysStart & PAGE_OFFSET_MASK)
358 {
359 if (PGMRamTestFlags(&pVM->pgm.s, GCPhysStart, MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF))
360 {
361 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
362 if ( GCPhys > GCPhysLast
363 || GCPhys < GCPhysStart)
364 return;
365 GCPhysStart = GCPhys;
366 }
367 else
368 GCPhysStart = GCPhysStart & X86_PTE_PAE_PG_MASK;
369 }
370 if (GCPhysLast & PAGE_OFFSET_MASK)
371 {
372 if (PGMRamTestFlags(&pVM->pgm.s, GCPhysLast, MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF))
373 {
374 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
375 if ( GCPhys < GCPhysStart
376 || GCPhys > GCPhysLast)
377 return;
378 GCPhysLast = GCPhys;
379 }
380 else
381 GCPhysLast += PAGE_SIZE - 1 - (GCPhysLast & PAGE_OFFSET_MASK);
382 }
383 }
384
385 /*
386 * Tell REM.
387 */
388 RTHCPTR pvRange = 0;
389 if (pCur->pfnHandlerR3 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO)
390 PGMRamGCPhys2HCPtr(&pVM->pgm.s, GCPhysStart, &pvRange); /* ASSUMES it doesn't change pvRange on failure. */
391#ifndef IN_RING3
392 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, pvRange);
393#else
394 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, pvRange);
395#endif
396}
397
398
399/**
400 * Resets ram range flags.
401 *
404 * @param pVM The VM handle.
405 * @param pCur The physical handler.
406 *
407 * @remark We don't start messing with the shadow page tables, as we've already got code
408 * in Trap0e which deals with out of sync handler flags (originally conceived for
409 * global pages).
410 */
411static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
412{
413 /*
414 * Iterate the guest ram pages, clearing the handler related flags.
415 * (The shadow PTs are left alone; see the remark in the function header.)
416 */
417 RTUINT cPages = pCur->cPages;
418 RTGCPHYS GCPhys = pCur->Core.Key;
419 PPGMRAMRANGE pRamHint = NULL;
420 PPGM pPGM = &pVM->pgm.s;
421 for (;;)
422 {
423 PGMRamFlagsClearByGCPhysWithHint(pPGM, GCPhys,
424 MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL,
425 &pRamHint);
426 /* next */
427 if (--cPages == 0)
428 break;
429 GCPhys += PAGE_SIZE;
430 }
431
432 /*
433 * Check for partial start page.
434 */
435 if (pCur->Core.Key & PAGE_OFFSET_MASK)
436 {
437 RTGCPHYS GCPhys = pCur->Core.Key - 1;
438 for (;;)
439 {
440 PPGMPHYSHANDLER pBelow = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys, false);
441 if ( !pBelow
442 || (pBelow->Core.KeyLast >> PAGE_SHIFT) != (pCur->Core.Key >> PAGE_SHIFT))
443 break;
444 PGMRamFlagsSetByGCPhysWithHint(pPGM, GCPhys, pgmHandlerPhysicalCalcFlags(pCur), &pRamHint);
445
446 /* next? */
447 if ( (pBelow->Core.Key >> PAGE_SHIFT) != (pCur->Core.Key >> PAGE_SHIFT)
448 || !(pBelow->Core.Key & PAGE_OFFSET_MASK))
449 break;
450 GCPhys = pBelow->Core.Key - 1;
451 }
452 }
453
454 /*
455 * Check for partial end page.
456 */
457 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
458 {
459 RTGCPHYS GCPhys = pCur->Core.KeyLast + 1;
460 for (;;)
461 {
462 PPGMPHYSHANDLER pAbove = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys, true);
463 if ( !pAbove
464 || (pAbove->Core.Key >> PAGE_SHIFT) != (pCur->Core.KeyLast >> PAGE_SHIFT))
465 break;
466 PGMRamFlagsSetByGCPhysWithHint(pPGM, GCPhys, pgmHandlerPhysicalCalcFlags(pCur), &pRamHint);
467
468 /* next? */
469 if ( (pAbove->Core.KeyLast >> PAGE_SHIFT) != (pCur->Core.KeyLast >> PAGE_SHIFT)
470 || (pAbove->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_SIZE - 1)
471 break;
472 GCPhys = pAbove->Core.KeyLast + 1;
473 }
474 }
475}
476
477
478/**
479 * Modify a physical page access handler.
480 *
481 * Modification can only be done to the range itself, not the type or anything else.
482 *
483 * @returns VBox status code.
484 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
485 * and a new registration must be performed!
486 * @param pVM VM handle.
487 * @param GCPhysCurrent Current location.
488 * @param GCPhys New location.
489 * @param GCPhysLast New last location.
490 */
491PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
492{
493 /*
494 * Remove it.
495 */
496 int rc;
497 pgmLock(pVM);
498 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhysCurrent);
499 if (pCur)
500 {
501 /*
502 * Clear the ram flags. (We're gonna move or free it!)
503 */
504 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
505 RTHCPTR pvRange = 0;
506 if (pCur->pfnHandlerR3 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO)
507 PGMRamGCPhys2HCPtr(&pVM->pgm.s, GCPhysCurrent, &pvRange); /* ASSUMES it doesn't change pvRange on failure. */
508
509 /*
510 * Validate the new range, modify and reinsert.
511 */
512 if (GCPhysLast >= GCPhys)
513 {
514 /*
515 * We require the range to be within registered ram.
516 * There is no apparent need to support ranges which cover more than one ram range.
517 */
518 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
519 while (pRam && GCPhys > pRam->GCPhysLast)
520 pRam = CTXSUFF(pRam->pNext);
521 if ( pRam
522 && GCPhys <= pRam->GCPhysLast
523 && GCPhysLast >= pRam->GCPhys)
524 {
525 pCur->Core.Key = GCPhys;
526 pCur->Core.KeyLast = GCPhysLast;
527 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
528
529 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pCur->Core))
530 {
531 /*
532 * Set ram flags, flush shadow PT entries and finally tell REM about this.
533 */
534 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
535 if (rc == VINF_PGM_GCPHYS_ALIASED)
536 {
537 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
538 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
539 }
540 pVM->pgm.s.fPhysCacheFlushPending = true;
541
542#ifndef IN_RING3
543 REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
544 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, pvRange);
545#else
546 REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
547 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, pvRange);
548#endif
549 pgmUnlock(pVM);
550 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%VGp -> GCPhys=%VGp GCPhysLast=%VGp\n",
551 GCPhysCurrent, GCPhys, GCPhysLast));
552 return VINF_SUCCESS;
553 }
554 AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp\n", GCPhys, GCPhysLast));
555 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
556 }
557 else
558 {
559 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
560 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
561 }
562 }
563 else
564 {
565 AssertMsgFailed(("Invalid range %VGp-%VGp\n", GCPhys, GCPhysLast));
566 rc = VERR_INVALID_PARAMETER;
567 }
568
569 /*
570 * Invalid new location, free it.
571 * We've only gotta notify REM and free the memory.
572 */
573 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
574 MMHyperFree(pVM, pCur);
575 }
576 else
577 {
578 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhysCurrent));
579 rc = VERR_PGM_HANDLER_NOT_FOUND;
580 }
581
582 pgmUnlock(pVM);
583 return rc;
584}
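
/*
 * Illustrative only (the device state and field names are hypothetical): a typical use is
 * relocating the handler when the guest remaps the region it covers, e.g. after a PCI BAR
 * change. On any failure other than VERR_PGM_HANDLER_NOT_FOUND the old range has already
 * been deregistered and must be registered from scratch.
 *
 *      rc = PGMHandlerPhysicalModify(pVM, pThis->GCPhysCur, GCPhysNew, GCPhysNew + pThis->cb - 1);
 *      if (VBOX_SUCCESS(rc))
 *          pThis->GCPhysCur = GCPhysNew;
 */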
585
586
587/**
588 * Changes the callbacks associated with a physical access handler.
589 *
590 * @returns VBox status code.
591 * @param pVM VM Handle.
592 * @param GCPhys Start physical address.
593 * @param pfnHandlerR3 The R3 handler.
594 * @param pvUserR3 User argument to the R3 handler.
595 * @param pfnHandlerR0 The R0 handler.
596 * @param pvUserR0 User argument to the R0 handler.
597 * @param pfnHandlerGC The GC handler.
598 * @param pvUserGC User argument to the GC handler.
599 * This must be a GC pointer because it will be relocated!
600 * @param pszDesc Pointer to description string. This must not be freed.
601 */
602PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
603 HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTHCPTR pvUserR3,
604 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTHCPTR pvUserR0,
605 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
606 HCPTRTYPE(const char *) pszDesc)
607{
608 /*
609 * Get the handler.
610 */
611 int rc = VINF_SUCCESS;
612 pgmLock(pVM);
613 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
614 if (pCur)
615 {
616 /*
617 * Change callbacks.
618 */
619 pCur->pfnHandlerR3 = pfnHandlerR3;
620 pCur->pvUserR3 = pvUserR3;
621 pCur->pfnHandlerR0 = pfnHandlerR0;
622 pCur->pvUserR0 = pvUserR0;
623 pCur->pfnHandlerGC = pfnHandlerGC;
624 pCur->pvUserGC = pvUserGC;
625 pCur->pszDesc = pszDesc;
626 }
627 else
628 {
629 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
630 rc = VERR_PGM_HANDLER_NOT_FOUND;
631 }
632
633 pgmUnlock(pVM);
634 return rc;
635}
636
637
638/**
639 * Splits a physical access handler in two.
640 *
641 * @returns VBox status code.
642 * @param pVM VM Handle.
643 * @param GCPhys Start physical address of the handler.
644 * @param GCPhysSplit The split address.
645 */
646PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
647{
648 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
649
650 /*
651 * Do the allocation without owning the lock.
652 */
653 PPGMPHYSHANDLER pNew;
654 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
655 if (VBOX_FAILURE(rc))
656 return rc;
657
658 /*
659 * Get the handler.
660 */
661 pgmLock(pVM);
662 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
663 if (pCur)
664 {
665 if (GCPhysSplit <= pCur->Core.KeyLast)
666 {
667 /*
668 * Create new handler node for the 2nd half.
669 */
670 *pNew = *pCur;
671 pNew->Core.Key = GCPhysSplit;
672 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
673
674 pCur->Core.KeyLast = GCPhysSplit - 1;
675 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
676
677 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
678 {
679 LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
680 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
681 pgmUnlock(pVM);
682 return VINF_SUCCESS;
683 }
684 AssertMsgFailed(("whu?\n"));
685 rc = VERR_INTERNAL_ERROR;
686 }
687 else
688 {
689 AssertMsgFailed(("outside range: %VGp-%VGp split %VGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
690 rc = VERR_INVALID_PARAMETER;
691 }
692 }
693 else
694 {
695 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
696 rc = VERR_PGM_HANDLER_NOT_FOUND;
697 }
698 pgmUnlock(pVM);
699 MMHyperFree(pVM, pNew);
700 return rc;
701}
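
/*
 * Worked example with made-up addresses: splitting a handler covering a0000h-a7fffh at
 * a4000h leaves the original node as a0000h-a3fffh (cPages = 4) and inserts a new node
 * covering a4000h-a7fffh (cPages = 4); both halves keep the same callbacks and user data.
 */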
702
703
704/**
705 * Joins up two adjacent physical access handlers which have the same callbacks.
706 *
707 * @returns VBox status code.
708 * @param pVM VM Handle.
709 * @param GCPhys1 Start physical address of the first handler.
710 * @param GCPhys2 Start physical address of the second handler.
711 */
712PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
713{
714 /*
715 * Get the handlers.
716 */
717 int rc;
718 pgmLock(pVM);
719 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys1);
720 if (pCur1)
721 {
722 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
723 if (pCur2)
724 {
725 /*
726 * Make sure that they are adjacent, and that they've got the same callbacks.
727 */
728 if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
729 {
730 if ( pCur1->pfnHandlerGC == pCur2->pfnHandlerGC
731 && pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
732 && pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
733 {
734 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
735 if (pCur3 == pCur2)
736 {
737 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
738 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
739 LogFlow(("PGMHandlerPhysicalJoin: %VGp-%VGp %VGp-%VGp\n",
740 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
741 pgmUnlock(pVM);
742 MMHyperFree(pVM, pCur2);
743 return VINF_SUCCESS;
744 }
745 Assert(pCur3 == pCur2);
746 rc = VERR_INTERNAL_ERROR;
747 }
748 else
749 {
750 AssertMsgFailed(("mismatching handlers\n"));
751 rc = VERR_ACCESS_DENIED;
752 }
753 }
754 else
755 {
756 AssertMsgFailed(("not adjacent: %VGp-%VGp %VGp-%VGp\n",
757 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
758 rc = VERR_INVALID_PARAMETER;
759 }
760 }
761 else
762 {
763 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys2));
764 rc = VERR_PGM_HANDLER_NOT_FOUND;
765 }
766 }
767 else
768 {
769 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys1));
770 rc = VERR_PGM_HANDLER_NOT_FOUND;
771 }
772 pgmUnlock(pVM);
773 return rc;
774
775}
776
777
778/**
779 * Resets any modifications to individual pages in a physical
780 * page access handler region.
781 *
782 * This is used in conjunction with PGMHandlerPhysicalModify().
783 *
784 * @returns VBox status code.
785 * @param pVM VM Handle
786 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
787 */
788PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
789{
790 /*
791 * Find the handler.
792 */
793 pgmLock(pVM);
794 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
795 if (pCur)
796 {
797 /*
798 * Validate type.
799 */
800 switch (pCur->enmType)
801 {
802 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
803 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
804 {
805 /*
806 * Set the flags and flush shadow PT entries.
807 */
808 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlePhysicalReset);
809 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
810 while (pRam && GCPhys > pRam->GCPhysLast)
811 pRam = CTXSUFF(pRam->pNext);
812 int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
813 if (rc == VINF_PGM_GCPHYS_ALIASED)
814 {
815 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
816 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
817 }
818 pVM->pgm.s.fPhysCacheFlushPending = true;
819 pgmUnlock(pVM);
820 return VINF_SUCCESS;
821 }
822
823 /*
824 * Invalid.
825 */
826 case PGMPHYSHANDLERTYPE_PHYSICAL:
827 case PGMPHYSHANDLERTYPE_MMIO:
828 AssertMsgFailed(("Can't reset type %d!\n", pCur->enmType));
829 pgmUnlock(pVM);
830 return VERR_INTERNAL_ERROR;
831
832 default:
833 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
834 pgmUnlock(pVM);
835 return VERR_INTERNAL_ERROR;
836 }
837 }
838 pgmUnlock(pVM);
839 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
840 return VERR_PGM_HANDLER_NOT_FOUND;
841}
842
843
844/**
845 * Search for a virtual handler with a matching physical address.
846 *
847 * @returns VBox status code
848 * @param pVM The VM handle.
849 * @param GCPhys GC physical address to search for.
850 * @param ppVirt Where to store the pointer to the virtual handler structure.
851 * @param piPage Where to store the index of the cached physical page.
852 */
853int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
854{
855 STAM_PROFILE_START(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
856 Assert(ppVirt);
857
858 PPGMPHYS2VIRTHANDLER pCur;
859 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers, GCPhys);
860 if (pCur)
861 {
862 /* found a match! */
863#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
864 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
865#endif
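 /* The node lives inside the owning handler's aPhysToVirt[] array, so offVirtHandler leads
    back to the PGMVIRTHANDLER and plain pointer arithmetic yields the cached page index. */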
866 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
867 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
868
869 LogFlow(("PHYS2VIRT: found match for %VGp -> %VGv *piPage=%#x\n",
870 GCPhys, (*ppVirt)->GCPtr, *piPage));
871 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
872 return VINF_SUCCESS;
873 }
874
875 *ppVirt = NULL;
876 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
877 return VERR_PGM_HANDLER_NOT_FOUND;
878}
879
880
881/**
882 * Deal with aliases in phys2virt.
883 *
884 * @param pVM The VM handle.
885 * @param pPhys2Virt The node we failed to insert.
886 */
887static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
888{
889 /*
890 * First find the node which is conflicting with us.
891 */
892 /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
893 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
894 if (!pHead)
895 {
896 /** @todo do something clever here... */
897#ifdef IN_RING3
898 LogRel(("pgmHandlerVirtualInsertAliased: %VGp-%VGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
899#endif
900 pPhys2Virt->offNextAlias = 0;
901 return;
902 }
903#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
904 AssertReleaseMsg(pHead != pPhys2Virt, ("%VGp-%VGp offVirtHandler=%#RX32\n",
905 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
906#endif
907
908 /** @todo check if the current head node covers the ground we do. This is highly unlikely
909 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
910
911 /*
912 * Insert ourselves as the next node.
913 */
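 /* The aliases form a singly linked list threaded through offNextAlias: the bits covered by
    PGMPHYS2VIRTHANDLER_OFF_MASK hold the byte offset to the next node, while the remaining
    bits carry the IN_TREE / IS_HEAD status. The head stays in the AVL tree; the new node is
    linked in immediately after it. */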
914 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
915 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
916 else
917 {
918 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
919 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
920 | PGMPHYS2VIRTHANDLER_IN_TREE;
921 }
922 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
923 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
924 Log(("pgmHandlerVirtualInsertAliased: %VGp-%VGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
925}
926
927
928/**
929 * Resets one virtual handler range.
930 *
931 * @returns 0
932 * @param pNode Pointer to a PGMVIRTHANDLER.
933 * @param pvUser The VM handle.
934 */
935DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
936{
937 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
938 PVM pVM = (PVM)pvUser;
939
940 /*
941 * Calc flags.
942 */
943 unsigned fFlags;
944 switch (pCur->enmType)
945 {
946 case PGMVIRTHANDLERTYPE_EIP:
947 case PGMVIRTHANDLERTYPE_NORMAL: fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER; break;
948 case PGMVIRTHANDLERTYPE_WRITE: fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE; break;
949 case PGMVIRTHANDLERTYPE_ALL: fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL; break;
950 /* hypervisor handlers need no flags and wouldn't have anywhere to put them in any case. */
951 case PGMVIRTHANDLERTYPE_HYPERVISOR:
952 return 0;
953 default:
954 AssertMsgFailed(("Invalid type %d\n", pCur->enmType));
955 return 0;
956 }
957
958 /*
959 * Iterate the pages and apply the flags.
960 */
961 PPGMRAMRANGE pRamHint = NULL;
962 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
963 RTGCUINTPTR cbLeft = pCur->cb;
964 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
965 {
966 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
967 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
968 {
969 /* Update the flags. */
970 int rc = PGMRamFlagsSetByGCPhysWithHint(&pVM->pgm.s, pPhys2Virt->Core.Key, fFlags, &pRamHint);
971 AssertRC(rc);
972
973 /* Need to insert the page in the Phys2Virt lookup tree? */
974 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
975 {
976#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
977 AssertRelease(!pPhys2Virt->offNextAlias);
978#endif
979 unsigned cbPhys = cbLeft;
980 if (cbPhys > PAGE_SIZE - offPage)
981 cbPhys = PAGE_SIZE - offPage;
982 else
983 Assert(iPage == pCur->cPages - 1);
984 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
985 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
986 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
987 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
988#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
989 else
990 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
991 ("%VGp-%VGp offNextAlias=%#RX32\n",
992 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
993#endif
994 Log2(("PHYS2VIRT: Insert physical range %VGp-%VGp offNextAlias=%#RX32 %s\n",
995 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
996 }
997 }
998 cbLeft -= PAGE_SIZE - offPage;
999 offPage = 0;
1000 }
1001
1002 return 0;
1003}
1004
1005
1006#ifndef IN_RING3
1007
1008# ifdef IN_RING0
1009/** @todo try combine this with iom and em. */
1010
1011/**
1012 * Read callback for disassembly function; supports reading bytes that cross a page boundary
1013 *
1014 * @returns VBox status code.
1015 * @param pSrc GC source pointer
1016 * @param pDest HC destination pointer
1017 * @param size Number of bytes to read
1018 * @param dwUserdata Callback specific user data (pCpu)
1019 *
1020 */
1021DECLCALLBACK(int32_t) pgmReadBytes(RTHCUINTPTR pSrc, uint8_t *pDest, uint32_t size, RTHCUINTPTR dwUserdata)
1022{
1023 DISCPUSTATE *pCpu = (DISCPUSTATE *)dwUserdata;
1024 PVM pVM = (PVM)pCpu->dwUserData[0];
1025
1026 int rc = PGMPhysReadGCPtr(pVM, pDest, pSrc, size);
1027 AssertRC(rc);
1028 return rc;
1029}
1030
1031inline int pgmDisCoreOne(PVM pVM, DISCPUSTATE *pCpu, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
1032{
1033 return DISCoreOneEx(InstrGC, pCpu->mode, pgmReadBytes, pVM, pCpu, pOpsize);
1034}
1035
1036# else /* !IN_RING0 (i.e. in IN_GC) */
1037inline int pgmDisCoreOne(PVM pVM, DISCPUSTATE *pCpu, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
1038{
1039 return DISCoreOne(pCpu, InstrGC, pOpsize);
1040}
1041
1042# endif /* IN_RING0 */
1043
1044
1045/**
1046 * \#PF Handler callback for Guest ROM range write access.
1047 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
1048 *
1049 * @returns VBox status code (appropriate for trap handling and GC return).
1050 * @param pVM VM Handle.
1051 * @param uErrorCode CPU Error code.
1052 * @param pRegFrame Trap register frame.
1053 * @param pvFault The fault address (cr2).
1054 * @param GCPhysFault The GC physical address corresponding to pvFault.
1055 * @param pvUser User argument.
1056 */
1057PGMDECL(int) pgmGuestROMWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1058{
1059 DISCPUSTATE Cpu;
1060 Cpu.mode = SELMIsSelector32Bit(pVM, pRegFrame->cs, &pRegFrame->csHid) ? CPUMODE_32BIT : CPUMODE_16BIT;
1061 if (Cpu.mode == CPUMODE_32BIT)
1062 {
1063 RTGCPTR GCPtrCode;
1064 int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &GCPtrCode);
1065 if (VBOX_SUCCESS(rc))
1066 {
1067 uint32_t cbOp;
1068 rc = pgmDisCoreOne(pVM, &Cpu, (RTGCUINTPTR)GCPtrCode, &cbOp);
1069 if (VBOX_SUCCESS(rc))
1070 {
1071 /* ASSUMES simple instructions.
1072 * For instance 'pop [ROM_ADDRESS]' or 'and [ROM_ADDRESS], eax' better
1073 * not occur or we'll screw up the cpu state.
1074 */
1075 /** @todo We're assuming too much here I think. */
1076 if (!(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
1077 {
1078 /*
1079 * Move on to the next instruction.
1080 */
1081 pRegFrame->eip += cbOp;
1082 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestROMWriteHandled);
1083 return VINF_SUCCESS;
1084 }
1085 LogFlow(("pgmGuestROMWriteHandler: wrong prefix!!\n"));
1086 }
1087 }
1088 }
1089
1090 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestROMWriteUnhandled);
1091 return VINF_EM_RAW_EMULATE_INSTR;
1092}
1093#endif /* !IN_RING3 */