VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 4050

Last change on this file since 4050 was 2981, checked in by vboxsync, 17 years ago

InnoTek -> innotek: all the headers and comments.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.2 KB
Line 
1/* $Id: PGMAllHandler.cpp 2981 2007-06-01 16:01:28Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/em.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
34#include <VBox/dbgf.h>
35#include <VBox/rem.h>
36#include "PGMInternal.h"
37#include <VBox/vm.h>
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/selm.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PPGMPHYSHANDLER pCur);
52static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
53static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
54static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
55
56
57
/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
 *          the guest page is aliased or/and mapped by multiple PTs. A CR3 sync has been
 *          flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerGC    The GC handler.
 * @param   pvUserGC        User argument to the GC handler.
 *                          This must be a GC pointer because it will be relocated!
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
                                          R3PTRTYPE(const char *) pszDesc)
{
    Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%VGp GCPhysLast=%VGp pfnHandlerR3=%VHv pvUserR3=%VHv pfnHandlerR0=%VHv pvUserR0=%VHv pfnHandlerGC=%VGv pvUserGC=%VGv pszDesc=%s\n",
         enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerGC, pvUserGC, HCSTRING(pszDesc)));

    /*
     * Validate input.
     */
    if (GCPhys >= GCPhysLast)
    {
        AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }
    switch (enmType)
    {
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL:
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            break;
        default:
            AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    /* Values below 64KB are treated as opaque tokens and skipped; anything
       larger must be a real hypervisor GC pointer, i.e. survive a GC -> HC -> GC
       round-trip. */
    if (    (RTGCUINTPTR)pvUserGC >= 0x10000
        &&  MMHyperHC2GC(pVM, MMHyperGC2HC(pVM, pvUserGC)) != pvUserGC)
    {
        AssertMsgFailed(("Not GC pointer! pvUserGC=%VGv\n", pvUserGC));
        return VERR_INVALID_PARAMETER;
    }
    AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerGC, VERR_INVALID_PARAMETER);

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhysLast < pRam->GCPhys
        ||  GCPhys > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        /*
         * If this is an MMIO registration, we'll just add a range for it.
         */
        if (    enmType == PGMPHYSHANDLERTYPE_MMIO
            &&  (   !pRam
                 || GCPhysLast < pRam->GCPhys)
           )
        {
            size_t cb = GCPhysLast - GCPhys + 1;
            Assert(cb == RT_ALIGN_Z(cb, PAGE_SIZE));
            int rc = PGMR3PhysRegister(pVM, NULL, GCPhys, cb, MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL, pszDesc);
            if (VBOX_FAILURE(rc))
                return rc;

            /* search again. */
            pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
            while (pRam && GCPhys > pRam->GCPhysLast)
                pRam = CTXSUFF(pRam->pNext);
        }

        if (    !pRam
            ||  GCPhysLast < pRam->GCPhys
            ||  GCPhys > pRam->GCPhysLast)
#endif /* IN_RING3 */
        {
#ifdef IN_RING3
            DBGFR3Info(pVM, "phys", NULL, NULL);
#endif
            AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
            return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
        }
    }

    /*
     * Allocate and initialize the new entry.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    pNew->Core.Key      = GCPhys;
    pNew->Core.KeyLast  = GCPhysLast;
    pNew->enmType       = enmType;
    /* KeyLast is inclusive; adding a whole page before shifting rounds the
       count up so a trailing partial page is included. */
    pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    pNew->pfnHandlerR3  = pfnHandlerR3;
    pNew->pvUserR3      = pvUserR3;
    pNew->pfnHandlerR0  = pfnHandlerR0;
    pNew->pvUserR0      = pvUserR0;
    pNew->pfnHandlerGC  = pfnHandlerGC;
    pNew->pvUserGC      = pvUserGC;
    pNew->pszDesc       = pszDesc;

    pgmLock(pVM);

    /*
     * Try insert into list. On success the tree owns pNew; on conflict we
     * free it below.
     */
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
        if (rc == VINF_PGM_GCPHYS_ALIASED)
        {
            /* Couldn't flush individual shadow PTs; request a full pool clear + CR3 sync. */
            pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
        }
        pVM->pgm.s.fPhysCacheFlushPending = true;
#ifndef IN_RING3
        REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#else
        REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#endif
        pgmUnlock(pVM);
        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Vrc (%VGp-%VGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }
    pgmUnlock(pVM);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
    MMHyperFree(pVM, pNew);
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}
216
217
/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when shadow PTs were successfully updated.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
 *          the guest page is aliased or/and mapped by multiple PTs.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 * @param   pRam    The RAM range containing the handler's whole range.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool fFlushTLBs = false;
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
    int rc = VINF_SUCCESS;
#else
    /* Without phys tracking we cannot flush individual shadow PTs, so always
       make the caller do a full CR3 resync. */
    const int rc = VINF_PGM_GCPHYS_ALIASED;
#endif
    const unsigned fFlags = pgmHandlerPhysicalCalcFlags(pCur); Assert(!(fFlags & X86_PTE_PAE_PG_MASK));
    RTUINT cPages = pCur->cPages;
    RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    for (;;)
    {
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!(pRam->aHCPhys[i] & X86_PTE_PAE_PG_MASK)))
        {
            RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
#ifdef IN_RING3
            int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            /* Not in ring-3: ask the host to grow the range for us. */
            int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc2 != VINF_SUCCESS)
                return rc2;
        }

        /* Only touch pages which don't have all the handler flags set yet. */
        if ((pRam->aHCPhys[i] & fFlags) != fFlags)
        {
            pRam->aHCPhys[i] |= fFlags;

            Assert(pRam->aHCPhys[i] & X86_PTE_PAE_PG_MASK);

#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
            /* This code also makes ASSUMPTIONS about the cRefs and stuff. */
            Assert(MM_RAM_FLAGS_IDX_SHIFT < MM_RAM_FLAGS_CREFS_SHIFT);
            /* u16 packs the tracking index (low part) and cRefs (high part). */
            const uint16_t u16 = pRam->aHCPhys[i] >> MM_RAM_FLAGS_IDX_SHIFT;
            if (u16)
            {
                /* Single PT, a physext list of PTs, or the slow exhaustive
                   scan when the tracking info overflowed. */
                if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
                    pgmPoolTrackFlushGCPhysPT(pVM,
                                              &pRam->aHCPhys[i],
                                              u16 & MM_RAM_FLAGS_IDX_MASK,
                                              u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
                else if (u16 != ((MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | MM_RAM_FLAGS_IDX_OVERFLOWED))
                    pgmPoolTrackFlushGCPhysPTs(pVM, &pRam->aHCPhys[i], u16 & MM_RAM_FLAGS_IDX_MASK);
                else
                    rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, &pRam->aHCPhys[i]);
                fFlushTLBs = true;
            }
#elif defined(PGMPOOL_WITH_CACHE)
            rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, &pRam->aHCPhys[i]);
            fFlushTLBs = true;
#endif
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
    }

    /* Only flush the guest TLBs if we actually changed PTs and didn't end up
       requesting a full resync anyway. */
    if (fFlushTLBs && rc == VINF_SUCCESS)
    {
        PGM_INVL_GUEST_TLBS();
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Vrc\n", rc));
    return rc;
}
302
303
/**
 * Deregister a physical page access handler.
 *
 * (Note: the original header wrongly said "Register".)
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler starts at GCPhys.
 *
 * @param   pVM     VM Handle.
 * @param   GCPhys  Start physical address of the previously registered range.
 */
PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler. The remove call takes it out of the tree, so on
     * success we own the node and must free it.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %#VGp-%#VGp %s\n",
                 pCur->Core.Key, pCur->Core.KeyLast, HCSTRING(pCur->pszDesc)));

        /*
         * Clear the page bits and notify the REM about this change.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        pgmUnlock(pVM);
        MMHyperFree(pVM, pCur);
        return VINF_SUCCESS;
    }
    pgmUnlock(pVM);

    AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
337
338
339/**
340 * Shared code with modify.
341 */
342static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
343{
344 RTGCPHYS GCPhysStart = pCur->Core.Key;
345 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
346
347 /*
348 * Page align the range.
349 */
350 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
351 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
352 {
353 if (GCPhysStart & PAGE_OFFSET_MASK)
354 {
355 if (PGMRamTestFlags(&pVM->pgm.s, GCPhysStart, MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF))
356 {
357 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
358 if ( GCPhys > GCPhysLast
359 || GCPhys < GCPhysStart)
360 return;
361 GCPhysStart = GCPhys;
362 }
363 else
364 GCPhysStart = GCPhysStart & X86_PTE_PAE_PG_MASK;
365 }
366 if (GCPhysLast & PAGE_OFFSET_MASK)
367 {
368 if (PGMRamTestFlags(&pVM->pgm.s, GCPhysLast, MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_TEMP_OFF))
369 {
370 RTGCPHYS GCPhys = (GCPhysStart & X86_PTE_PAE_PG_MASK) - 1;
371 if ( GCPhys < GCPhysStart
372 || GCPhys > GCPhysLast)
373 return;
374 GCPhysLast = GCPhys;
375 }
376 else
377 GCPhysLast += PAGE_SIZE - 1 - (GCPhysLast & PAGE_OFFSET_MASK);
378 }
379 }
380
381 /*
382 * Tell REM.
383 */
384 RTHCPTR pvRange = 0;
385 if (pCur->pfnHandlerR3 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO)
386 PGMRamGCPhys2HCPtr(&pVM->pgm.s, GCPhysStart, &pvRange); /* ASSUMES it doesn't change pvRange on failure. */
387#ifndef IN_RING3
388 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, pvRange);
389#else
390 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, pvRange);
391#endif
392}
393
394
/**
 * Resets ram range flags.
 *
 * Clears the physical-handler flags for every page the handler covered, then
 * restores the flags on the first/last page if a neighbouring handler still
 * shares them.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when shadow PTs were successfully updated.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 *
 * @remark We don't start messing with the shadow page tables, as we've already got code
 *         in Trap0e which deals with out of sync handler flags (originally conceived for
 *         global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    RTUINT cPages = pCur->cPages;
    RTGCPHYS GCPhys = pCur->Core.Key;
    PPGMRAMRANGE pRamHint = NULL;
    PPGM pPGM = &pVM->pgm.s;
    for (;;)
    {
        PGMRamFlagsClearByGCPhysWithHint(pPGM, GCPhys,
                                         MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL,
                                         &pRamHint);
        /* next */
        if (--cPages == 0)
            break;
        GCPhys += PAGE_SIZE;
    }

    /*
     * Check for partial start page: walk handlers ending just below us and,
     * while they share our first page, re-apply their flags to it.
     */
    if (pCur->Core.Key & PAGE_OFFSET_MASK)
    {
        RTGCPHYS GCPhys = pCur->Core.Key - 1;
        for (;;)
        {
            PPGMPHYSHANDLER pBelow = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys, false);
            if (    !pBelow
                ||  (pBelow->Core.KeyLast >> PAGE_SHIFT) != (pCur->Core.Key >> PAGE_SHIFT))
                break;
            PGMRamFlagsSetByGCPhysWithHint(pPGM, GCPhys, pgmHandlerPhysicalCalcFlags(pCur), &pRamHint);

            /* next? Stop once the neighbour starts outside our first page or
               at a page boundary. */
            if (    (pBelow->Core.Key >> PAGE_SHIFT) != (pCur->Core.Key >> PAGE_SHIFT)
                ||  !(pBelow->Core.Key & PAGE_OFFSET_MASK))
                break;
            GCPhys = pBelow->Core.Key - 1;
        }
    }

    /*
     * Check for partial end page: symmetric walk upwards over handlers that
     * share our last page.
     */
    if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
    {
        RTGCPHYS GCPhys = pCur->Core.KeyLast + 1;
        for (;;)
        {
            PPGMPHYSHANDLER pAbove = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys, true);
            if (    !pAbove
                ||  (pAbove->Core.Key >> PAGE_SHIFT) != (pCur->Core.KeyLast >> PAGE_SHIFT))
                break;
            PGMRamFlagsSetByGCPhysWithHint(pPGM, GCPhys, pgmHandlerPhysicalCalcFlags(pCur), &pRamHint);

            /* next? */
            if (    (pAbove->Core.KeyLast >> PAGE_SHIFT) != (pCur->Core.KeyLast >> PAGE_SHIFT)
                ||  (pAbove->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_SIZE - 1)
                break;
            GCPhys = pAbove->Core.KeyLast + 1;
        }
    }
}
472
473
474/**
475 * Modify a physical page access handler.
476 *
477 * Modification can only be done to the range it self, not the type or anything else.
478 *
479 * @returns VBox status code.
480 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
481 * and a new registration must be performed!
482 * @param pVM VM handle.
483 * @param GCPhysCurrent Current location.
484 * @param GCPhys New location.
485 * @param GCPhysLast New last location.
486 */
487PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
488{
489 /*
490 * Remove it.
491 */
492 int rc;
493 pgmLock(pVM);
494 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhysCurrent);
495 if (pCur)
496 {
497 /*
498 * Clear the ram flags. (We're gonna move or free it!)
499 */
500 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
501 RTHCPTR pvRange = 0;
502 if (pCur->pfnHandlerR3 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO)
503 PGMRamGCPhys2HCPtr(&pVM->pgm.s, GCPhysCurrent, &pvRange); /* ASSUMES it doesn't change pvRange on failure. */
504
505 /*
506 * Validate the new range, modify and reinsert.
507 */
508 if (GCPhysLast >= GCPhys)
509 {
510 /*
511 * We require the range to be within registered ram.
512 * There is no apparent need to support ranges which cover more than one ram range.
513 */
514 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
515 while (pRam && GCPhys > pRam->GCPhysLast)
516 pRam = CTXSUFF(pRam->pNext);
517 if ( pRam
518 && GCPhys <= pRam->GCPhysLast
519 && GCPhysLast >= pRam->GCPhys)
520 {
521 pCur->Core.Key = GCPhys;
522 pCur->Core.KeyLast = GCPhysLast;
523 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
524
525 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pCur->Core))
526 {
527 /*
528 * Set ram flags, flush shadow PT entries and finally tell REM about this.
529 */
530 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
531 if (rc == VINF_PGM_GCPHYS_ALIASED)
532 {
533 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
534 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
535 }
536 pVM->pgm.s.fPhysCacheFlushPending = true;
537
538#ifndef IN_RING3
539 REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
540 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, pvRange);
541#else
542 REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
543 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, pvRange);
544#endif
545 pgmUnlock(pVM);
546 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%VGp -> GCPhys=%VGp GCPhysLast=%VGp\n",
547 GCPhysCurrent, GCPhys, GCPhysLast));
548 return VINF_SUCCESS;
549 }
550 AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp\n", GCPhys, GCPhysLast));
551 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
552 }
553 else
554 {
555 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
556 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
557 }
558 }
559 else
560 {
561 AssertMsgFailed(("Invalid range %VGp-%VGp\n", GCPhys, GCPhysLast));
562 rc = VERR_INVALID_PARAMETER;
563 }
564
565 /*
566 * Invalid new location, free it.
567 * We've only gotta notify REM and free the memory.
568 */
569 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
570 MMHyperFree(pVM, pCur);
571 }
572 else
573 {
574 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhysCurrent));
575 rc = VERR_PGM_HANDLER_NOT_FOUND;
576 }
577
578 pgmUnlock(pVM);
579 return rc;
580}
581
582
583/**
584 * Changes the callbacks associated with a physical access handler.
585 *
586 * @returns VBox status code.
587 * @param pVM VM Handle.
588 * @param GCPhys Start physical address.
589 * @param pfnHandlerR3 The R3 handler.
590 * @param pvUserR3 User argument to the R3 handler.
591 * @param pfnHandlerR0 The R0 handler.
592 * @param pvUserR0 User argument to the R0 handler.
593 * @param pfnHandlerGC The GC handler.
594 * @param pvUserGC User argument to the GC handler.
595 * This must be a GC pointer because it will be relocated!
596 * @param pszDesc Pointer to description string. This must not be freed.
597 */
598PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
599 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
600 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
601 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
602 R3PTRTYPE(const char *) pszDesc)
603{
604 /*
605 * Get the handler.
606 */
607 int rc = VINF_SUCCESS;
608 pgmLock(pVM);
609 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
610 if (pCur)
611 {
612 /*
613 * Change callbacks.
614 */
615 pCur->pfnHandlerR3 = pfnHandlerR3;
616 pCur->pvUserR3 = pvUserR3;
617 pCur->pfnHandlerR0 = pfnHandlerR0;
618 pCur->pvUserR0 = pvUserR0;
619 pCur->pfnHandlerGC = pfnHandlerGC;
620 pCur->pvUserGC = pvUserGC;
621 pCur->pszDesc = pszDesc;
622 }
623 else
624 {
625 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
626 rc = VERR_PGM_HANDLER_NOT_FOUND;
627 }
628
629 pgmUnlock(pVM);
630 return rc;
631}
632
633
/**
 * Splits a physical access handler in two.
 *
 * The node for the second half is allocated up front (outside the lock); on
 * any failure it is freed again before returning.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INVALID_PARAMETER if GCPhysSplit is outside the handler range.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler starts at GCPhys.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address of the handler.
 * @param   GCPhysSplit The split address (becomes the start of the 2nd half).
 */
PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (GCPhysSplit <= pCur->Core.KeyLast)
        {
            /*
             * Create new handler node for the 2nd half, copying the callbacks
             * and shrinking the original to end just below the split point.
             */
            *pNew = *pCur;
            pNew->Core.Key      = GCPhysSplit;
            pNew->cPages        = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            pCur->Core.KeyLast  = GCPhysSplit - 1;
            pCur->cPages        = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
            {
                /* Success: the tree now owns pNew, so skip the free below. */
                LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_INTERNAL_ERROR;
        }
        else
        {
            AssertMsgFailed(("outside range: %VGp-%VGp split %VGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    /* Failure paths end up here; release the pre-allocated node. */
    MMHyperFree(pVM, pNew);
    return rc;
}
698
699
/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success (the second handler is absorbed and freed).
 * @retval  VERR_INVALID_PARAMETER if the ranges are not adjacent.
 * @retval  VERR_ACCESS_DENIED if the callbacks differ.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if either handler is missing.
 *
 * @param   pVM     VM Handle.
 * @param   GCPhys1 Start physical address of the first handler.
 * @param   GCPhys2 Start physical address of the second handler.
 */
PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys1);
    if (pCur1)
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
        if (pCur2)
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
            {
                if (    pCur1->pfnHandlerGC == pCur2->pfnHandlerGC
                    &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
                    &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
                {
                    /* Remove the second node and extend the first over its range. */
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (pCur3 == pCur2)
                    {
                        pCur1->Core.KeyLast  = pCur2->Core.KeyLast;
                        pCur1->cPages        = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pgmUnlock(pVM);
                        MMHyperFree(pVM, pCur2);
                        return VINF_SUCCESS;
                    }
                    /* Tree returned a different node than we looked up — corruption. */
                    Assert(pCur3 == pCur2);
                    rc = VERR_INTERNAL_ERROR;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    return rc;

}
772
773
/**
 * Resets any modifications to individual pages in a physical
 * page access handler region.
 *
 * This is used in pair with PGMHandlerPhysicalModify().
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_INTERNAL_ERROR for MMIO/PHYSICAL handlers (not resettable) or
 *          a corrupt type value.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler starts at GCPhys.
 *
 * @param   pVM     VM Handle
 * @param   GCPhys  Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 */
PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        /*
         * Validate type.
         */
        switch (pCur->enmType)
        {
            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            {
                /*
                 * Set the flags and flush shadow PT entries.
                 */
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlePhysicalReset);
                /* Locate the RAM range containing the handler. (The handler was
                   validated against RAM at registration time.) */
                PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
                while (pRam && GCPhys > pRam->GCPhysLast)
                    pRam = CTXSUFF(pRam->pNext);
                int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                if (rc == VINF_PGM_GCPHYS_ALIASED)
                {
                    /* Couldn't flush individual PTs; schedule a pool clear + CR3 sync. */
                    pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                }
                pVM->pgm.s.fPhysCacheFlushPending = true;
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }

            /*
             * Invalid.
             */
            case PGMPHYSHANDLERTYPE_PHYSICAL:
            case PGMPHYSHANDLERTYPE_MMIO:
                AssertMsgFailed(("Can't reset type %d!\n",  pCur->enmType));
                pgmUnlock(pVM);
                return VERR_INTERNAL_ERROR;

            default:
                AssertMsgFailed(("Invalid type %d! Corruption!\n",  pCur->enmType));
                pgmUnlock(pVM);
                return VERR_INTERNAL_ERROR;
        }
    }
    pgmUnlock(pVM);
    AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
838
839
840/**
841 * Search for virtual handler with matching physical address
842 *
843 * @returns VBox status code
844 * @param pVM The VM handle.
845 * @param GCPhys GC physical address to search for.
846 * @param ppVirt Where to store the pointer to the virtual handler structure.
847 * @param piPage Where to store the pointer to the index of the cached physical page.
848 */
849int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
850{
851 STAM_PROFILE_START(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
852 Assert(ppVirt);
853
854 PPGMPHYS2VIRTHANDLER pCur;
855 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers, GCPhys);
856 if (pCur)
857 {
858 /* found a match! */
859#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
860 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
861#endif
862 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
863 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
864
865 LogFlow(("PHYS2VIRT: found match for %VGp -> %VGv *piPage=%#x\n",
866 GCPhys, (*ppVirt)->GCPtr, *piPage));
867 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
868 return VINF_SUCCESS;
869 }
870
871 *ppVirt = NULL;
872 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
873 return VERR_PGM_HANDLER_NOT_FOUND;
874}
875
876
/**
 * Deal with aliases in phys2virt.
 *
 * Called when RTAvlroGCPhysInsert() failed for pPhys2Virt because another node
 * already occupies the key; links pPhys2Virt into that node's alias chain
 * (encoded as relative byte offsets in offNextAlias).
 *
 * @param   pVM         The VM handle.
 * @param   pPhys2Virt  The node we failed to insert.
 */
static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
{
    /*
     * First find the node which is conflicting with us.
     */
    /** @todo Deal with partial overlapping. (Unlikly situation, so I'm too lazy to do anything about it now.) */
    PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
    if (!pHead)
    {
        /* Exact-key lookup failed even though insert conflicted — presumably a
           partial overlap (see todo above); leave the node unlinked. */
        /** @todo do something clever here... */
#ifdef IN_RING3
        LogRel(("pgmHandlerVirtualInsertAliased: %VGp-%VGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
#endif
        pPhys2Virt->offNextAlias = 0;
        return;
    }
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pHead != pPhys2Virt, ("%VGp-%VGp offVirtHandler=%#RX32\n",
                     pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
#endif

    /** @todo check if the current head node covers the ground we do. This is highly unlikely
     * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */

    /*
     * Insert ourselves as the next node (directly after the head; the chain
     * carries relative offsets, so both links must be recomputed).
     */
    if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
        pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
    else
    {
        PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
        pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
                                 | PGMPHYS2VIRTHANDLER_IN_TREE;
    }
    pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
                        | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
    Log(("pgmHandlerVirtualInsertAliased: %VGp-%VGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
}
922
923
/**
 * Resets one virtual handler range.
 *
 * Re-applies the RAM-range flags for each page of the handler and (re)inserts
 * missing pages into the phys2virt lookup tree. Intended as an AVL enumeration
 * callback.
 *
 * @returns 0 (continue enumeration)
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  The VM handle.
 */
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PVM pVM = (PVM)pvUser;

    /*
     * Calc flags.
     */
    unsigned fFlags;
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_EIP:
        case PGMVIRTHANDLERTYPE_NORMAL:     fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER; break;
        case PGMVIRTHANDLERTYPE_WRITE:      fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE; break;
        case PGMVIRTHANDLERTYPE_ALL:        fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL; break;
        /* hypervisor handlers need no flags and wouldn't have anywhere to put them in any case. */
        case PGMVIRTHANDLERTYPE_HYPERVISOR:
            return 0;
        default:
            AssertMsgFailed(("Invalid type %d\n", pCur->enmType));
            return 0;
    }

    /*
     * Iterate the pages and apply the flags.
     */
    PPGMRAMRANGE pRamHint = NULL;
    RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
    RTGCUINTPTR cbLeft = pCur->cb;
    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
        /* Core.Key == NIL means the page has no cached physical address yet. */
        if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
        {
            /* Update the flags. */
            int rc = PGMRamFlagsSetByGCPhysWithHint(&pVM->pgm.s, pPhys2Virt->Core.Key, fFlags, &pRamHint);
            AssertRC(rc);

            /* Need to insert the page in the Phys2Virt lookup tree? */
            if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertRelease(!pPhys2Virt->offNextAlias);
#endif
                /* Clip the physical span to the remainder of this page; only the
                   last page of the handler may be shorter than a full page. */
                unsigned cbPhys = cbLeft;
                if (cbPhys > PAGE_SIZE - offPage)
                    cbPhys = PAGE_SIZE - offPage;
                else
                    Assert(iPage == pCur->cPages - 1);
                pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
                pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
                /* On key conflict, chain into the existing node's alias list. */
                if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
                    pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                else
                    AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
                                     ("%VGp-%VGp offNextAlias=%#RX32\n",
                                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
#endif
                Log2(("PHYS2VIRT: Insert physical range %VGp-%VGp offNextAlias=%#RX32 %s\n",
                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
            }
        }
        cbLeft -= PAGE_SIZE - offPage;
        offPage = 0;
    }

    return 0;
}
1000
1001
1002#ifndef IN_RING3
1003
1004# ifdef IN_RING0
1005/** @todo try combine this with iom and em. */
1006
1007/**
1008 * Read callback for disassembly function; supports reading bytes that cross a page boundary
1009 *
1010 * @returns VBox status code.
1011 * @param pSrc GC source pointer
1012 * @param pDest HC destination pointer
1013 * @param size Number of bytes to read
1014 * @param dwUserdata Callback specific user data (pCpu)
1015 *
1016 */
1017DECLCALLBACK(int32_t) pgmReadBytes(RTHCUINTPTR pSrc, uint8_t *pDest, uint32_t size, RTHCUINTPTR dwUserdata)
1018{
1019 DISCPUSTATE *pCpu = (DISCPUSTATE *)dwUserdata;
1020 PVM pVM = (PVM)pCpu->dwUserData[0];
1021
1022 int rc = PGMPhysReadGCPtr(pVM, pDest, pSrc, size);
1023 AssertRC(rc);
1024 return rc;
1025}
1026
/* Ring-0 wrapper: disassemble one instruction at InstrGC using the extended
 * entry point so guest memory is read through pgmReadBytes (handles reads
 * crossing a page boundary). pVM is stashed by the caller in pCpu->dwUserData[0]. */
inline int pgmDisCoreOne(PVM pVM, DISCPUSTATE *pCpu, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
{
    return DISCoreOneEx(InstrGC, pCpu->mode, pgmReadBytes, pVM, pCpu, pOpsize);
}
1031
1032# else /* !IN_RING0 (i.e. in IN_GC) */
/* GC wrapper: disassemble one instruction at InstrGC with the plain entry
 * point (guest memory is directly addressable in GC, so no read callback
 * is needed; pVM is unused here). */
inline int pgmDisCoreOne(PVM pVM, DISCPUSTATE *pCpu, RTGCUINTPTR InstrGC, uint32_t *pOpsize)
{
    return DISCoreOne(pCpu, InstrGC, pOpsize);
}
1037
1038#endif /* !IN_RING0 (i.e. in IN_GC) */
1039
1040
1041/**
1042 * \#PF Handler callback for Guest ROM range write access.
1043 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
1044 *
1045 * @returns VBox status code (appropritate for trap handling and GC return).
1046 * @param pVM VM Handle.
1047 * @param uErrorCode CPU Error code.
1048 * @param pRegFrame Trap register frame.
1049 * @param pvFault The fault address (cr2).
1050 * @param GCPhysFault The GC physical address corresponding to pvFault.
1051 * @param pvUser User argument.
1052 */
1053PGMDECL(int) pgmGuestROMWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1054{
1055 DISCPUSTATE Cpu;
1056 Cpu.mode = SELMIsSelector32Bit(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) ? CPUMODE_32BIT : CPUMODE_16BIT;
1057 if (Cpu.mode == CPUMODE_32BIT)
1058 {
1059 RTGCPTR GCPtrCode;
1060 int rc = SELMValidateAndConvertCSAddr(pVM, pRegFrame->eflags, pRegFrame->ss, pRegFrame->cs, &pRegFrame->csHid, (RTGCPTR)pRegFrame->eip, &GCPtrCode);
1061 if (VBOX_SUCCESS(rc))
1062 {
1063 uint32_t cbOp;
1064 rc = pgmDisCoreOne(pVM, &Cpu, (RTGCUINTPTR)GCPtrCode, &cbOp);
1065 if (VBOX_SUCCESS(rc))
1066 {
1067 /* ASSUMES simple instructions.
1068 * For instance 'pop [ROM_ADDRESS]' or 'and [ROM_ADDRESS], eax' better
1069 * not occure or we'll screw up the cpu state.
1070 */
1071 /** @todo We're assuming too much here I think. */
1072 if (!(Cpu.prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
1073 {
1074 /*
1075 * Move on to the next instruction.
1076 */
1077 pRegFrame->eip += cbOp;
1078 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestROMWriteHandled);
1079 return VINF_SUCCESS;
1080 }
1081 LogFlow(("pgmGuestROMWriteHandler: wrong prefix!!\n"));
1082 }
1083 }
1084 }
1085
1086 STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestROMWriteUnhandled);
1087 return VINF_EM_RAW_EMULATE_INSTR;
1088}
1089#endif /* !IN_RING3 */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette