VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@95085

Last change on this file since 95085 was 94800, checked in by vboxsync, 3 years ago

VMM/IEM,PGM: TLB work, esp. on the data one. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 146.2 KB
1/* $Id: PGMAllPhys.cpp 94800 2022-05-03 21:49:43Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/em.h>
30#include <VBox/vmm/nem.h>
31#include "PGMInternal.h"
32#include <VBox/vmm/vmcc.h>
33#include "PGMInline.h"
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*********************************************************************************************************************************
45* Defined Constants And Macros *
46*********************************************************************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
51 * Checks if valid physical access handler return code (normal handler, not PF).
52 *
53 * Checks if the given strict status code is one of the expected ones for a
54 * physical access handler in the current context.
55 *
56 * @returns true or false.
57 * @param a_rcStrict The status code.
58 * @param a_fWrite Whether it is a write or read being serviced.
59 *
60 * @remarks We wish to keep the list of statuses here as short as possible.
61 * When changing, please make sure to update the PGMPhysRead,
62 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
63 */
64#ifdef IN_RING3
65# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
66 ( (a_rcStrict) == VINF_SUCCESS \
67 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
68#elif defined(IN_RING0)
69# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
70 ( (a_rcStrict) == VINF_SUCCESS \
71 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
72 \
73 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
74 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
75 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
76 \
77 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
78 || (a_rcStrict) == VINF_EM_DBG_STOP \
79 || (a_rcStrict) == VINF_EM_DBG_EVENT \
80 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
81 || (a_rcStrict) == VINF_EM_OFF \
82 || (a_rcStrict) == VINF_EM_SUSPEND \
83 || (a_rcStrict) == VINF_EM_RESET \
84 )
85#else
86# error "Context?"
87#endif
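/* Illustrative usage only (not part of this file): callers that invoke a physical
   access handler are expected to validate the returned strict status roughly like
   this, assuming rcStrict came back from a FNPGMPHYSHANDLER callback:

       VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
                                          PGMACCESSTYPE_WRITE, enmOrigin, uUser);
       AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true /*fWrite*/),
                       ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
*/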
88
89/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
90 * Checks if valid virtual access handler return code (normal handler, not PF).
91 *
92 * Checks if the given strict status code is one of the expected ones for a
93 * virtual access handler in the current context.
94 *
95 * @returns true or false.
96 * @param a_rcStrict The status code.
97 * @param a_fWrite Whether it is a write or read being serviced.
98 *
99 * @remarks We wish to keep the list of statuses here as short as possible.
100 * When changing, please make sure to update the PGMPhysRead,
101 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
102 */
103#ifdef IN_RING3
104# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
105 ( (a_rcStrict) == VINF_SUCCESS \
106 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
107#elif defined(IN_RING0)
108# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
109 (false /* no virtual handlers in ring-0! */ )
110#else
111# error "Context?"
112#endif
113
114
115
116/**
117 * Calculate the actual table size.
118 *
119 * The memory is laid out like this:
120 * - PGMPHYSHANDLERTREE (8 bytes)
121 * - Allocation bitmap (8-byte size align)
122 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
123 */
124uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
125{
126 /*
127 * A minimum of 64 entries and a maximum of ~64K.
128 */
129 uint32_t cEntries = *pcEntries;
130 if (cEntries <= 64)
131 cEntries = 64;
132 else if (cEntries >= _64K)
133 cEntries = _64K;
134 else
135 cEntries = RT_ALIGN_32(cEntries, 16);
136
137 /*
138 * Do the initial calculation.
139 */
140 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
141 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
142 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
143 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
144
145 /*
146 * Align the total and try to use up the extra space from that.
147 */
148 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
149 uint32_t cAvail = cbTotalAligned - cbTotal;
150 cAvail /= sizeof(PGMPHYSHANDLER);
151 cEntries += cAvail; /* use up the alignment slack; the loop below trims it again if the bitmap growth overflows */
152 for (;;)
153 {
154 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
155 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
156 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
157 cbTotal = cbTreeAndBitmap + cbTable;
158 if (cbTotal <= cbTotalAligned)
159 break;
160 cEntries--;
161 Assert(cEntries >= 16);
162 }
163
164 /*
165 * Return the result.
166 */
167 *pcbTreeAndBitmap = cbTreeAndBitmap;
168 *pcEntries = cEntries;
169 return cbTotalAligned;
170}
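/* Worked example (the sizes are assumptions for illustration only): with
   sizeof(PGMPHYSHANDLER) == 64, the 8 byte tree header noted above, and a host
   page size of 4K (or anything <= 16K), a request for 100 entries is first
   rounded up to 112.  The initial pass gives a 16 byte bitmap, 64 bytes of
   tree+bitmap and a 7168 byte table, 7232 bytes total, which aligns up to 16K.
   The 9152 spare bytes allow 143 extra entries, and the recalculation loop
   confirms that 255 entries exactly fill the 16K allocation. */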
171
172
173/**
174 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
175 */
176DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
177{
178 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
179 if (pRom->GCPhys == GCPhys)
180 return pRom;
181 return NULL;
182}
183
184#ifndef IN_RING3
185
186/**
187 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
188 * \#PF access handler callback for guest ROM range write access.}
189 *
190 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
191 */
192DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
193 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
194
195{
196 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
197 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
198 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
199 int rc;
200 RT_NOREF(uErrorCode, pvFault);
201
202 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
203
204 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
205 switch (pRom->aPages[iPage].enmProt)
206 {
207 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
208 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
209 {
210 /*
211 * If it's a simple instruction which doesn't change the cpu state
212 * we will simply skip it. Otherwise we'll have to defer it to REM.
213 */
214 uint32_t cbOp;
215 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
216 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
217 if ( RT_SUCCESS(rc)
218 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
219 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
220 {
221 switch (pDis->bOpCode)
222 {
223 /** @todo Find other instructions we can safely skip, possibly
224 * adding this kind of detection to DIS or EM. */
225 case OP_MOV:
226 pRegFrame->rip += cbOp;
227 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
228 return VINF_SUCCESS;
229 }
230 }
231 break;
232 }
233
234 case PGMROMPROT_READ_RAM_WRITE_RAM:
235 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
236 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
237 AssertRC(rc);
238 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
239
240 case PGMROMPROT_READ_ROM_WRITE_RAM:
241 /* Handle it in ring-3 because it's *way* easier there. */
242 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
243 break;
244
245 default:
246 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
247 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
248 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
249 }
250
251 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
252 return VINF_EM_RAW_EMULATE_INSTR;
253}
254
255#endif /* !IN_RING3 */
256
257
258/**
259 * @callback_method_impl{FNPGMPHYSHANDLER,
260 * Access handler callback for ROM write accesses.}
261 *
262 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
263 */
264DECLCALLBACK(VBOXSTRICTRC)
265pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
266 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
267{
268 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
269 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
270 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
271 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
272 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
273
274 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
275 RT_NOREF(pVCpu, pvPhys, enmOrigin);
276
277 if (enmAccessType == PGMACCESSTYPE_READ)
278 {
279 switch (pRomPage->enmProt)
280 {
281 /*
282 * Take the default action.
283 */
284 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
285 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
286 case PGMROMPROT_READ_ROM_WRITE_RAM:
287 case PGMROMPROT_READ_RAM_WRITE_RAM:
288 return VINF_PGM_HANDLER_DO_DEFAULT;
289
290 default:
291 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
292 pRom->aPages[iPage].enmProt, iPage, GCPhys),
293 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
294 }
295 }
296 else
297 {
298 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
299 switch (pRomPage->enmProt)
300 {
301 /*
302 * Ignore writes.
303 */
304 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
305 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
306 return VINF_SUCCESS;
307
308 /*
309 * Write to the RAM page.
310 */
311 case PGMROMPROT_READ_ROM_WRITE_RAM:
312 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
313 {
314 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
315 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
316
317 /*
318 * Take the lock, do lazy allocation, map the page and copy the data.
319 *
320 * Note that we have to bypass the mapping TLB since it works on
321 * guest physical addresses and entering the shadow page would
322 * kind of screw things up...
323 */
324 PGM_LOCK_VOID(pVM);
325
326 PPGMPAGE pShadowPage = &pRomPage->Shadow;
327 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
328 {
329 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
330 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
331 }
332
333 void *pvDstPage;
334 int rc;
335#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
336 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
337 {
338 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
339 rc = VINF_SUCCESS;
340 }
341 else
342#endif
343 {
344 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
345 if (RT_SUCCESS(rc))
346 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
347 }
348 if (RT_SUCCESS(rc))
349 {
350 memcpy(pvDstPage, pvBuf, cbBuf); /* pvDstPage already includes the in-page offset (applied in both branches above). */
351 pRomPage->LiveSave.fWrittenTo = true;
352
353 AssertMsg( rc == VINF_SUCCESS
354 || ( rc == VINF_PGM_SYNC_CR3
355 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
356 , ("%Rrc\n", rc));
357 rc = VINF_SUCCESS;
358 }
359
360 PGM_UNLOCK(pVM);
361 return rc;
362 }
363
364 default:
365 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
366 pRom->aPages[iPage].enmProt, iPage, GCPhys),
367 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
368 }
369 }
370}
371
372
373/**
374 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
375 */
376static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
377{
378 /*
379 * Get the MMIO2 range.
380 */
381 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
382 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
383 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
384 Assert(pMmio2->idMmio2 == hMmio2);
385 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
386 VERR_INTERNAL_ERROR_4);
387
388 /*
389 * Get the page and make sure it's an MMIO2 page.
390 */
391 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
392 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
393 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
394
395 /*
396 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
397 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
398 * page is dirty, saving the need for additional storage (bitmap).)
399 */
400 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
401
402 /*
403 * Disable the handler for this page.
404 */
405 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
406 AssertRC(rc);
407#ifndef IN_RING3
408 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
409 {
410 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
411 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
412 }
413#else
414 RT_NOREF(pVCpu, GCPtr);
415#endif
416 return VINF_SUCCESS;
417}
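/* Note on the dirty tracking scheme above: the first intercepted write to an
   MMIO2 page marks the whole range dirty (PGMREGMMIO2RANGE_F_IS_DIRTY) and
   temporarily disables the physical handler for that one page, so further
   writes to it are not intercepted.  The per-page dirty information is thus
   carried by the PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state rather than a
   separate bitmap; it is presumably collected and the handler re-armed when the
   owner queries or resets the dirty state (not part of this excerpt). */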
418
419
420#ifndef IN_RING3
421/**
422 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
423 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
424 *
425 * @remarks The @a uUser is the MMIO2 index.
426 */
427DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
428 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
429{
430 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
431 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
432 if (RT_SUCCESS(rcStrict))
433 {
434 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
435 PGM_UNLOCK(pVM);
436 }
437 return rcStrict;
438}
439#endif /* !IN_RING3 */
440
441
442/**
443 * @callback_method_impl{FNPGMPHYSHANDLER,
444 * Access handler callback for MMIO2 dirty page tracing.}
445 *
446 * @remarks The @a uUser is the MMIO2 index.
447 */
448DECLCALLBACK(VBOXSTRICTRC)
449pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
450 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
451{
452 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
453 if (RT_SUCCESS(rcStrict))
454 {
455 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
456 PGM_UNLOCK(pVM);
457 if (rcStrict == VINF_SUCCESS)
458 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
459 }
460 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
461 return rcStrict;
462}
463
464
465/**
466 * Invalidates the RAM range TLBs.
467 *
468 * @param pVM The cross context VM structure.
469 */
470void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
471{
472 PGM_LOCK_VOID(pVM);
473 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
474 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
475 PGM_UNLOCK(pVM);
476}
477
478
479/**
480 * Tests if a value of type RTGCPHYS would be negative if the type had been signed
481 * instead of unsigned.
482 *
483 * @returns @c true if negative, @c false if positive or zero.
484 * @param a_GCPhys The value to test.
485 * @todo Move me to iprt/types.h.
486 */
487#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
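/* For a 64-bit RTGCPHYS this simply tests bit 63.  For example, with
   GCPhys = 0x1000 and pRam->GCPhys = 0xE0000000 the unsigned difference wraps
   around and has bit 63 set; the slow lookup workers below treat that as
   "go left" in the AVL tree, while a small positive difference means "go
   right" (or a hit when it is below pRam->cb). */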
488
489
490/**
491 * Slow worker for pgmPhysGetRange.
492 *
493 * @copydoc pgmPhysGetRange
494 */
495PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
496{
497 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
498
499 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
500 while (pRam)
501 {
502 RTGCPHYS off = GCPhys - pRam->GCPhys;
503 if (off < pRam->cb)
504 {
505 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
506 return pRam;
507 }
508 if (RTGCPHYS_IS_NEGATIVE(off))
509 pRam = pRam->CTX_SUFF(pLeft);
510 else
511 pRam = pRam->CTX_SUFF(pRight);
512 }
513 return NULL;
514}
515
516
517/**
518 * Slow worker for pgmPhysGetRangeAtOrAbove.
519 *
520 * @copydoc pgmPhysGetRangeAtOrAbove
521 */
522PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
523{
524 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
525
526 PPGMRAMRANGE pLastLeft = NULL;
527 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
528 while (pRam)
529 {
530 RTGCPHYS off = GCPhys - pRam->GCPhys;
531 if (off < pRam->cb)
532 {
533 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
534 return pRam;
535 }
536 if (RTGCPHYS_IS_NEGATIVE(off))
537 {
538 pLastLeft = pRam;
539 pRam = pRam->CTX_SUFF(pLeft);
540 }
541 else
542 pRam = pRam->CTX_SUFF(pRight);
543 }
544 return pLastLeft;
545}
546
547
548/**
549 * Slow worker for pgmPhysGetPage.
550 *
551 * @copydoc pgmPhysGetPage
552 */
553PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
554{
555 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
556
557 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
558 while (pRam)
559 {
560 RTGCPHYS off = GCPhys - pRam->GCPhys;
561 if (off < pRam->cb)
562 {
563 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
564 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
565 }
566
567 if (RTGCPHYS_IS_NEGATIVE(off))
568 pRam = pRam->CTX_SUFF(pLeft);
569 else
570 pRam = pRam->CTX_SUFF(pRight);
571 }
572 return NULL;
573}
574
575
576/**
577 * Slow worker for pgmPhysGetPageEx.
578 *
579 * @copydoc pgmPhysGetPageEx
580 */
581int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
582{
583 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
584
585 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
586 while (pRam)
587 {
588 RTGCPHYS off = GCPhys - pRam->GCPhys;
589 if (off < pRam->cb)
590 {
591 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
592 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
593 return VINF_SUCCESS;
594 }
595
596 if (RTGCPHYS_IS_NEGATIVE(off))
597 pRam = pRam->CTX_SUFF(pLeft);
598 else
599 pRam = pRam->CTX_SUFF(pRight);
600 }
601
602 *ppPage = NULL;
603 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
604}
605
606
607/**
608 * Slow worker for pgmPhysGetPageAndRangeEx.
609 *
610 * @copydoc pgmPhysGetPageAndRangeEx
611 */
612int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
613{
614 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
615
616 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
617 while (pRam)
618 {
619 RTGCPHYS off = GCPhys - pRam->GCPhys;
620 if (off < pRam->cb)
621 {
622 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
623 *ppRam = pRam;
624 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
625 return VINF_SUCCESS;
626 }
627
628 if (RTGCPHYS_IS_NEGATIVE(off))
629 pRam = pRam->CTX_SUFF(pLeft);
630 else
631 pRam = pRam->CTX_SUFF(pRight);
632 }
633
634 *ppRam = NULL;
635 *ppPage = NULL;
636 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
637}
638
639
640/**
641 * Checks if Address Gate 20 is enabled or not.
642 *
643 * @returns true if enabled.
644 * @returns false if disabled.
645 * @param pVCpu The cross context virtual CPU structure.
646 */
647VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
648{
649 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
650 return pVCpu->pgm.s.fA20Enabled;
651}
652
653
654/**
655 * Validates a GC physical address.
656 *
657 * @returns true if valid.
658 * @returns false if invalid.
659 * @param pVM The cross context VM structure.
660 * @param GCPhys The physical address to validate.
661 */
662VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
663{
664 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
665 return pPage != NULL;
666}
667
668
669/**
670 * Checks if a GC physical address is a normal page,
671 * i.e. not ROM, MMIO or reserved.
672 *
673 * @returns true if normal.
674 * @returns false if invalid, ROM, MMIO or reserved page.
675 * @param pVM The cross context VM structure.
676 * @param GCPhys The physical address to check.
677 */
678VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
679{
680 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
681 return pPage
682 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
683}
684
685
686/**
687 * Converts a GC physical address to a HC physical address.
688 *
689 * @returns VINF_SUCCESS on success.
690 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
691 * page but has no physical backing.
692 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
693 * GC physical address.
694 *
695 * @param pVM The cross context VM structure.
696 * @param GCPhys The GC physical address to convert.
697 * @param pHCPhys Where to store the HC physical address on success.
698 */
699VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
700{
701 PGM_LOCK_VOID(pVM);
702 PPGMPAGE pPage;
703 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
704 if (RT_SUCCESS(rc))
705 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
706 PGM_UNLOCK(pVM);
707 return rc;
708}
709
710
711/**
712 * Invalidates all page mapping TLBs.
713 *
714 * @param pVM The cross context VM structure.
715 */
716void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
717{
718 PGM_LOCK_VOID(pVM);
719 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
720
721 /* Clear the R3 & R0 TLBs completely. */
722 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
723 {
724 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
725 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
726 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
727 }
728
729 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
730 {
731 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
732 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
733 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
734 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
735 }
736
737 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
738 PGM_UNLOCK(pVM);
739}
740
741
742/**
743 * Invalidates a page mapping TLB entry
744 *
745 * @param pVM The cross context VM structure.
746 * @param GCPhys GCPhys entry to flush
747 *
748 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
749 * when needed.
750 */
751void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
752{
753 PGM_LOCK_ASSERT_OWNER(pVM);
754
755 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
756
757 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
758
759 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
760 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
761 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
762
763 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
764 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
765 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
766 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
767}
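/* Note: the same index is used for both the ring-0 and ring-3 TLB entries here;
   this assumes PGM_PAGER3MAPTLB_IDX and its ring-0 counterpart hash GCPhys
   identically, i.e. that the two TLBs have the same number of entries. */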
768
769
770/**
771 * Makes sure that there is at least one handy page ready for use.
772 *
773 * This will also take the appropriate actions when reaching water-marks.
774 *
775 * @returns VBox status code.
776 * @retval VINF_SUCCESS on success.
777 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
778 *
779 * @param pVM The cross context VM structure.
780 *
781 * @remarks Must be called from within the PGM critical section. It may
782 * nip back to ring-3/0 in some cases.
783 */
784static int pgmPhysEnsureHandyPage(PVMCC pVM)
785{
786 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
787
788 /*
789 * Do we need to do anything special?
790 */
791#ifdef IN_RING3
792 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
793#else
794 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
795#endif
796 {
797 /*
798 * Allocate pages only if we're out of them, or in ring-3, almost out.
799 */
800#ifdef IN_RING3
801 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
802#else
803 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
804#endif
805 {
806 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
807 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
808#ifdef IN_RING3
809 int rc = PGMR3PhysAllocateHandyPages(pVM);
810#else
811 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
812#endif
813 if (RT_UNLIKELY(rc != VINF_SUCCESS))
814 {
815 if (RT_FAILURE(rc))
816 return rc;
817 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
818 if (!pVM->pgm.s.cHandyPages)
819 {
820 LogRel(("PGM: no more handy pages!\n"));
821 return VERR_EM_NO_MEMORY;
822 }
823 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
824 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
825#ifndef IN_RING3
826 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
827#endif
828 }
829 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
830 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
831 ("%u\n", pVM->pgm.s.cHandyPages),
832 VERR_PGM_HANDY_PAGE_IPE);
833 }
834 else
835 {
836 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
837 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
838#ifndef IN_RING3
839 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
840 {
841 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
842 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
843 }
844#endif
845 }
846 }
847
848 return VINF_SUCCESS;
849}
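/* Rough picture of the watermarks used above (compile-time constants, presumably
   in PGMInternal.h): PGM_HANDY_PAGES_SET_FF is where VM_FF_PGM_NEED_HANDY_PAGES
   gets raised, PGM_HANDY_PAGES_R3_ALLOC / PGM_HANDY_PAGES_RZ_ALLOC are where an
   actual allocation is attempted in the respective context, and
   PGM_HANDY_PAGES_RZ_TO_R3 is where ring-0/raw-mode forces a trip to ring-3 so
   the array can be refilled before we run completely dry. */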
850
851
852/**
853 * Replace a zero or shared page with a new page that we can write to.
854 *
855 * @returns The following VBox status codes.
856 * @retval VINF_SUCCESS on success, pPage is modified.
857 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
858 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
859 *
860 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
861 *
862 * @param pVM The cross context VM structure.
863 * @param pPage The physical page tracking structure. This will
864 * be modified on success.
865 * @param GCPhys The address of the page.
866 *
867 * @remarks Must be called from within the PGM critical section. It may
868 * nip back to ring-3/0 in some cases.
869 *
870 * @remarks This function shouldn't really fail, however if it does
871 * it probably means we've screwed up the size of handy pages and/or
872 * the low-water mark. Or, that some device I/O is causing a lot of
873 * pages to be allocated while the host is in a low-memory
874 * condition. This latter should be handled elsewhere and in a more
875 * controlled manner, it's on the @bugref{3170} todo list...
876 */
877int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
878{
879 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
880
881 /*
882 * Prereqs.
883 */
884 PGM_LOCK_ASSERT_OWNER(pVM);
885 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
886 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
887
888# ifdef PGM_WITH_LARGE_PAGES
889 /*
890 * Try allocate a large page if applicable.
891 */
892 if ( PGMIsUsingLargePages(pVM)
893 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
894 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
895 {
896 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
897 PPGMPAGE pBasePage;
898
899 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
900 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
901 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
902 {
903 rc = pgmPhysAllocLargePage(pVM, GCPhys);
904 if (rc == VINF_SUCCESS)
905 return rc;
906 }
907 /* Mark the base as type page table, so we don't check over and over again. */
908 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
909
910 /* fall back to 4KB pages. */
911 }
912# endif
913
914 /*
915 * Flush any shadow page table mappings of the page.
916 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
917 */
918 bool fFlushTLBs = false;
919 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
920 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
921
922 /*
923 * Ensure that we've got a page handy, take it and use it.
924 */
925 int rc2 = pgmPhysEnsureHandyPage(pVM);
926 if (RT_FAILURE(rc2))
927 {
928 if (fFlushTLBs)
929 PGM_INVL_ALL_VCPU_TLBS(pVM);
930 Assert(rc2 == VERR_EM_NO_MEMORY);
931 return rc2;
932 }
933 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
934 PGM_LOCK_ASSERT_OWNER(pVM);
935 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
936 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
937
938 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
939 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
940 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
941 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
942 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
943 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
944
945 /*
946 * There are one or two actions to be taken the next time we allocate handy pages:
947 * - Tell the GMM (global memory manager) what the page is being used for.
948 * (Speeds up replacement operations - sharing and defragmenting.)
949 * - If the current backing is shared, it must be freed.
950 */
951 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
952 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
953
954 void const *pvSharedPage = NULL;
955 if (PGM_PAGE_IS_SHARED(pPage))
956 {
957 /* Mark this shared page for freeing/dereferencing. */
958 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
959 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
960
961 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
962 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
963 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
964 pVM->pgm.s.cSharedPages--;
965
966 /* Grab the address of the page so we can make a copy later on. (safe) */
967 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
968 AssertRC(rc);
969 }
970 else
971 {
972 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
973 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
974 pVM->pgm.s.cZeroPages--;
975 }
976
977 /*
978 * Do the PGMPAGE modifications.
979 */
980 pVM->pgm.s.cPrivatePages++;
981 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
982 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
983 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
984 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
985 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
986 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
987
988 /* Copy the shared page contents to the replacement page. */
989 if (pvSharedPage)
990 {
991 /* Get the virtual address of the new page. */
992 PGMPAGEMAPLOCK PgMpLck;
993 void *pvNewPage;
994 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
995 if (RT_SUCCESS(rc))
996 {
997 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
998 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
999 }
1000 }
1001
1002 if ( fFlushTLBs
1003 && rc != VINF_PGM_GCPHYS_ALIASED)
1004 PGM_INVL_ALL_VCPU_TLBS(pVM);
1005
1006 /*
1007 * Notify NEM about the mapping change for this page.
1008 *
1009 * Note! Shadow ROM pages are complicated as they can definitely be
1010 * allocated while not visible, so play safe.
1011 */
1012 if (VM_IS_NEM_ENABLED(pVM))
1013 {
1014 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1015 if ( enmType != PGMPAGETYPE_ROM_SHADOW
1016 || pgmPhysGetPage(pVM, GCPhys) == pPage)
1017 {
1018 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1019 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
1020 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1021 if (RT_SUCCESS(rc2))
1022 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1023 else
1024 rc = rc2;
1025 }
1026 }
1027
1028 return rc;
1029}
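/* Note: when a shared page is replaced above, its old backing is not freed here;
   the id is recorded in aHandyPages[iHandyPage].idSharedPage and the actual
   free/dereference happens on the next handy-page allocation round trip to GMM
   (see the "one or two actions" comment in the function body). */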
1030
1031#ifdef PGM_WITH_LARGE_PAGES
1032
1033/**
1034 * Replace a 2 MB range of zero pages with new pages that we can write to.
1035 *
1036 * @returns The following VBox status codes.
1037 * @retval VINF_SUCCESS on success, the pages in the range are allocated.
1038 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1039 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1040 *
1041 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1042 *
1043 * @param pVM The cross context VM structure.
1044 * @param GCPhys The address of the page.
1045 *
1046 * @remarks Must be called from within the PGM critical section. It may block
1047 * on GMM and host mutexes/locks, leaving HM context.
1048 */
1049int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1050{
1051 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1052 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1053 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1054
1055 /*
1056 * Check Prereqs.
1057 */
1058 PGM_LOCK_ASSERT_OWNER(pVM);
1059 Assert(PGMIsUsingLargePages(pVM));
1060
1061 /*
1062 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1063 */
1064 PPGMPAGE pFirstPage;
1065 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1066 if ( RT_SUCCESS(rc)
1067 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1068 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1069 {
1070 /*
1071 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1072 * since they are unallocated.
1073 */
1074 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1075 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1076 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1077 {
1078 /*
1079 * Now, make sure all the other pages in the 2 MB range are in the same state.
1080 */
1081 GCPhys = GCPhysBase;
1082 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1083 while (cLeft-- > 0)
1084 {
1085 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1086 if ( pSubPage
1087 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1088 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1089 {
1090 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1091 GCPhys += GUEST_PAGE_SIZE;
1092 }
1093 else
1094 {
1095 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1096 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1097
1098 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1099 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1100 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1101 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1102 }
1103 }
1104
1105 /*
1106 * Do the allocation.
1107 */
1108# ifdef IN_RING3
1109 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1110# elif defined(IN_RING0)
1111 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1112# else
1113# error "Port me"
1114# endif
1115 if (RT_SUCCESS(rc))
1116 {
1117 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1118 pVM->pgm.s.cLargePages++;
1119 return VINF_SUCCESS;
1120 }
1121
1122 /* If we fail once, it most likely means the host's memory is too
1123 fragmented; don't bother trying again. */
1124 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1125 return rc;
1126 }
1127 }
1128 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1129}
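/* The check loop above walks all 512 pages (_2M / GUEST_PAGE_SIZE, assuming 4K
   guest pages) and bails out at the first one that is not an unallocated
   RAM/ZERO page, marking the base page PGM_PAGE_PDE_TYPE_PT so the range is not
   re-examined on every subsequent fault. */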
1130
1131
1132/**
1133 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1134 *
1135 * @returns The following VBox status codes.
1136 * @retval VINF_SUCCESS on success, the large page can be used again
1137 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1138 *
1139 * @param pVM The cross context VM structure.
1140 * @param GCPhys The address of the page.
1141 * @param pLargePage Page structure of the base page
1142 */
1143int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1144{
1145 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1146
1147 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1148
1149 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1150
1151 /* Check the base page. */
1152 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1153 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1154 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1155 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1156 {
1157 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1158 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1159 }
1160
1161 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1162 /* Check all remaining pages in the 2 MB range. */
1163 unsigned i;
1164 GCPhys += GUEST_PAGE_SIZE;
1165 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1166 {
1167 PPGMPAGE pPage;
1168 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1169 AssertRCBreak(rc);
1170
1171 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1172 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1173 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1174 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1175 {
1176 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1177 break;
1178 }
1179
1180 GCPhys += GUEST_PAGE_SIZE;
1181 }
1182 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1183
1184 if (i == _2M / GUEST_PAGE_SIZE)
1185 {
1186 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1187 pVM->pgm.s.cLargePagesDisabled--;
1188 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1189 return VINF_SUCCESS;
1190 }
1191
1192 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1193}
1194
1195#endif /* PGM_WITH_LARGE_PAGES */
1196
1197
1198/**
1199 * Deal with a write monitored page.
1200 *
1201 * @returns VBox strict status code.
1202 *
1203 * @param pVM The cross context VM structure.
1204 * @param pPage The physical page tracking structure.
1205 * @param GCPhys The guest physical address of the page.
1206 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1207 * very unlikely situation where it is okay that we let NEM
1208 * fix the page access in a lazy fashion.
1209 *
1210 * @remarks Called from within the PGM critical section.
1211 */
1212void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1213{
1214 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1215 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1216 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1217 Assert(pVM->pgm.s.cMonitoredPages > 0);
1218 pVM->pgm.s.cMonitoredPages--;
1219 pVM->pgm.s.cWrittenToPages++;
1220
1221#ifdef VBOX_WITH_NATIVE_NEM
1222 /*
1223 * Notify NEM about the protection change so we won't spin forever.
1224 *
1225 * Note! NEM needs to be able to lazily correct page protection as we cannot
1226 * really get it 100% right here it seems. The page pool does this too.
1227 */
1228 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1229 {
1230 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1231 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1232 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1233 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1234 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1235 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1236 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1237 }
1238#else
1239 RT_NOREF(GCPhys);
1240#endif
1241}
1242
1243
1244/**
1245 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1246 *
1247 * @returns VBox strict status code.
1248 * @retval VINF_SUCCESS on success.
1249 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1250 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1251 *
1252 * @param pVM The cross context VM structure.
1253 * @param pPage The physical page tracking structure.
1254 * @param GCPhys The address of the page.
1255 *
1256 * @remarks Called from within the PGM critical section.
1257 */
1258int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1259{
1260 PGM_LOCK_ASSERT_OWNER(pVM);
1261 switch (PGM_PAGE_GET_STATE(pPage))
1262 {
1263 case PGM_PAGE_STATE_WRITE_MONITORED:
1264 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1265 RT_FALL_THRU();
1266 default: /* to shut up GCC */
1267 case PGM_PAGE_STATE_ALLOCATED:
1268 return VINF_SUCCESS;
1269
1270 /*
1271 * Zero pages can be dummy pages for MMIO or reserved memory,
1272 * so we need to check the flags before joining cause with
1273 * shared page replacement.
1274 */
1275 case PGM_PAGE_STATE_ZERO:
1276 if (PGM_PAGE_IS_MMIO(pPage))
1277 return VERR_PGM_PHYS_PAGE_RESERVED;
1278 RT_FALL_THRU();
1279 case PGM_PAGE_STATE_SHARED:
1280 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1281
1282 /* Not allowed to write to ballooned pages. */
1283 case PGM_PAGE_STATE_BALLOONED:
1284 return VERR_PGM_PHYS_PAGE_BALLOONED;
1285 }
1286}
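/* Summary of the state handling above: ALLOCATED pages are returned as-is,
   WRITE_MONITORED pages are flipped back to ALLOCATED (with NEM notified),
   ZERO pages backing MMIO yield VERR_PGM_PHYS_PAGE_RESERVED, other ZERO and
   SHARED pages go through pgmPhysAllocPage() to get private backing, and
   BALLOONED pages yield VERR_PGM_PHYS_PAGE_BALLOONED. */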
1287
1288
1289/**
1290 * Internal usage: Map the page specified by its GMM ID.
1291 *
1292 * This is similar to pgmPhysPageMap
1293 *
1294 * @returns VBox status code.
1295 *
1296 * @param pVM The cross context VM structure.
1297 * @param idPage The Page ID.
1298 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1299 * @param ppv Where to store the mapping address.
1300 *
1301 * @remarks Called from within the PGM critical section. The mapping is only
1302 * valid while you are inside this section.
1303 */
1304int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1305{
1306 /*
1307 * Validation.
1308 */
1309 PGM_LOCK_ASSERT_OWNER(pVM);
1310 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1311 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1312 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1313
1314#ifdef IN_RING0
1315# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1316 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1317# else
1318 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1319# endif
1320
1321#else
1322 /*
1323 * Find/make Chunk TLB entry for the mapping chunk.
1324 */
1325 PPGMCHUNKR3MAP pMap;
1326 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1327 if (pTlbe->idChunk == idChunk)
1328 {
1329 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1330 pMap = pTlbe->pChunk;
1331 }
1332 else
1333 {
1334 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1335
1336 /*
1337 * Find the chunk, map it if necessary.
1338 */
1339 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1340 if (pMap)
1341 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1342 else
1343 {
1344 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1345 if (RT_FAILURE(rc))
1346 return rc;
1347 }
1348
1349 /*
1350 * Enter it into the Chunk TLB.
1351 */
1352 pTlbe->idChunk = idChunk;
1353 pTlbe->pChunk = pMap;
1354 }
1355
1356 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1357 return VINF_SUCCESS;
1358#endif
1359}
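/* Illustrative note: the page id encodes the GMM chunk in its upper bits
   (idPage >> GMM_CHUNKID_SHIFT) and the page's index within that chunk in the
   lower bits (idPage & GMM_PAGEID_IDX_MASK).  The ring-3 chunk TLB above only
   caches the chunk mapping; the final address is computed from the index. */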
1360
1361
1362/**
1363 * Maps a page into the current virtual address space so it can be accessed.
1364 *
1365 * @returns VBox status code.
1366 * @retval VINF_SUCCESS on success.
1367 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1368 *
1369 * @param pVM The cross context VM structure.
1370 * @param pPage The physical page tracking structure.
1371 * @param GCPhys The address of the page.
1372 * @param ppMap Where to store the address of the mapping tracking structure.
1373 * @param ppv Where to store the mapping address of the page. The page
1374 * offset is masked off!
1375 *
1376 * @remarks Called from within the PGM critical section.
1377 */
1378static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1379{
1380 PGM_LOCK_ASSERT_OWNER(pVM);
1381 NOREF(GCPhys);
1382
1383 /*
1384 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1385 */
1386 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1387 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1388 {
1389 /* Decode the page id to a page in a MMIO2 ram range. */
1390 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1391 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1392 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1393 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1394 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1395 pPage->s.idPage, pPage->s.uStateY),
1396 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1397 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1398 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1399 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1400 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1401 *ppMap = NULL;
1402# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1403 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1404# elif defined(IN_RING0)
1405 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1406 return VINF_SUCCESS;
1407# else
1408 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1409 return VINF_SUCCESS;
1410# endif
1411 }
1412
1413# ifdef VBOX_WITH_PGM_NEM_MODE
1414 if (pVM->pgm.s.fNemMode)
1415 {
1416# ifdef IN_RING3
1417 /*
1418 * Find the corresponding RAM range and use that to locate the mapping address.
1419 */
1420 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1421 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1422 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1423 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1424 Assert(pPage == &pRam->aPages[idxPage]);
1425 *ppMap = NULL;
1426 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1427 return VINF_SUCCESS;
1428# else
1429 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1430# endif
1431 }
1432# endif
1433
1434 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1435 if (idChunk == NIL_GMM_CHUNKID)
1436 {
1437 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1438 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1439 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1440 {
1441 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1442 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1443 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1444 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1445 *ppv = pVM->pgm.s.abZeroPg;
1446 }
1447 else
1448 *ppv = pVM->pgm.s.abZeroPg;
1449 *ppMap = NULL;
1450 return VINF_SUCCESS;
1451 }
1452
1453# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1454 /*
1455 * Just use the physical address.
1456 */
1457 *ppMap = NULL;
1458 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1459
1460# elif defined(IN_RING0)
1461 /*
1462 * Go by page ID thru GMMR0.
1463 */
1464 *ppMap = NULL;
1465 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1466
1467# else
1468 /*
1469 * Find/make Chunk TLB entry for the mapping chunk.
1470 */
1471 PPGMCHUNKR3MAP pMap;
1472 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1473 if (pTlbe->idChunk == idChunk)
1474 {
1475 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1476 pMap = pTlbe->pChunk;
1477 AssertPtr(pMap->pv);
1478 }
1479 else
1480 {
1481 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1482
1483 /*
1484 * Find the chunk, map it if necessary.
1485 */
1486 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1487 if (pMap)
1488 {
1489 AssertPtr(pMap->pv);
1490 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1491 }
1492 else
1493 {
1494 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1495 if (RT_FAILURE(rc))
1496 return rc;
1497 AssertPtr(pMap->pv);
1498 }
1499
1500 /*
1501 * Enter it into the Chunk TLB.
1502 */
1503 pTlbe->idChunk = idChunk;
1504 pTlbe->pChunk = pMap;
1505 }
1506
1507 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1508 *ppMap = pMap;
1509 return VINF_SUCCESS;
1510# endif /* !IN_RING0 */
1511}
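/* Dispatch order in pgmPhysPageMapCommon above: MMIO2 pages are resolved via the
   MMIO2 id/index embedded in their page id; NEM mode (ring-3 only here) resolves
   the address through the owning RAM range's ring-3 mapping; pages without a GMM
   chunk (zero pages and special MMIO aliases) map to the shared zero page; and
   everything else goes through the GMM chunk mapping, by physical address or
   page id in ring-0, or via the chunk TLB in ring-3. */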
1512
1513
1514/**
1515 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1516 *
1517 * This is typically used in paths where we cannot use the TLB methods (like ROM
1518 * pages) or where there is no point in using them since we won't get many hits.
1519 *
1520 * @returns VBox strict status code.
1521 * @retval VINF_SUCCESS on success.
1522 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1523 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1524 *
1525 * @param pVM The cross context VM structure.
1526 * @param pPage The physical page tracking structure.
1527 * @param GCPhys The address of the page.
1528 * @param ppv Where to store the mapping address of the page. The page
1529 * offset is masked off!
1530 *
1531 * @remarks Called from within the PGM critical section. The mapping is only
1532 * valid while you are inside the section.
1533 */
1534int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1535{
1536 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1537 if (RT_SUCCESS(rc))
1538 {
1539 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1540 PPGMPAGEMAP pMapIgnore;
1541 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1542 if (RT_FAILURE(rc2)) /* preserve rc */
1543 rc = rc2;
1544 }
1545 return rc;
1546}
1547
1548
1549/**
1550 * Maps a page into the current virtual address space so it can be accessed for
1551 * both writing and reading.
1552 *
1553 * This is typically used in paths where we cannot use the TLB methods (like ROM
1554 * pages) or where there is no point in using them since we won't get many hits.
1555 *
1556 * @returns VBox status code.
1557 * @retval VINF_SUCCESS on success.
1558 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1559 *
1560 * @param pVM The cross context VM structure.
1561 * @param pPage The physical page tracking structure. Must be in the
1562 * allocated state.
1563 * @param GCPhys The address of the page.
1564 * @param ppv Where to store the mapping address of the page. The page
1565 * offset is masked off!
1566 *
1567 * @remarks Called from within the PGM critical section. The mapping is only
1568 * valid while you are inside the section.
1569 */
1570int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1571{
1572 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1573 PPGMPAGEMAP pMapIgnore;
1574 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1575}
1576
1577
1578/**
1579 * Maps a page into the current virtual address space so it can be accessed for
1580 * reading.
1581 *
1582 * This is typically used in paths where we cannot use the TLB methods (like ROM
1583 * pages) or where there is no point in using them since we won't get many hits.
1584 *
1585 * @returns VBox status code.
1586 * @retval VINF_SUCCESS on success.
1587 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1588 *
1589 * @param pVM The cross context VM structure.
1590 * @param pPage The physical page tracking structure.
1591 * @param GCPhys The address of the page.
1592 * @param ppv Where to store the mapping address of the page. The page
1593 * offset is masked off!
1594 *
1595 * @remarks Called from within the PGM critical section. The mapping is only
1596 * valid while you are inside this section.
1597 */
1598int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1599{
1600 PPGMPAGEMAP pMapIgnore;
1601 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1602}
1603
1604
1605/**
1606 * Load a guest page into the ring-3 physical TLB.
1607 *
1608 * @returns VBox status code.
1609 * @retval VINF_SUCCESS on success
1610 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1611 * @param pVM The cross context VM structure.
1612 * @param GCPhys The guest physical address in question.
1613 */
1614int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1615{
1616 PGM_LOCK_ASSERT_OWNER(pVM);
1617
1618 /*
1619 * Find the ram range and page and hand it over to the with-page function.
1620 * 99.8% of requests are expected to be in the first range.
1621 */
1622 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1623 if (!pPage)
1624 {
1625 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1626 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1627 }
1628
1629 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1630}
1631
1632
1633/**
1634 * Load a guest page into the ring-3 physical TLB.
1635 *
1636 * @returns VBox status code.
1637 * @retval VINF_SUCCESS on success
1638 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1639 *
1640 * @param pVM The cross context VM structure.
1641 * @param pPage Pointer to the PGMPAGE structure corresponding to
1642 * GCPhys.
1643 * @param GCPhys The guest physical address in question.
1644 */
1645int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1646{
1647 PGM_LOCK_ASSERT_OWNER(pVM);
1648 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1649
1650 /*
1651 * Map the page.
1652 * Make a special case for the zero page as it is kind of special.
1653 */
1654 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1655 if ( !PGM_PAGE_IS_ZERO(pPage)
1656 && !PGM_PAGE_IS_BALLOONED(pPage))
1657 {
1658 void *pv;
1659 PPGMPAGEMAP pMap;
1660 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1661 if (RT_FAILURE(rc))
1662 return rc;
1663# ifndef IN_RING0
1664 pTlbe->pMap = pMap;
1665# endif
1666 pTlbe->pv = pv;
1667 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1668 }
1669 else
1670 {
1671 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1672# ifndef IN_RING0
1673 pTlbe->pMap = NULL;
1674# endif
1675 pTlbe->pv = pVM->pgm.s.abZeroPg;
1676 }
1677# ifdef PGM_WITH_PHYS_TLB
1678 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1679 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1680 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1681 else
1682 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1683# else
1684 pTlbe->GCPhys = NIL_RTGCPHYS;
1685# endif
1686 pTlbe->pPage = pPage;
1687 return VINF_SUCCESS;
1688}
1689
1690
1691/**
1692 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1693 * own the PGM lock and therefore not need to lock the mapped page.
1694 *
1695 * @returns VBox status code.
1696 * @retval VINF_SUCCESS on success.
1697 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1698 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1699 *
1700 * @param pVM The cross context VM structure.
1701 * @param GCPhys The guest physical address of the page that should be mapped.
1702 * @param pPage Pointer to the PGMPAGE structure for the page.
1703 * @param ppv Where to store the address corresponding to GCPhys.
1704 *
1705 * @internal
1706 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1707 */
1708int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1709{
1710 int rc;
1711 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1712 PGM_LOCK_ASSERT_OWNER(pVM);
1713 pVM->pgm.s.cDeprecatedPageLocks++;
1714
1715 /*
1716 * Make sure the page is writable.
1717 */
1718 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1719 {
1720 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1721 if (RT_FAILURE(rc))
1722 return rc;
1723 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1724 }
1725 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1726
1727 /*
1728 * Get the mapping address.
1729 */
1730 PPGMPAGEMAPTLBE pTlbe;
1731 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1732 if (RT_FAILURE(rc))
1733 return rc;
1734 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1735 return VINF_SUCCESS;
1736}
1737
1738
1739/**
1740 * Locks a page mapping for writing.
1741 *
1742 * @param pVM The cross context VM structure.
1743 * @param pPage The page.
1744 * @param pTlbe The mapping TLB entry for the page.
1745 * @param pLock The lock structure (output).
1746 */
1747DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1748{
1749# ifndef IN_RING0
1750 PPGMPAGEMAP pMap = pTlbe->pMap;
1751 if (pMap)
1752 pMap->cRefs++;
1753# else
1754 RT_NOREF(pTlbe);
1755# endif
1756
1757 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1758 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1759 {
1760 if (cLocks == 0)
1761 pVM->pgm.s.cWriteLockedPages++;
1762 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1763 }
1764 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1765 {
1766 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1767 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1768# ifndef IN_RING0
1769 if (pMap)
1770 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1771# endif
1772 }
1773
1774 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1775# ifndef IN_RING0
1776 pLock->pvMap = pMap;
1777# else
1778 pLock->pvMap = NULL;
1779# endif
1780}
1781
1782/**
1783 * Locks a page mapping for reading.
1784 *
1785 * @param pVM The cross context VM structure.
1786 * @param pPage The page.
1787 * @param pTlbe The mapping TLB entry for the page.
1788 * @param pLock The lock structure (output).
1789 */
1790DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1791{
1792# ifndef IN_RING0
1793 PPGMPAGEMAP pMap = pTlbe->pMap;
1794 if (pMap)
1795 pMap->cRefs++;
1796# else
1797 RT_NOREF(pTlbe);
1798# endif
1799
1800 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1801 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1802 {
1803 if (cLocks == 0)
1804 pVM->pgm.s.cReadLockedPages++;
1805 PGM_PAGE_INC_READ_LOCKS(pPage);
1806 }
1807 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1808 {
1809 PGM_PAGE_INC_READ_LOCKS(pPage);
1810 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1811# ifndef IN_RING0
1812 if (pMap)
1813 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1814# endif
1815 }
1816
1817 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1818# ifndef IN_RING0
1819 pLock->pvMap = pMap;
1820# else
1821 pLock->pvMap = NULL;
1822# endif
1823}
1824
1825
1826/**
1827 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1828 * own the PGM lock and have access to the page structure.
1829 *
1830 * @returns VBox status code.
1831 * @retval VINF_SUCCESS on success.
1832 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1833 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1834 *
1835 * @param pVM The cross context VM structure.
1836 * @param GCPhys The guest physical address of the page that should be mapped.
1837 * @param pPage Pointer to the PGMPAGE structure for the page.
1838 * @param ppv Where to store the address corresponding to GCPhys.
1839 * @param pLock Where to store the lock information that
1840 * pgmPhysReleaseInternalPageMappingLock needs.
1841 *
1842 * @internal
1843 */
1844int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1845{
1846 int rc;
1847 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1848 PGM_LOCK_ASSERT_OWNER(pVM);
1849
1850 /*
1851 * Make sure the page is writable.
1852 */
1853 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1854 {
1855 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1856 if (RT_FAILURE(rc))
1857 return rc;
1858 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1859 }
1860 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1861
1862 /*
1863 * Do the job.
1864 */
1865 PPGMPAGEMAPTLBE pTlbe;
1866 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1867 if (RT_FAILURE(rc))
1868 return rc;
1869 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1870 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1871 return VINF_SUCCESS;
1872}
1873
1874
1875/**
1876 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1877 * own the PGM lock and have access to the page structure.
1878 *
1879 * @returns VBox status code.
1880 * @retval VINF_SUCCESS on success.
1881 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1882 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1883 *
1884 * @param pVM The cross context VM structure.
1885 * @param GCPhys The guest physical address of the page that should be mapped.
1886 * @param pPage Pointer to the PGMPAGE structure for the page.
1887 * @param ppv Where to store the address corresponding to GCPhys.
1888 * @param pLock Where to store the lock information that
1889 * pgmPhysReleaseInternalPageMappingLock needs.
1890 *
1891 * @internal
1892 */
1893int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1894{
1895 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1896 PGM_LOCK_ASSERT_OWNER(pVM);
1897 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1898
1899 /*
1900 * Do the job.
1901 */
1902 PPGMPAGEMAPTLBE pTlbe;
1903 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1904 if (RT_FAILURE(rc))
1905 return rc;
1906 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1907 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1908 return VINF_SUCCESS;
1909}
1910
1911
1912/**
1913 * Requests the mapping of a guest page into the current context.
1914 *
1915 * This API should only be used for very short periods, as it will consume scarce
1916 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1917 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1918 *
1919 * This API will assume your intention is to write to the page, and will
1920 * therefore replace shared and zero pages. If you do not intend to modify
1921 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1922 *
1923 * @returns VBox status code.
1924 * @retval VINF_SUCCESS on success.
1925 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1926 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1927 *
1928 * @param pVM The cross context VM structure.
1929 * @param GCPhys The guest physical address of the page that should be
1930 * mapped.
1931 * @param ppv Where to store the address corresponding to GCPhys.
1932 * @param pLock Where to store the lock information that
1933 * PGMPhysReleasePageMappingLock needs.
1934 *
1935 * @remarks The caller is responsible for dealing with access handlers.
1936 * @todo Add an informational return code for pages with access handlers?
1937 *
1938 * @remark Avoid calling this API from within critical sections (other than
1939 * the PGM one) because of the deadlock risk. External threads may
1940 * need to delegate jobs to the EMTs.
1941 * @remarks Only one page is mapped! Make no assumption about what's after or
1942 * before the returned page!
1943 * @thread Any thread.
1944 */
1945VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1946{
1947 int rc = PGM_LOCK(pVM);
1948 AssertRCReturn(rc, rc);
1949
1950 /*
1951 * Query the Physical TLB entry for the page (may fail).
1952 */
1953 PPGMPAGEMAPTLBE pTlbe;
1954 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1955 if (RT_SUCCESS(rc))
1956 {
1957 /*
1958 * If the page is shared, the zero page, or being write monitored
1959 * it must be converted to a page that's writable if possible.
1960 */
1961 PPGMPAGE pPage = pTlbe->pPage;
1962 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1963 {
1964 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1965 if (RT_SUCCESS(rc))
1966 {
1967 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1968 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1969 }
1970 }
1971 if (RT_SUCCESS(rc))
1972 {
1973 /*
1974 * Now, just perform the locking and calculate the return address.
1975 */
1976 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1977 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1978 }
1979 }
1980
1981 PGM_UNLOCK(pVM);
1982 return rc;
1983}
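
/*
 * Illustrative sketch (never built): a minimal caller pattern for the writable
 * mapping API above.  The helper name pgmSamplePokeU8 is hypothetical; the point
 * is the mandatory pairing of PGMPhysGCPhys2CCPtr with
 * PGMPhysReleasePageMappingLock, releasing the lock as soon as possible.
 */
#if 0
static int pgmSamplePokeU8(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                   /* pv already includes the page offset of GCPhys. */
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* Release ASAP - the lock pins scarce mapping resources. */
    }
    return rc;
}
#endif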
1984
1985
1986/**
1987 * Requests the mapping of a guest page into the current context.
1988 *
1989 * This API should only be used for very short periods, as it will consume scarce
1990 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1991 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1992 *
1993 * @returns VBox status code.
1994 * @retval VINF_SUCCESS on success.
1995 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1996 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1997 *
1998 * @param pVM The cross context VM structure.
1999 * @param GCPhys The guest physical address of the page that should be
2000 * mapped.
2001 * @param ppv Where to store the address corresponding to GCPhys.
2002 * @param pLock Where to store the lock information that
2003 * PGMPhysReleasePageMappingLock needs.
2004 *
2005 * @remarks The caller is responsible for dealing with access handlers.
2006 * @todo Add an informational return code for pages with access handlers?
2007 *
2008 * @remarks Avoid calling this API from within critical sections (other than
2009 * the PGM one) because of the deadlock risk.
2010 * @remarks Only one page is mapped! Make no assumption about what's after or
2011 * before the returned page!
2012 * @thread Any thread.
2013 */
2014VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
2015{
2016 int rc = PGM_LOCK(pVM);
2017 AssertRCReturn(rc, rc);
2018
2019 /*
2020 * Query the Physical TLB entry for the page (may fail).
2021 */
2022 PPGMPAGEMAPTLBE pTlbe;
2023 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
2024 if (RT_SUCCESS(rc))
2025 {
2026        /* MMIO pages don't have any readable backing. */
2027 PPGMPAGE pPage = pTlbe->pPage;
2028 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
2029 rc = VERR_PGM_PHYS_PAGE_RESERVED;
2030 else
2031 {
2032 /*
2033 * Now, just perform the locking and calculate the return address.
2034 */
2035 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2036 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2037 }
2038 }
2039
2040 PGM_UNLOCK(pVM);
2041 return rc;
2042}
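
/*
 * Illustrative sketch (never built): the read-only counterpart of the pattern
 * above, e.g. for peeking at a guest structure without forcing page allocation
 * or breaking write monitoring.  pgmSamplePeekU32 is a hypothetical name and
 * the value is assumed not to cross a page boundary.
 */
#if 0
static int pgmSamplePeekU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t *puValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *puValue = *(uint32_t const *)pv;          /* Only this one page is mapped. */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc; /* MMIO and special aliases fail with VERR_PGM_PHYS_PAGE_RESERVED, see above. */
}
#endif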
2043
2044
2045/**
2046 * Requests the mapping of a guest page given by virtual address into the current context.
2047 *
2048 * This API should only be used for very short periods, as it will consume
2049 * scarce resources (R0 and GC) in the mapping cache. When you're done
2050 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2051 *
2052 * This API will assume your intention is to write to the page, and will
2053 * therefore replace shared and zero pages. If you do not intend to modify
2054 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2055 *
2056 * @returns VBox status code.
2057 * @retval VINF_SUCCESS on success.
2058 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2059 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2060 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2061 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2062 *
2063 * @param pVCpu The cross context virtual CPU structure.
2064 * @param GCPtr The guest virtual address of the page that should be
2065 * mapped.
2066 * @param ppv Where to store the address corresponding to GCPtr.
2067 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2068 *
2069 * @remark Avoid calling this API from within critical sections (other than
2070 * the PGM one) because of the deadlock risk.
2071 * @thread EMT
2072 */
2073VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2074{
2075 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2076 RTGCPHYS GCPhys;
2077 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2078 if (RT_SUCCESS(rc))
2079 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2080 return rc;
2081}
2082
2083
2084/**
2085 * Requests the mapping of a guest page given by virtual address into the current context.
2086 *
2087 * This API should only be used for very short periods, as it will consume
2088 * scarce resources (R0 and GC) in the mapping cache. When you're done
2089 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2090 *
2091 * @returns VBox status code.
2092 * @retval VINF_SUCCESS on success.
2093 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2094 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2095 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2096 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2097 *
2098 * @param pVCpu The cross context virtual CPU structure.
2099 * @param GCPtr The guest virtual address of the page that should be
2100 * mapped.
2101 * @param ppv Where to store the address corresponding to GCPtr.
2102 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2103 *
2104 * @remark Avoid calling this API from within critical sections (other than
2105 * the PGM one) because of the deadlock risk.
2106 * @thread EMT
2107 */
2108VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2109{
2110 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2111 RTGCPHYS GCPhys;
2112 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2113 if (RT_SUCCESS(rc))
2114 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2115 return rc;
2116}
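
/*
 * Illustrative sketch (never built): the guest-virtual variants above are just a
 * PGMPhysGCPtr2GCPhys translation followed by the corresponding physical mapping
 * call, so a caller (EMT only) might look like this.  pgmSampleReadGuestPtrU8 is
 * a hypothetical name.
 */
#if 0
static int pgmSampleReadGuestPtrU8(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc; /* May also fail with VERR_PAGE_NOT_PRESENT or VERR_PAGE_TABLE_NOT_PRESENT. */
}
#endif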
2117
2118
2119/**
2120 * Release the mapping of a guest page.
2121 *
2122 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2123 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2124 *
2125 * @param pVM The cross context VM structure.
2126 * @param pLock The lock structure initialized by the mapping function.
2127 */
2128VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2129{
2130# ifndef IN_RING0
2131 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2132# endif
2133 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2134 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2135
2136 pLock->uPageAndType = 0;
2137 pLock->pvMap = NULL;
2138
2139 PGM_LOCK_VOID(pVM);
2140 if (fWriteLock)
2141 {
2142 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2143 Assert(cLocks > 0);
2144 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2145 {
2146 if (cLocks == 1)
2147 {
2148 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2149 pVM->pgm.s.cWriteLockedPages--;
2150 }
2151 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2152 }
2153
2154 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2155 { /* probably extremely likely */ }
2156 else
2157 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2158 }
2159 else
2160 {
2161 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2162 Assert(cLocks > 0);
2163 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2164 {
2165 if (cLocks == 1)
2166 {
2167 Assert(pVM->pgm.s.cReadLockedPages > 0);
2168 pVM->pgm.s.cReadLockedPages--;
2169 }
2170 PGM_PAGE_DEC_READ_LOCKS(pPage);
2171 }
2172 }
2173
2174# ifndef IN_RING0
2175 if (pMap)
2176 {
2177 Assert(pMap->cRefs >= 1);
2178 pMap->cRefs--;
2179 }
2180# endif
2181 PGM_UNLOCK(pVM);
2182}
2183
2184
2185#ifdef IN_RING3
2186/**
2187 * Release the mapping of multiple guest pages.
2188 *
2189 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2190 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2191 *
2192 * @param pVM The cross context VM structure.
2193 * @param cPages Number of pages to unlock.
2194 * @param paLocks Array of lock structures initialized by the mapping
2195 * function.
2196 */
2197VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2198{
2199 Assert(cPages > 0);
2200 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2201#ifdef VBOX_STRICT
2202 for (uint32_t i = 1; i < cPages; i++)
2203 {
2204 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2205 AssertPtr(paLocks[i].uPageAndType);
2206 }
2207#endif
2208
2209 PGM_LOCK_VOID(pVM);
2210 if (fWriteLock)
2211 {
2212 /*
2213 * Write locks:
2214 */
2215 for (uint32_t i = 0; i < cPages; i++)
2216 {
2217 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2218 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2219 Assert(cLocks > 0);
2220 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2221 {
2222 if (cLocks == 1)
2223 {
2224 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2225 pVM->pgm.s.cWriteLockedPages--;
2226 }
2227 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2228 }
2229
2230 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2231 { /* probably extremely likely */ }
2232 else
2233 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2234
2235 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2236 if (pMap)
2237 {
2238 Assert(pMap->cRefs >= 1);
2239 pMap->cRefs--;
2240 }
2241
2242 /* Yield the lock: */
2243 if ((i & 1023) == 1023 && i + 1 < cPages)
2244 {
2245 PGM_UNLOCK(pVM);
2246 PGM_LOCK_VOID(pVM);
2247 }
2248 }
2249 }
2250 else
2251 {
2252 /*
2253 * Read locks:
2254 */
2255 for (uint32_t i = 0; i < cPages; i++)
2256 {
2257 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2258 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2259 Assert(cLocks > 0);
2260 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2261 {
2262 if (cLocks == 1)
2263 {
2264 Assert(pVM->pgm.s.cReadLockedPages > 0);
2265 pVM->pgm.s.cReadLockedPages--;
2266 }
2267 PGM_PAGE_DEC_READ_LOCKS(pPage);
2268 }
2269
2270 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2271 if (pMap)
2272 {
2273 Assert(pMap->cRefs >= 1);
2274 pMap->cRefs--;
2275 }
2276
2277 /* Yield the lock: */
2278 if ((i & 1023) == 1023 && i + 1 < cPages)
2279 {
2280 PGM_UNLOCK(pVM);
2281 PGM_LOCK_VOID(pVM);
2282 }
2283 }
2284 }
2285 PGM_UNLOCK(pVM);
2286
2287 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2288}
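
/*
 * Illustrative sketch (never built): releasing a batch of mappings obtained via
 * the ring-3 bulk APIs named above.  All entries must be of the same kind (all
 * read or all write locks), and the array is zeroed on return so it can be
 * reused for the next batch.  pgmSampleReleaseBatch is a hypothetical name.
 */
# if 0
static void pgmSampleReleaseBatch(PVMCC pVM, PPGMPAGEMAPLOCK paLocks, uint32_t cPages)
{
    if (cPages) /* The API asserts cPages > 0. */
        PGMPhysBulkReleasePageMappingLocks(pVM, cPages, paLocks);
}
# endif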
2289#endif /* IN_RING3 */
2290
2291
2292/**
2293 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2294 *
2295 * This is the counter part of pgmPhysGCPhys2CCPtrInternalEx and
2296 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2297 *
2298 * @param pVM The cross context VM structure.
2299 * @param pLock The lock structure initialized by the mapping function.
2300 *
2301 * @remarks Caller must hold the PGM lock.
2302 */
2303void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2304{
2305 PGM_LOCK_ASSERT_OWNER(pVM);
2306 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2307}
2308
2309
2310/**
2311 * Converts a GC physical address to a HC ring-3 pointer.
2312 *
2313 * @returns VINF_SUCCESS on success.
2314 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2315 * page but has no physical backing.
2316 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2317 * GC physical address.
2318 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2319 * a dynamic ram chunk boundary
2320 *
2321 * @param pVM The cross context VM structure.
2322 * @param GCPhys The GC physical address to convert.
2323 * @param pR3Ptr Where to store the R3 pointer on success.
2324 *
2325 * @deprecated Avoid when possible!
2326 */
2327int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2328{
2329/** @todo this is kind of hacky and needs some more work. */
2330#ifndef DEBUG_sandervl
2331 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2332#endif
2333
2334    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
2335 PGM_LOCK_VOID(pVM);
2336
2337 PPGMRAMRANGE pRam;
2338 PPGMPAGE pPage;
2339 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2340 if (RT_SUCCESS(rc))
2341 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2342
2343 PGM_UNLOCK(pVM);
2344 Assert(rc <= VINF_SUCCESS);
2345 return rc;
2346}
2347
2348
2349/**
2350 * Converts a guest pointer to a GC physical address.
2351 *
2352 * This uses the current CR3/CR0/CR4 of the guest.
2353 *
2354 * @returns VBox status code.
2355 * @param pVCpu The cross context virtual CPU structure.
2356 * @param GCPtr The guest pointer to convert.
2357 * @param pGCPhys Where to store the GC physical address.
2358 */
2359VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2360{
2361 PGMPTWALK Walk;
2362 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2363 if (pGCPhys && RT_SUCCESS(rc))
2364 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2365 return rc;
2366}
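
/*
 * Illustrative sketch (never built): the translation above preserves the page
 * offset, so for a hypothetical GCPtr 0x00401a30 whose page is mapped at guest
 * physical 0x7f83000, *pGCPhys comes back as 0x7f83a30.  A typical caller:
 */
#if 0
static int pgmSampleLogTranslation(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtr, GCPhys));
    return rc;
}
#endif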
2367
2368
2369/**
2370 * Converts a guest pointer to a HC physical address.
2371 *
2372 * This uses the current CR3/CR0/CR4 of the guest.
2373 *
2374 * @returns VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure.
2376 * @param GCPtr The guest pointer to convert.
2377 * @param pHCPhys Where to store the HC physical address.
2378 */
2379VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2380{
2381 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2382 PGMPTWALK Walk;
2383 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2384 if (RT_SUCCESS(rc))
2385 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2386 return rc;
2387}
2388
2389
2390
2391#undef LOG_GROUP
2392#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2393
2394
2395#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2396/**
2397 * Cache PGMPhys memory access
2398 *
2399 * @param pVM The cross context VM structure.
2400 * @param pCache Cache structure pointer
2401 * @param GCPhys GC physical address
2402 * @param pbR3 HC pointer corresponding to physical page
2403 *
2404 * @thread EMT.
2405 */
2406static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2407{
2408 uint32_t iCacheIndex;
2409
2410 Assert(VM_IS_EMT(pVM));
2411
2412 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2413 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
2414
2415 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2416
2417 ASMBitSet(&pCache->aEntries, iCacheIndex);
2418
2419 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2420 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2421}
2422#endif /* IN_RING3 */
2423
2424
2425/**
2426 * Deals with reading from a page with one or more ALL access handlers.
2427 *
2428 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2429 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2430 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2431 *
2432 * @param pVM The cross context VM structure.
2433 * @param pPage The page descriptor.
2434 * @param GCPhys The physical address to start reading at.
2435 * @param pvBuf Where to put the bits we read.
2436 * @param cb How much to read - less than or equal to a page.
2437 * @param enmOrigin The origin of this call.
2438 */
2439static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2440 PGMACCESSORIGIN enmOrigin)
2441{
2442 /*
2443     * The most frequent accesses here are MMIO and shadowed ROM.
2444     * The current code ASSUMES all these access handlers cover full pages!
2445 */
2446
2447 /*
2448 * Whatever we do we need the source page, map it first.
2449 */
2450 PGMPAGEMAPLOCK PgMpLck;
2451 const void *pvSrc = NULL;
2452 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2453/** @todo Check how this can work for MMIO pages? */
2454 if (RT_FAILURE(rc))
2455 {
2456 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2457 GCPhys, pPage, rc));
2458 memset(pvBuf, 0xff, cb);
2459 return VINF_SUCCESS;
2460 }
2461
2462 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2463
2464 /*
2465 * Deal with any physical handlers.
2466 */
2467 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2468 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2469 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2470 {
2471 PPGMPHYSHANDLER pCur;
2472 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2473 if (RT_SUCCESS(rc))
2474 {
2475 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2476 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
2477 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2478#ifndef IN_RING3
2479 if (enmOrigin != PGMACCESSORIGIN_IEM)
2480 {
2481 /* Cannot reliably handle informational status codes in this context */
2482 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2483 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2484 }
2485#endif
2486 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2487 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2488 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2489 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2490
2491 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2492 STAM_PROFILE_START(&pCur->Stat, h);
2493 PGM_LOCK_ASSERT_OWNER(pVM);
2494
2495 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2496 PGM_UNLOCK(pVM);
2497 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2498 PGM_LOCK_VOID(pVM);
2499
2500 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2501 pCur = NULL; /* might not be valid anymore. */
2502 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2503 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2504 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2505 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2506 {
2507 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2508 return rcStrict;
2509 }
2510 }
2511 else if (rc == VERR_NOT_FOUND)
2512 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
2513 else
2514 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
2515 }
2516
2517 /*
2518 * Take the default action.
2519 */
2520 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2521 {
2522 memcpy(pvBuf, pvSrc, cb);
2523 rcStrict = VINF_SUCCESS;
2524 }
2525 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2526 return rcStrict;
2527}
2528
2529
2530/**
2531 * Read physical memory.
2532 *
2533 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2534 * want to ignore those.
2535 *
2536 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2537 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2538 * @retval VINF_SUCCESS in all context - read completed.
2539 *
2540 * @retval VINF_EM_OFF in RC and R0 - read completed.
2541 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2542 * @retval VINF_EM_RESET in RC and R0 - read completed.
2543 * @retval VINF_EM_HALT in RC and R0 - read completed.
2544 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2545 *
2546 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2547 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2548 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2549 *
2550 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2551 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2552 *
2553 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2554 *
2555 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2556 * haven't been cleared for strict status codes yet.
2557 *
2558 * @param pVM The cross context VM structure.
2559 * @param GCPhys Physical address to start reading from.
2560 * @param pvBuf Where to put the read bits.
2561 * @param cbRead How many bytes to read.
2562 * @param enmOrigin The origin of this call.
2563 */
2564VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2565{
2566 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2567 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2568
2569 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2570 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2571
2572 PGM_LOCK_VOID(pVM);
2573
2574 /*
2575 * Copy loop on ram ranges.
2576 */
2577 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2578 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2579 for (;;)
2580 {
2581 /* Inside range or not? */
2582 if (pRam && GCPhys >= pRam->GCPhys)
2583 {
2584 /*
2585 * Must work our way thru this page by page.
2586 */
2587 RTGCPHYS off = GCPhys - pRam->GCPhys;
2588 while (off < pRam->cb)
2589 {
2590 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2591 PPGMPAGE pPage = &pRam->aPages[iPage];
2592 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2593 if (cb > cbRead)
2594 cb = cbRead;
2595
2596 /*
2597 * Normal page? Get the pointer to it.
2598 */
2599 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2600 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2601 {
2602 /*
2603 * Get the pointer to the page.
2604 */
2605 PGMPAGEMAPLOCK PgMpLck;
2606 const void *pvSrc;
2607 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2608 if (RT_SUCCESS(rc))
2609 {
2610 memcpy(pvBuf, pvSrc, cb);
2611 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2612 }
2613 else
2614 {
2615 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2616 pRam->GCPhys + off, pPage, rc));
2617 memset(pvBuf, 0xff, cb);
2618 }
2619 }
2620 /*
2621 * Have ALL/MMIO access handlers.
2622 */
2623 else
2624 {
2625 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2626 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2627 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2628 else
2629 {
2630 memset(pvBuf, 0xff, cb);
2631 PGM_UNLOCK(pVM);
2632 return rcStrict2;
2633 }
2634 }
2635
2636 /* next page */
2637 if (cb >= cbRead)
2638 {
2639 PGM_UNLOCK(pVM);
2640 return rcStrict;
2641 }
2642 cbRead -= cb;
2643 off += cb;
2644 pvBuf = (char *)pvBuf + cb;
2645 } /* walk pages in ram range. */
2646
2647 GCPhys = pRam->GCPhysLast + 1;
2648 }
2649 else
2650 {
2651 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2652
2653 /*
2654 * Unassigned address space.
2655 */
2656 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2657 if (cb >= cbRead)
2658 {
2659 memset(pvBuf, 0xff, cbRead);
2660 break;
2661 }
2662 memset(pvBuf, 0xff, cb);
2663
2664 cbRead -= cb;
2665 pvBuf = (char *)pvBuf + cb;
2666 GCPhys += cb;
2667 }
2668
2669 /* Advance range if necessary. */
2670 while (pRam && GCPhys > pRam->GCPhysLast)
2671 pRam = pRam->CTX_SUFF(pNext);
2672 } /* Ram range walk */
2673
2674 PGM_UNLOCK(pVM);
2675 return rcStrict;
2676}
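
/*
 * Illustrative sketch (never built): reading a guest quadword with full access
 * handler and MMIO handling.  Unassigned addresses read back as 0xff bytes (see
 * the unassigned-space branch above), and in ring-0/raw-mode the strict status
 * must be checked with PGM_PHYS_RW_IS_SUCCESS rather than RT_SUCCESS.  The
 * helper name and the origin value used here are illustrative only.
 */
#if 0
static VBOXSTRICTRC pgmSampleReadU64(PVMCC pVM, RTGCPHYS GCPhys, uint64_t *puValue)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, puValue, sizeof(*puValue), PGMACCESSORIGIN_IEM);
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    { /* likely - rcStrict may still carry an informational status to pass up. */ }
    else
        Log(("pgmSampleReadU64: %RGp -> %Rrc\n", GCPhys, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif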
2677
2678
2679/**
2680 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2681 *
2682 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2683 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2684 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2685 *
2686 * @param pVM The cross context VM structure.
2687 * @param pPage The page descriptor.
2688 * @param GCPhys The physical address to start writing at.
2689 * @param pvBuf What to write.
2690 * @param cbWrite How much to write - less than or equal to a page.
2691 * @param enmOrigin The origin of this call.
2692 */
2693static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2694 PGMACCESSORIGIN enmOrigin)
2695{
2696 PGMPAGEMAPLOCK PgMpLck;
2697 void *pvDst = NULL;
2698 VBOXSTRICTRC rcStrict;
2699
2700 /*
2701 * Give priority to physical handlers (like #PF does).
2702 *
2703 * Hope for a lonely physical handler first that covers the whole write
2704 * area. This should be a pretty frequent case with MMIO and the heavy
2705 * usage of full page handlers in the page pool.
2706 */
2707 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2708 PPGMPHYSHANDLER pCur;
2709 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2710 if (RT_SUCCESS(rcStrict))
2711 {
2712 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2713#ifndef IN_RING3
2714 if (enmOrigin != PGMACCESSORIGIN_IEM)
2715 /* Cannot reliably handle informational status codes in this context */
2716 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2717#endif
2718 size_t cbRange = pCur->KeyLast - GCPhys + 1;
2719 if (cbRange > cbWrite)
2720 cbRange = cbWrite;
2721
2722 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2723 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2724 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2725 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2726 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2727 else
2728 rcStrict = VINF_SUCCESS;
2729 if (RT_SUCCESS(rcStrict))
2730 {
2731 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2732 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2733 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2734 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2735 STAM_PROFILE_START(&pCur->Stat, h);
2736
2737 /* Most handlers will want to release the PGM lock for deadlock prevention
2738 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2739 dirty page trackers will want to keep it for performance reasons. */
2740 PGM_LOCK_ASSERT_OWNER(pVM);
2741 if (pCurType->fKeepPgmLock)
2742 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2743 else
2744 {
2745 PGM_UNLOCK(pVM);
2746 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2747 PGM_LOCK_VOID(pVM);
2748 }
2749
2750 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2751 pCur = NULL; /* might not be valid anymore. */
2752 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2753 {
2754 if (pvDst)
2755 memcpy(pvDst, pvBuf, cbRange);
2756 rcStrict = VINF_SUCCESS;
2757 }
2758 else
2759 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2760 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2761 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2762 }
2763 else
2764 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2765 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2766 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2767 {
2768 if (pvDst)
2769 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2770 return rcStrict;
2771 }
2772
2773 /* more fun to be had below */
2774 cbWrite -= cbRange;
2775 GCPhys += cbRange;
2776 pvBuf = (uint8_t *)pvBuf + cbRange;
2777 pvDst = (uint8_t *)pvDst + cbRange;
2778 }
2779 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
2780 rcStrict = VINF_SUCCESS;
2781 else
2782 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2783 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2784
2785 /*
2786 * Deal with all the odd ends (used to be deal with virt+phys).
2787 */
2788 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2789
2790 /* We need a writable destination page. */
2791 if (!pvDst)
2792 {
2793 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2794 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2795 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2796 rc2);
2797 }
2798
2799 /** @todo clean up this code some more now there are no virtual handlers any
2800 * more. */
2801 /* The loop state (big + ugly). */
2802 PPGMPHYSHANDLER pPhys = NULL;
2803 uint32_t offPhys = GUEST_PAGE_SIZE;
2804 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2805 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2806
2807 /* The loop. */
2808 for (;;)
2809 {
2810 if (fMorePhys && !pPhys)
2811 {
2812 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
2813 if (RT_SUCCESS_NP(rcStrict))
2814 {
2815 offPhys = 0;
2816 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2817 }
2818 else
2819 {
2820 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2821
2822 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2823 GCPhys, &pPhys);
2824 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
2825 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2826
2827 if ( RT_SUCCESS(rcStrict)
2828 && pPhys->Key <= GCPhys + (cbWrite - 1))
2829 {
2830 offPhys = pPhys->Key - GCPhys;
2831 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2832 Assert(pPhys->KeyLast - pPhys->Key < _4G);
2833 }
2834 else
2835 {
2836 pPhys = NULL;
2837 fMorePhys = false;
2838 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2839 }
2840 }
2841 }
2842
2843 /*
2844 * Handle access to space without handlers (that's easy).
2845 */
2846 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2847 uint32_t cbRange = (uint32_t)cbWrite;
2848 Assert(cbRange == cbWrite);
2849
2850 /*
2851 * Physical handler.
2852 */
2853 if (!offPhys)
2854 {
2855#ifndef IN_RING3
2856 if (enmOrigin != PGMACCESSORIGIN_IEM)
2857 /* Cannot reliably handle informational status codes in this context */
2858 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2859#endif
2860 if (cbRange > offPhysLast + 1)
2861 cbRange = offPhysLast + 1;
2862
2863 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2864 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2865 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2866 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2867
2868 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2869 STAM_PROFILE_START(&pPhys->Stat, h);
2870
2871 /* Most handlers will want to release the PGM lock for deadlock prevention
2872 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2873 dirty page trackers will want to keep it for performance reasons. */
2874 PGM_LOCK_ASSERT_OWNER(pVM);
2875 if (pCurType->fKeepPgmLock)
2876 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2877 else
2878 {
2879 PGM_UNLOCK(pVM);
2880 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2881 PGM_LOCK_VOID(pVM);
2882 }
2883
2884 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2885 pPhys = NULL; /* might not be valid anymore. */
2886 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2887 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2888 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2889 }
2890
2891 /*
2892 * Execute the default action and merge the status codes.
2893 */
2894 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2895 {
2896 memcpy(pvDst, pvBuf, cbRange);
2897 rcStrict2 = VINF_SUCCESS;
2898 }
2899 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2900 {
2901 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2902 return rcStrict2;
2903 }
2904 else
2905 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2906
2907 /*
2908 * Advance if we've got more stuff to do.
2909 */
2910 if (cbRange >= cbWrite)
2911 {
2912 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2913 return rcStrict;
2914 }
2915
2916
2917 cbWrite -= cbRange;
2918 GCPhys += cbRange;
2919 pvBuf = (uint8_t *)pvBuf + cbRange;
2920 pvDst = (uint8_t *)pvDst + cbRange;
2921
2922 offPhys -= cbRange;
2923 offPhysLast -= cbRange;
2924 }
2925}
2926
2927
2928/**
2929 * Write to physical memory.
2930 *
2931 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2932 * want to ignore those.
2933 *
2934 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2935 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2936 * @retval VINF_SUCCESS in all context - write completed.
2937 *
2938 * @retval VINF_EM_OFF in RC and R0 - write completed.
2939 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2940 * @retval VINF_EM_RESET in RC and R0 - write completed.
2941 * @retval VINF_EM_HALT in RC and R0 - write completed.
2942 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2943 *
2944 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2945 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2946 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2947 *
2948 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2949 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2950 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2951 *
2952 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2953 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2954 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2955 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2956 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2957 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2958 *
2959 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2960 * haven't been cleared for strict status codes yet.
2961 *
2962 *
2963 * @param pVM The cross context VM structure.
2964 * @param GCPhys Physical address to write to.
2965 * @param pvBuf What to write.
2966 * @param cbWrite How many bytes to write.
2967 * @param enmOrigin Who is calling.
2968 */
2969VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2970{
2971 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2972 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2973 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2974
2975 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2976 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2977
2978 PGM_LOCK_VOID(pVM);
2979
2980 /*
2981 * Copy loop on ram ranges.
2982 */
2983 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2984 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2985 for (;;)
2986 {
2987 /* Inside range or not? */
2988 if (pRam && GCPhys >= pRam->GCPhys)
2989 {
2990 /*
2991 * Must work our way thru this page by page.
2992 */
2993 RTGCPTR off = GCPhys - pRam->GCPhys;
2994 while (off < pRam->cb)
2995 {
2996 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
2997 PPGMPAGE pPage = &pRam->aPages[iPage];
2998 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2999 if (cb > cbWrite)
3000 cb = cbWrite;
3001
3002 /*
3003 * Normal page? Get the pointer to it.
3004 */
3005 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3006 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3007 {
3008 PGMPAGEMAPLOCK PgMpLck;
3009 void *pvDst;
3010 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3011 if (RT_SUCCESS(rc))
3012 {
3013 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3014 memcpy(pvDst, pvBuf, cb);
3015 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3016 }
3017 /* Ignore writes to ballooned pages. */
3018 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3019 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3020 pRam->GCPhys + off, pPage, rc));
3021 }
3022 /*
3023 * Active WRITE or ALL access handlers.
3024 */
3025 else
3026 {
3027 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3028 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3029 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3030 else
3031 {
3032 PGM_UNLOCK(pVM);
3033 return rcStrict2;
3034 }
3035 }
3036
3037 /* next page */
3038 if (cb >= cbWrite)
3039 {
3040 PGM_UNLOCK(pVM);
3041 return rcStrict;
3042 }
3043
3044 cbWrite -= cb;
3045 off += cb;
3046 pvBuf = (const char *)pvBuf + cb;
3047 } /* walk pages in ram range */
3048
3049 GCPhys = pRam->GCPhysLast + 1;
3050 }
3051 else
3052 {
3053 /*
3054 * Unassigned address space, skip it.
3055 */
3056 if (!pRam)
3057 break;
3058 size_t cb = pRam->GCPhys - GCPhys;
3059 if (cb >= cbWrite)
3060 break;
3061 cbWrite -= cb;
3062 pvBuf = (const char *)pvBuf + cb;
3063 GCPhys += cb;
3064 }
3065
3066 /* Advance range if necessary. */
3067 while (pRam && GCPhys > pRam->GCPhysLast)
3068 pRam = pRam->CTX_SUFF(pNext);
3069 } /* Ram range walk */
3070
3071 PGM_UNLOCK(pVM);
3072 return rcStrict;
3073}
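
/*
 * Illustrative sketch (never built): the write counterpart.  Writes to
 * ballooned pages are silently ignored and writes hitting WRITE/ALL access
 * handlers are routed through pgmPhysWriteHandler above; as with PGMPhysRead,
 * use PGM_PHYS_RW_IS_SUCCESS on the strict status outside ring-3.  Helper name
 * and origin value are illustrative only.
 */
#if 0
static VBOXSTRICTRC pgmSampleWriteU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t uValue)
{
    return PGMPhysWrite(pVM, GCPhys, &uValue, sizeof(uValue), PGMACCESSORIGIN_IEM);
}
#endif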
3074
3075
3076/**
3077 * Read from guest physical memory by GC physical address, bypassing
3078 * MMIO and access handlers.
3079 *
3080 * @returns VBox status code.
3081 * @param pVM The cross context VM structure.
3082 * @param pvDst The destination address.
3083 * @param GCPhysSrc The source address (GC physical address).
3084 * @param cb The number of bytes to read.
3085 */
3086VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3087{
3088 /*
3089 * Treat the first page as a special case.
3090 */
3091 if (!cb)
3092 return VINF_SUCCESS;
3093
3094 /* map the 1st page */
3095 void const *pvSrc;
3096 PGMPAGEMAPLOCK Lock;
3097 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3098 if (RT_FAILURE(rc))
3099 return rc;
3100
3101 /* optimize for the case where access is completely within the first page. */
3102 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3103 if (RT_LIKELY(cb <= cbPage))
3104 {
3105 memcpy(pvDst, pvSrc, cb);
3106 PGMPhysReleasePageMappingLock(pVM, &Lock);
3107 return VINF_SUCCESS;
3108 }
3109
3110 /* copy to the end of the page. */
3111 memcpy(pvDst, pvSrc, cbPage);
3112 PGMPhysReleasePageMappingLock(pVM, &Lock);
3113 GCPhysSrc += cbPage;
3114 pvDst = (uint8_t *)pvDst + cbPage;
3115 cb -= cbPage;
3116
3117 /*
3118 * Page by page.
3119 */
3120 for (;;)
3121 {
3122 /* map the page */
3123 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3124 if (RT_FAILURE(rc))
3125 return rc;
3126
3127 /* last page? */
3128 if (cb <= GUEST_PAGE_SIZE)
3129 {
3130 memcpy(pvDst, pvSrc, cb);
3131 PGMPhysReleasePageMappingLock(pVM, &Lock);
3132 return VINF_SUCCESS;
3133 }
3134
3135 /* copy the entire page and advance */
3136 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3137 PGMPhysReleasePageMappingLock(pVM, &Lock);
3138 GCPhysSrc += GUEST_PAGE_SIZE;
3139 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3140 cb -= GUEST_PAGE_SIZE;
3141 }
3142 /* won't ever get here. */
3143}
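
/*
 * Illustrative sketch (never built): PGMPhysSimpleReadGCPhys splits the copy at
 * guest page boundaries - the first chunk is GUEST_PAGE_SIZE minus the page
 * offset, so a 16 byte read starting at 0x1ffa takes 6 bytes from the first
 * page and 10 from the next.  A debugger-style peek that bypasses handlers
 * (pgmSampleDumpBytes is a hypothetical name):
 */
#if 0
static int pgmSampleDumpBytes(PVMCC pVM, RTGCPHYS GCPhys)
{
    uint8_t abBuf[32];
    int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, GCPhys, sizeof(abBuf));
    if (RT_SUCCESS(rc))
        Log(("%RGp: %.32Rhxs\n", GCPhys, abBuf));
    return rc;
}
#endif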
3144
3145
3146/**
3147 * Write to guest physical memory by GC physical address, bypassing
3148 * MMIO and access handlers.
3149 *
3150 * Use PGMPhysWrite() if access handlers and MMIO should be respected.
3151 *
3152 * @returns VBox status code.
3153 * @param pVM The cross context VM structure.
3154 * @param GCPhysDst The GC physical address of the destination.
3155 * @param pvSrc The source buffer.
3156 * @param cb The number of bytes to write.
3157 */
3158VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3159{
3160 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3161
3162 /*
3163 * Treat the first page as a special case.
3164 */
3165 if (!cb)
3166 return VINF_SUCCESS;
3167
3168 /* map the 1st page */
3169 void *pvDst;
3170 PGMPAGEMAPLOCK Lock;
3171 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3172 if (RT_FAILURE(rc))
3173 return rc;
3174
3175 /* optimize for the case where access is completely within the first page. */
3176 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3177 if (RT_LIKELY(cb <= cbPage))
3178 {
3179 memcpy(pvDst, pvSrc, cb);
3180 PGMPhysReleasePageMappingLock(pVM, &Lock);
3181 return VINF_SUCCESS;
3182 }
3183
3184 /* copy to the end of the page. */
3185 memcpy(pvDst, pvSrc, cbPage);
3186 PGMPhysReleasePageMappingLock(pVM, &Lock);
3187 GCPhysDst += cbPage;
3188 pvSrc = (const uint8_t *)pvSrc + cbPage;
3189 cb -= cbPage;
3190
3191 /*
3192 * Page by page.
3193 */
3194 for (;;)
3195 {
3196 /* map the page */
3197 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3198 if (RT_FAILURE(rc))
3199 return rc;
3200
3201 /* last page? */
3202 if (cb <= GUEST_PAGE_SIZE)
3203 {
3204 memcpy(pvDst, pvSrc, cb);
3205 PGMPhysReleasePageMappingLock(pVM, &Lock);
3206 return VINF_SUCCESS;
3207 }
3208
3209 /* copy the entire page and advance */
3210 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3211 PGMPhysReleasePageMappingLock(pVM, &Lock);
3212 GCPhysDst += GUEST_PAGE_SIZE;
3213 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3214 cb -= GUEST_PAGE_SIZE;
3215 }
3216 /* won't ever get here. */
3217}
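
/*
 * Illustrative sketch (never built): a raw patch of guest physical memory that
 * deliberately bypasses access handlers, followed by a read-back check.  The
 * helper name and the status code used on mismatch are illustrative only.
 */
#if 0
static int pgmSamplePatchAndVerify(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvPatch, size_t cbPatch)
{
    uint8_t abCheck[64];
    AssertReturn(cbPatch <= sizeof(abCheck), VERR_BUFFER_OVERFLOW);

    int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, pvPatch, cbPatch);
    if (RT_SUCCESS(rc))
    {
        rc = PGMPhysSimpleReadGCPhys(pVM, abCheck, GCPhysDst, cbPatch);
        if (RT_SUCCESS(rc) && memcmp(abCheck, pvPatch, cbPatch) != 0)
            rc = VERR_INTERNAL_ERROR; /* illustrative status choice */
    }
    return rc;
}
#endif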
3218
3219
3220/**
3221 * Read from guest physical memory referenced by GC pointer.
3222 *
3223 * This function uses the current CR3/CR0/CR4 of the guest and will
3224 * bypass access handlers and not set any accessed bits.
3225 *
3226 * @returns VBox status code.
3227 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3228 * @param pvDst The destination address.
3229 * @param GCPtrSrc The source address (GC pointer).
3230 * @param cb The number of bytes to read.
3231 */
3232VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3233{
3234 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3235/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3236
3237 /*
3238 * Treat the first page as a special case.
3239 */
3240 if (!cb)
3241 return VINF_SUCCESS;
3242
3243 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3244 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3245
3246 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3247 * when many VCPUs are fighting for the lock.
3248 */
3249 PGM_LOCK_VOID(pVM);
3250
3251 /* map the 1st page */
3252 void const *pvSrc;
3253 PGMPAGEMAPLOCK Lock;
3254 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3255 if (RT_FAILURE(rc))
3256 {
3257 PGM_UNLOCK(pVM);
3258 return rc;
3259 }
3260
3261 /* optimize for the case where access is completely within the first page. */
3262 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3263 if (RT_LIKELY(cb <= cbPage))
3264 {
3265 memcpy(pvDst, pvSrc, cb);
3266 PGMPhysReleasePageMappingLock(pVM, &Lock);
3267 PGM_UNLOCK(pVM);
3268 return VINF_SUCCESS;
3269 }
3270
3271 /* copy to the end of the page. */
3272 memcpy(pvDst, pvSrc, cbPage);
3273 PGMPhysReleasePageMappingLock(pVM, &Lock);
3274 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3275 pvDst = (uint8_t *)pvDst + cbPage;
3276 cb -= cbPage;
3277
3278 /*
3279 * Page by page.
3280 */
3281 for (;;)
3282 {
3283 /* map the page */
3284 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3285 if (RT_FAILURE(rc))
3286 {
3287 PGM_UNLOCK(pVM);
3288 return rc;
3289 }
3290
3291 /* last page? */
3292 if (cb <= GUEST_PAGE_SIZE)
3293 {
3294 memcpy(pvDst, pvSrc, cb);
3295 PGMPhysReleasePageMappingLock(pVM, &Lock);
3296 PGM_UNLOCK(pVM);
3297 return VINF_SUCCESS;
3298 }
3299
3300 /* copy the entire page and advance */
3301 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3302 PGMPhysReleasePageMappingLock(pVM, &Lock);
3303 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3304 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3305 cb -= GUEST_PAGE_SIZE;
3306 }
3307 /* won't ever get here. */
3308}
3309
3310
3311/**
3312 * Write to guest physical memory referenced by GC pointer.
3313 *
3314 * This function uses the current CR3/CR0/CR4 of the guest and will
3315 * bypass access handlers and not set dirty or accessed bits.
3316 *
3317 * @returns VBox status code.
3318 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3319 * @param GCPtrDst The destination address (GC pointer).
3320 * @param pvSrc The source address.
3321 * @param cb The number of bytes to write.
3322 */
3323VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3324{
3325 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3326 VMCPU_ASSERT_EMT(pVCpu);
3327
3328 /*
3329 * Treat the first page as a special case.
3330 */
3331 if (!cb)
3332 return VINF_SUCCESS;
3333
3334 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3335 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3336
3337 /* map the 1st page */
3338 void *pvDst;
3339 PGMPAGEMAPLOCK Lock;
3340 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3341 if (RT_FAILURE(rc))
3342 return rc;
3343
3344 /* optimize for the case where access is completely within the first page. */
3345 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3346 if (RT_LIKELY(cb <= cbPage))
3347 {
3348 memcpy(pvDst, pvSrc, cb);
3349 PGMPhysReleasePageMappingLock(pVM, &Lock);
3350 return VINF_SUCCESS;
3351 }
3352
3353 /* copy to the end of the page. */
3354 memcpy(pvDst, pvSrc, cbPage);
3355 PGMPhysReleasePageMappingLock(pVM, &Lock);
3356 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3357 pvSrc = (const uint8_t *)pvSrc + cbPage;
3358 cb -= cbPage;
3359
3360 /*
3361 * Page by page.
3362 */
3363 for (;;)
3364 {
3365 /* map the page */
3366 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3367 if (RT_FAILURE(rc))
3368 return rc;
3369
3370 /* last page? */
3371 if (cb <= GUEST_PAGE_SIZE)
3372 {
3373 memcpy(pvDst, pvSrc, cb);
3374 PGMPhysReleasePageMappingLock(pVM, &Lock);
3375 return VINF_SUCCESS;
3376 }
3377
3378 /* copy the entire page and advance */
3379 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3380 PGMPhysReleasePageMappingLock(pVM, &Lock);
3381 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3382 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3383 cb -= GUEST_PAGE_SIZE;
3384 }
3385 /* won't ever get here. */
3386}
3387
3388
3389/**
3390 * Write to guest physical memory referenced by GC pointer and update the PTE.
3391 *
3392 * This function uses the current CR3/CR0/CR4 of the guest and will
3393 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3394 *
3395 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3396 *
3397 * @returns VBox status code.
3398 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3399 * @param GCPtrDst The destination address (GC pointer).
3400 * @param pvSrc The source address.
3401 * @param cb The number of bytes to write.
3402 */
3403VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3404{
3405 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3406 VMCPU_ASSERT_EMT(pVCpu);
3407
3408 /*
3409 * Treat the first page as a special case.
3410 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
3411 */
3412 if (!cb)
3413 return VINF_SUCCESS;
3414
3415 /* map the 1st page */
3416 void *pvDst;
3417 PGMPAGEMAPLOCK Lock;
3418 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3419 if (RT_FAILURE(rc))
3420 return rc;
3421
3422 /* optimize for the case where access is completely within the first page. */
3423 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3424 if (RT_LIKELY(cb <= cbPage))
3425 {
3426 memcpy(pvDst, pvSrc, cb);
3427 PGMPhysReleasePageMappingLock(pVM, &Lock);
3428 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3429 return VINF_SUCCESS;
3430 }
3431
3432 /* copy to the end of the page. */
3433 memcpy(pvDst, pvSrc, cbPage);
3434 PGMPhysReleasePageMappingLock(pVM, &Lock);
3435 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3436 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3437 pvSrc = (const uint8_t *)pvSrc + cbPage;
3438 cb -= cbPage;
3439
3440 /*
3441 * Page by page.
3442 */
3443 for (;;)
3444 {
3445 /* map the page */
3446 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3447 if (RT_FAILURE(rc))
3448 return rc;
3449
3450 /* last page? */
3451 if (cb <= GUEST_PAGE_SIZE)
3452 {
3453 memcpy(pvDst, pvSrc, cb);
3454 PGMPhysReleasePageMappingLock(pVM, &Lock);
3455 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3456 return VINF_SUCCESS;
3457 }
3458
3459 /* copy the entire page and advance */
3460 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3461 PGMPhysReleasePageMappingLock(pVM, &Lock);
3462 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3463 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3464 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3465 cb -= GUEST_PAGE_SIZE;
3466 }
3467 /* won't ever get here. */
3468}
3469
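/*
 * Illustrative sketch (editor's addition): choosing between the two handler-bypassing write
 * APIs above. Both skip access handlers; the dirty variant additionally sets the accessed and
 * dirty bits in the guest PTE via PGMGstModifyPage. The helper and its fSetDirty parameter are
 * hypothetical.
 *
 * @code
 *    static int exampleWriteGuestBytes(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc,
 *                                      size_t cb, bool fSetDirty)
 *    {
 *        // Use the dirty variant when the store should look as if the guest made it itself.
 *        if (fSetDirty)
 *            return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
 *        return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
 *    }
 * @endcode
 */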
3470
3471/**
3472 * Read from guest physical memory referenced by GC pointer.
3473 *
3474 * This function uses the current CR3/CR0/CR4 of the guest and will
3475 * respect access handlers and set accessed bits.
3476 *
3477 * @returns Strict VBox status, see PGMPhysRead for details.
3478 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3479 * specified virtual address.
3480 *
3481 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3482 * @param pvDst The destination address.
3483 * @param GCPtrSrc The source address (GC pointer).
3484 * @param cb The number of bytes to read.
3485 * @param enmOrigin Who is calling.
3486 * @thread EMT(pVCpu)
3487 */
3488VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3489{
3490 int rc;
3491 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3492 VMCPU_ASSERT_EMT(pVCpu);
3493
3494 /*
3495 * Anything to do?
3496 */
3497 if (!cb)
3498 return VINF_SUCCESS;
3499
3500 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3501
3502 /*
3503 * Optimize reads within a single page.
3504 */
3505 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3506 {
3507 /* Convert virtual to physical address + flags */
3508 PGMPTWALK Walk;
3509 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3510 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3511 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3512
3513 /* mark the guest page as accessed. */
3514 if (!(Walk.fEffective & X86_PTE_A))
3515 {
3516 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3517 AssertRC(rc);
3518 }
3519
3520 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3521 }
3522
3523 /*
3524 * Page by page.
3525 */
3526 for (;;)
3527 {
3528 /* Convert virtual to physical address + flags */
3529 PGMPTWALK Walk;
3530 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3531 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3532 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3533
3534 /* mark the guest page as accessed. */
3535 if (!(Walk.fEffective & X86_PTE_A))
3536 {
3537 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3538 AssertRC(rc);
3539 }
3540
3541 /* copy */
3542 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3543 if (cbRead < cb)
3544 {
3545 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3546 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3547 { /* likely */ }
3548 else
3549 return rcStrict;
3550 }
3551 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3552 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3553
3554 /* next */
3555 Assert(cb > cbRead);
3556 cb -= cbRead;
3557 pvDst = (uint8_t *)pvDst + cbRead;
3558 GCPtrSrc += cbRead;
3559 }
3560}
3561
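/*
 * Illustrative sketch (editor's addition): calling the handler-respecting read above and
 * propagating its strict status. The wrapper name is hypothetical, and the enmOrigin value is
 * passed through rather than picking a specific PGMACCESSORIGIN constant here.
 *
 * @code
 *    static VBOXSTRICTRC exampleReadWithHandlers(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc,
 *                                                size_t cb, PGMACCESSORIGIN enmOrigin)
 *    {
 *        VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, enmOrigin);
 *        // VINF_SUCCESS means all bytes were copied; other statuses (handler or paging related,
 *        // see PGMPhysRead) must be handled or forwarded by the caller.
 *        if (rcStrict != VINF_SUCCESS)
 *            LogFlow(("exampleReadWithHandlers: %RGv/%zu -> %Rrc\n", GCPtrSrc, cb, VBOXSTRICTRC_VAL(rcStrict)));
 *        return rcStrict;
 *    }
 * @endcode
 */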
3562
3563/**
3564 * Write to guest physical memory referenced by GC pointer.
3565 *
3566 * This function uses the current CR3/CR0/CR4 of the guest and will
3567 * respect access handlers and set dirty and accessed bits.
3568 *
3569 * @returns Strict VBox status, see PGMPhysWrite for details.
3570 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3571 * specified virtual address.
3572 *
3573 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3574 * @param GCPtrDst The destination address (GC pointer).
3575 * @param pvSrc The source address.
3576 * @param cb The number of bytes to write.
3577 * @param enmOrigin Who is calling.
3578 */
3579VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3580{
3581 int rc;
3582 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3583 VMCPU_ASSERT_EMT(pVCpu);
3584
3585 /*
3586 * Anything to do?
3587 */
3588 if (!cb)
3589 return VINF_SUCCESS;
3590
3591 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3592
3593 /*
3594 * Optimize writes within a single page.
3595 */
3596 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3597 {
3598 /* Convert virtual to physical address + flags */
3599 PGMPTWALK Walk;
3600 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3601 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3602 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3603
3604 /* Mention when we ignore X86_PTE_RW... */
3605 if (!(Walk.fEffective & X86_PTE_RW))
3606 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3607
3608 /* Mark the guest page as accessed and dirty if necessary. */
3609 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3610 {
3611 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3612 AssertRC(rc);
3613 }
3614
3615 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3616 }
3617
3618 /*
3619 * Page by page.
3620 */
3621 for (;;)
3622 {
3623 /* Convert virtual to physical address + flags */
3624 PGMPTWALK Walk;
3625 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3626 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3627 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3628
3629 /* Mention when we ignore X86_PTE_RW... */
3630 if (!(Walk.fEffective & X86_PTE_RW))
3631 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3632
3633 /* Mark the guest page as accessed and dirty if necessary. */
3634 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3635 {
3636 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3637 AssertRC(rc);
3638 }
3639
3640 /* copy */
3641 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3642 if (cbWrite < cb)
3643 {
3644 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3645 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3646 { /* likely */ }
3647 else
3648 return rcStrict;
3649 }
3650 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3651 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3652
3653 /* next */
3654 Assert(cb > cbWrite);
3655 cb -= cbWrite;
3656 pvSrc = (uint8_t *)pvSrc + cbWrite;
3657 GCPtrDst += cbWrite;
3658 }
3659}
3660
3661
3662/**
3663 * Return the page type of the specified physical address.
3664 *
3665 * @returns The page type.
3666 * @param pVM The cross context VM structure.
3667 * @param GCPhys Guest physical address
3668 */
3669VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3670{
3671 PGM_LOCK_VOID(pVM);
3672 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3673 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3674 PGM_UNLOCK(pVM);
3675
3676 return enmPgType;
3677}
3678
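/*
 * Illustrative sketch (editor's addition): using the page-type query above to check whether a
 * guest physical address is backed by any page at all. The helper name is hypothetical;
 * PGMPAGETYPE_INVALID is the value the function returns when no page exists.
 *
 * @code
 *    static bool exampleIsGCPhysBacked(PVMCC pVM, RTGCPHYS GCPhys)
 *    {
 *        return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
 *    }
 * @endcode
 */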
3679
3680/**
3681 * Converts a GC physical address to a HC ring-3 pointer, with some
3682 * additional checks.
3683 *
3684 * @returns VBox status code (no informational statuses).
3685 *
3686 * @param pVM The cross context VM structure.
3687 * @param pVCpu The cross context virtual CPU structure of the
3688 * calling EMT.
3689 * @param GCPhys The GC physical address to convert. This API masks
3690 * the A20 line when necessary.
3691 * @param puTlbPhysRev Where to read the physical TLB revision. The read
3692 * needs to be done while holding the PGM lock.
3693 * @param ppb Where to store the pointer corresponding to GCPhys
3694 * on success.
3695 * @param pfTlb The TLB flags and revision. We only add stuff.
3696 *
3697 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3698 * PGMPhysIemGCPhys2Ptr.
3699 *
3700 * @thread EMT(pVCpu).
3701 */
3702VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3703 R3R0PTRTYPE(uint8_t *) *ppb,
3704 uint64_t *pfTlb)
3705{
3706 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3707 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3708
3709 PGM_LOCK_VOID(pVM);
3710
3711 PPGMRAMRANGE pRam;
3712 PPGMPAGE pPage;
3713 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3714 if (RT_SUCCESS(rc))
3715 {
3716 if (!PGM_PAGE_IS_BALLOONED(pPage))
3717 {
3718 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3719 {
3720 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3721 {
3722 /*
3723 * No access handler.
3724 */
3725 switch (PGM_PAGE_GET_STATE(pPage))
3726 {
3727 case PGM_PAGE_STATE_ALLOCATED:
3728 *pfTlb |= *puTlbPhysRev;
3729 break;
3730 case PGM_PAGE_STATE_BALLOONED:
3731 AssertFailed();
3732 RT_FALL_THRU();
3733 case PGM_PAGE_STATE_ZERO:
3734 case PGM_PAGE_STATE_SHARED:
3735 case PGM_PAGE_STATE_WRITE_MONITORED:
3736 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3737 break;
3738 }
3739
3740 PPGMPAGEMAPTLBE pTlbe;
3741 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3742 AssertLogRelRCReturn(rc, rc);
3743 *ppb = (uint8_t *)pTlbe->pv;
3744 }
3745 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3746 {
3747 /*
3748 * MMIO or similar all access handler: Catch all access.
3749 */
3750 *pfTlb |= *puTlbPhysRev
3751 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3752 *ppb = NULL;
3753 }
3754 else
3755 {
3756 /*
3757 * Write access handler: Catch write accesses if active.
3758 */
3759 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3760 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3761 else
3762 switch (PGM_PAGE_GET_STATE(pPage))
3763 {
3764 case PGM_PAGE_STATE_ALLOCATED:
3765 *pfTlb |= *puTlbPhysRev;
3766 break;
3767 case PGM_PAGE_STATE_BALLOONED:
3768 AssertFailed();
3769 RT_FALL_THRU();
3770 case PGM_PAGE_STATE_ZERO:
3771 case PGM_PAGE_STATE_SHARED:
3772 case PGM_PAGE_STATE_WRITE_MONITORED:
3773 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3774 break;
3775 }
3776
3777 PPGMPAGEMAPTLBE pTlbe;
3778 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3779 AssertLogRelRCReturn(rc, rc);
3780 *ppb = (uint8_t *)pTlbe->pv;
3781 }
3782 }
3783 else
3784 {
3785 /* Alias MMIO: For now, we catch all access. */
3786 *pfTlb |= *puTlbPhysRev
3787 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3788 *ppb = NULL;
3789 }
3790 }
3791 else
3792 {
3793 /* Ballooned: Shouldn't get here, but reads return the zero page via PGMPhysRead and writes go to /dev/null. */
3794 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3795 *ppb = NULL;
3796 }
3797 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3798 }
3799 else
3800 {
3801 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
3802 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
3803 *ppb = NULL;
3804 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3805 }
3806
3807 PGM_UNLOCK(pVM);
3808 return VINF_SUCCESS;
3809}
3810
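/*
 * Illustrative sketch (editor's addition): how an IEM-style TLB fill might consume the output
 * of PGMPhysIemGCPhys2PtrNoLock. The helper and its fWrite parameter are hypothetical; the
 * PGMIEMGCPHYS2PTR_F_* flags are the ones set by the function above, and GCPhysPage must be
 * page aligned as the assertion in the function requires.
 *
 * @code
 *    static bool exampleCanAccessDirectly(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPage,
 *                                         uint64_t const volatile *puTlbPhysRev, bool fWrite)
 *    {
 *        R3R0PTRTYPE(uint8_t *) pbPage = NULL;
 *        uint64_t fTlb = 0;
 *        int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, puTlbPhysRev, &pbPage, &fTlb);
 *        AssertRCReturn(rc, false);
 *        // Direct access only when no catch-all/unassigned/no-mapping flag is set; writes
 *        // additionally require that the no-write flag is clear.
 *        if (fTlb & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_UNASSIGNED | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3))
 *            return false;
 *        if (fWrite && (fTlb & PGMIEMGCPHYS2PTR_F_NO_WRITE))
 *            return false;
 *        return pbPage != NULL;
 *    }
 * @endcode
 */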
3811
3812/**
3813 * Converts a GC physical address to a HC ring-3 pointer, with some
3814 * additional checks.
3815 *
3816 * @returns VBox status code (no informational statuses).
3817 * @retval VINF_SUCCESS on success.
3818 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3819 * access handler of some kind.
3820 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3821 * accesses or is odd in any way.
3822 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3823 *
3824 * @param pVM The cross context VM structure.
3825 * @param pVCpu The cross context virtual CPU structure of the
3826 * calling EMT.
3827 * @param GCPhys The GC physical address to convert. This API masks
3828 * the A20 line when necessary.
3829 * @param fWritable Whether write access is required.
3830 * @param fByPassHandlers Whether to bypass access handlers.
3831 * @param ppv Where to store the pointer corresponding to GCPhys
3832 * on success.
3833 * @param pLock Where to store the page mapping lock (release it with PGMPhysReleasePageMappingLock).
3834 *
3835 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3836 * @thread EMT(pVCpu).
3837 */
3838VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3839 void **ppv, PPGMPAGEMAPLOCK pLock)
3840{
3841 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3842
3843 PGM_LOCK_VOID(pVM);
3844
3845 PPGMRAMRANGE pRam;
3846 PPGMPAGE pPage;
3847 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3848 if (RT_SUCCESS(rc))
3849 {
3850 if (PGM_PAGE_IS_BALLOONED(pPage))
3851 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3852 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3853 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3854 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3855 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3856 rc = VINF_SUCCESS;
3857 else
3858 {
3859 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3860 {
3861 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3862 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3863 }
3864 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3865 {
3866 Assert(!fByPassHandlers);
3867 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3868 }
3869 }
3870 if (RT_SUCCESS(rc))
3871 {
3872 int rc2;
3873
3874 /* Make sure what we return is writable. */
3875 if (fWritable)
3876 switch (PGM_PAGE_GET_STATE(pPage))
3877 {
3878 case PGM_PAGE_STATE_ALLOCATED:
3879 break;
3880 case PGM_PAGE_STATE_BALLOONED:
3881 AssertFailed();
3882 break;
3883 case PGM_PAGE_STATE_ZERO:
3884 case PGM_PAGE_STATE_SHARED:
3885 case PGM_PAGE_STATE_WRITE_MONITORED:
3886 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3887 AssertLogRelRCReturn(rc2, rc2);
3888 break;
3889 }
3890
3891 /* Get a ring-3 mapping of the address. */
3892 PPGMPAGEMAPTLBE pTlbe;
3893 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3894 AssertLogRelRCReturn(rc2, rc2);
3895
3896 /* Lock it and calculate the address. */
3897 if (fWritable)
3898 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3899 else
3900 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3901 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3902
3903 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3904 }
3905 else
3906 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3907
3908 /* else: handler catching all access, no pointer returned. */
3909 }
3910 else
3911 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3912
3913 PGM_UNLOCK(pVM);
3914 return rc;
3915}
3916
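/*
 * Illustrative sketch (editor's addition): mapping a guest page for a direct write via
 * PGMPhysIemGCPhys2Ptr and releasing the mapping lock afterwards. The helper name and the
 * byte-patching purpose are hypothetical.
 *
 * @code
 *    static int examplePatchGuestByte(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t bValue)
 *    {
 *        void *pv = NULL;
 *        PGMPAGEMAPLOCK Lock;
 *        // fWritable=true, fByPassHandlers=false: pages with active handlers yield
 *        // VERR_PGM_PHYS_TLB_CATCH_WRITE / VERR_PGM_PHYS_TLB_CATCH_ALL instead of a pointer.
 *        int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true, false, &pv, &Lock);
 *        if (RT_SUCCESS(rc))
 *        {
 *            *(uint8_t *)pv = bValue;
 *            PGMPhysReleasePageMappingLock(pVM, &Lock);
 *        }
 *        return rc;
 *    }
 * @endcode
 */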
3917
3918/**
3919 * Checks if the given GCPhys page requires special handling for the given access
3920 * because it's MMIO or otherwise monitored.
3921 *
3922 * @returns VBox status code (no informational statuses).
3923 * @retval VINF_SUCCESS on success.
3924 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE if the page has a write
3925 * access handler of some kind.
3926 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3927 * accesses or is odd in any way.
3928 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3929 *
3930 * @param pVM The cross context VM structure.
3931 * @param GCPhys The GC physical address to convert. Since this is
3932 * only used for filling the REM TLB, the A20 mask must
3933 * be applied before calling this API.
3934 * @param fWritable Whether write access is required.
3935 * @param fByPassHandlers Whether to bypass access handlers.
3936 *
3937 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3938 * a stop-gap thing that should be removed once there is a better TLB
3939 * for virtual address accesses.
3940 */
3941VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3942{
3943 PGM_LOCK_VOID(pVM);
3944 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3945
3946 PPGMRAMRANGE pRam;
3947 PPGMPAGE pPage;
3948 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3949 if (RT_SUCCESS(rc))
3950 {
3951 if (PGM_PAGE_IS_BALLOONED(pPage))
3952 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3953 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3954 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3955 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3956 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3957 rc = VINF_SUCCESS;
3958 else
3959 {
3960 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3961 {
3962 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3963 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3964 }
3965 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3966 {
3967 Assert(!fByPassHandlers);
3968 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3969 }
3970 }
3971 }
3972
3973 PGM_UNLOCK(pVM);
3974 return rc;
3975}
3976
3977#ifdef VBOX_WITH_NATIVE_NEM
3978
3979/**
3980 * Interface used by NEM to check what to do on a memory access exit.
3981 *
3982 * @returns VBox status code.
3983 * @param pVM The cross context VM structure.
3984 * @param pVCpu The cross context per virtual CPU structure.
3985 * Optional.
3986 * @param GCPhys The guest physical address.
3987 * @param fMakeWritable Whether to try to make the page writable or not. If it
3988 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
3989 * be returned and the return code will be unaffected.
3990 * @param pInfo Where to return the page information. This is
3991 * initialized even on failure.
3992 * @param pfnChecker Page in-sync checker callback. Optional.
3993 * @param pvUser User argument to pass to pfnChecker.
3994 */
3995VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
3996 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
3997{
3998 PGM_LOCK_VOID(pVM);
3999
4000 PPGMPAGE pPage;
4001 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4002 if (RT_SUCCESS(rc))
4003 {
4004 /* Try make it writable if requested. */
4005 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4006 if (fMakeWritable)
4007 switch (PGM_PAGE_GET_STATE(pPage))
4008 {
4009 case PGM_PAGE_STATE_SHARED:
4010 case PGM_PAGE_STATE_WRITE_MONITORED:
4011 case PGM_PAGE_STATE_ZERO:
4012 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4013 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4014 rc = VINF_SUCCESS;
4015 break;
4016 }
4017
4018 /* Fill in the info. */
4019 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4020 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4021 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4022 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4023 pInfo->enmType = enmType;
4024 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4025 switch (PGM_PAGE_GET_STATE(pPage))
4026 {
4027 case PGM_PAGE_STATE_ALLOCATED:
4028 pInfo->fZeroPage = 0;
4029 break;
4030
4031 case PGM_PAGE_STATE_ZERO:
4032 pInfo->fZeroPage = 1;
4033 break;
4034
4035 case PGM_PAGE_STATE_WRITE_MONITORED:
4036 pInfo->fZeroPage = 0;
4037 break;
4038
4039 case PGM_PAGE_STATE_SHARED:
4040 pInfo->fZeroPage = 0;
4041 break;
4042
4043 case PGM_PAGE_STATE_BALLOONED:
4044 pInfo->fZeroPage = 1;
4045 break;
4046
4047 default:
4048 pInfo->fZeroPage = 1;
4049 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4050 }
4051
4052 /* Call the checker and update NEM state. */
4053 if (pfnChecker)
4054 {
4055 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4056 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4057 }
4058
4059 /* Done. */
4060 PGM_UNLOCK(pVM);
4061 }
4062 else
4063 {
4064 PGM_UNLOCK(pVM);
4065
4066 pInfo->HCPhys = NIL_RTHCPHYS;
4067 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4068 pInfo->u2NemState = 0;
4069 pInfo->fHasHandlers = 0;
4070 pInfo->fZeroPage = 0;
4071 pInfo->enmType = PGMPAGETYPE_INVALID;
4072 }
4073
4074 return rc;
4075}
4076
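/*
 * Illustrative sketch (editor's addition): querying page info for NEM without a checker
 * callback and testing whether the page may be mapped writable into the native hypervisor.
 * The helper name is hypothetical, and it assumes fNemProt is a NEM_PAGE_PROT_* flag mask as
 * the fMakeWritable remark above suggests.
 *
 * @code
 *    static bool exampleNemPageIsWritable(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
 *    {
 *        PGMPHYSNEMPAGEINFO Info;
 *        // fMakeWritable=true, no checker callback or user argument.
 *        int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, true, &Info, NULL, NULL);
 *        if (RT_FAILURE(rc))
 *            return false;
 *        return (Info.fNemProt & NEM_PAGE_PROT_WRITE) != 0;
 *    }
 * @endcode
 */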
4077
4078/**
4079 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4080 * or higher.
4081 *
4082 * @returns VBox status code from callback.
4083 * @param pVM The cross context VM structure.
4084 * @param pVCpu The cross context per CPU structure. This is
4085 * optional as it's only passed on to the callback.
4086 * @param uMinState The minimum NEM state value to call on.
4087 * @param pfnCallback The callback function.
4088 * @param pvUser User argument for the callback.
4089 */
4090VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4091 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4092{
4093 /*
4094 * Just brute force this problem.
4095 */
4096 PGM_LOCK_VOID(pVM);
4097 int rc = VINF_SUCCESS;
4098 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4099 {
4100 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4101 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4102 {
4103 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4104 if (u2State < uMinState)
4105 { /* likely */ }
4106 else
4107 {
4108 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4109 if (RT_SUCCESS(rc))
4110 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4111 else
4112 break;
4113 }
4114 }
4115 }
4116 PGM_UNLOCK(pVM);
4117
4118 return rc;
4119}
4120
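/*
 * Illustrative sketch (editor's addition): a minimal callback for PGMPhysNemEnumPagesByState.
 * The callback name is hypothetical and the parameter list is assumed to match the invocation
 * in the enumerator above (pVM, pVCpu, GCPhys, pu2State, pvUser); see the
 * PFNPGMPHYSNEMENUMCALLBACK typedef for the authoritative signature.
 *
 * @code
 *    static DECLCALLBACK(int) exampleNemEnumCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
 *                                                    uint8_t *pu2State, void *pvUser)
 *    {
 *        RT_NOREF(pVM, pVCpu, pvUser);
 *        // Log the page and clear its NEM state; the enumerator writes *pu2State back on success.
 *        Log6(("exampleNemEnumCallback: %RGp state=%u\n", GCPhys, (unsigned)*pu2State));
 *        *pu2State = 0;
 *        return VINF_SUCCESS;
 *    }
 * @endcode
 */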
4121
4122/**
4123 * Helper for setting the NEM state for a range of pages.
4124 *
4125 * @param paPages Array of pages to modify.
4126 * @param cPages How many pages to modify.
4127 * @param u2State The new state value.
4128 */
4129void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4130{
4131 PPGMPAGE pPage = paPages;
4132 while (cPages-- > 0)
4133 {
4134 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4135 pPage++;
4136 }
4137}
4138
4139#endif /* VBOX_WITH_NATIVE_NEM */
4140