VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 93943

Last change on this file since 93943 was 93943, checked in by vboxsync, 3 years ago

VMM: Banned the use of PAGE_ADDRESS and PHYS_PAGE_ADDRESS too. bugref:9898

File size: 145.9 KB
1/* $Id: PGMAllPhys.cpp 93943 2022-02-24 21:15:08Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/nem.h>
30#include "PGMInternal.h"
31#include <VBox/vmm/vmcc.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <VBox/log.h>
38#ifdef IN_RING3
39# include <iprt/thread.h>
40#endif
41
42
43/*********************************************************************************************************************************
44* Defined Constants And Macros *
45*********************************************************************************************************************************/
46/** Enable the physical TLB. */
47#define PGM_WITH_PHYS_TLB
48
49/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
 50 * Checks for a valid physical access handler return code (normal handler, not PF).
51 *
52 * Checks if the given strict status code is one of the expected ones for a
53 * physical access handler in the current context.
54 *
55 * @returns true or false.
56 * @param a_rcStrict The status code.
57 * @param a_fWrite Whether it is a write or read being serviced.
58 *
59 * @remarks We wish to keep the list of statuses here as short as possible.
60 * When changing, please make sure to update the PGMPhysRead,
61 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
62 */
63#ifdef IN_RING3
64# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
65 ( (a_rcStrict) == VINF_SUCCESS \
66 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
67#elif defined(IN_RING0)
68#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
69 ( (a_rcStrict) == VINF_SUCCESS \
70 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
71 \
72 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
73 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
74 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
75 \
76 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
77 || (a_rcStrict) == VINF_EM_DBG_STOP \
78 || (a_rcStrict) == VINF_EM_DBG_EVENT \
79 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
80 || (a_rcStrict) == VINF_EM_OFF \
81 || (a_rcStrict) == VINF_EM_SUSPEND \
82 || (a_rcStrict) == VINF_EM_RESET \
83 )
84#else
85# error "Context?"
86#endif
87
88/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
 89 * Checks for a valid virtual access handler return code (normal handler, not PF).
90 *
91 * Checks if the given strict status code is one of the expected ones for a
92 * virtual access handler in the current context.
93 *
94 * @returns true or false.
95 * @param a_rcStrict The status code.
96 * @param a_fWrite Whether it is a write or read being serviced.
97 *
98 * @remarks We wish to keep the list of statuses here as short as possible.
99 * When changing, please make sure to update the PGMPhysRead,
100 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
101 */
102#ifdef IN_RING3
103# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
104 ( (a_rcStrict) == VINF_SUCCESS \
105 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
106#elif defined(IN_RING0)
107# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
108 (false /* no virtual handlers in ring-0! */ )
109#else
110# error "Context?"
111#endif
112
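/*
 * Illustrative sketch (not part of the build): how a dispatcher could use the
 * status checks above after invoking a physical access handler.  The handler
 * call itself is hypothetical; only the validation step is the point here.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                        PGMACCESSTYPE_WRITE, enmOrigin, uUser);
 *     AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true /*fWrite*/),
 *               ("Unexpected handler status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */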
113
114
115/**
116 * Calculate the actual table size.
117 *
 118 * The memory is laid out like this:
119 * - PGMPHYSHANDLERTREE (8 bytes)
120 * - Allocation bitmap (8-byte size align)
121 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
122 */
123uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
124{
125 /*
126 * A minimum of 64 entries and a maximum of ~64K.
127 */
128 uint32_t cEntries = *pcEntries;
129 if (cEntries <= 64)
130 cEntries = 64;
131 else if (cEntries >= _64K)
132 cEntries = _64K;
133 else
134 cEntries = RT_ALIGN_32(cEntries, 16);
135
136 /*
137 * Do the initial calculation.
138 */
139 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
140 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
141 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
142 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
143
144 /*
145 * Align the total and try use up extra space from that.
146 */
147 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
148 uint32_t cAvail = cbTotalAligned - cbTotal;
 149 cAvail /= sizeof(PGMPHYSHANDLER);
     cEntries += cAvail; /* Use the alignment slack; the loop below trims the count again if this overshoots. */
 150 if (cAvail >= 1)
151 for (;;)
152 {
153 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
154 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
155 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
156 cbTotal = cbTreeAndBitmap + cbTable;
157 if (cbTotal <= cbTotalAligned)
158 break;
159 cEntries--;
160 Assert(cEntries >= 16);
161 }
162
163 /*
164 * Return the result.
165 */
166 *pcbTreeAndBitmap = cbTreeAndBitmap;
167 *pcEntries = cEntries;
168 return cbTotalAligned;
169}
170
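/*
 * Worked sizing example (illustrative only; real structure sizes come from
 * PGMInternal.h).  Assuming, say, sizeof(PGMPHYSHANDLER) == 64, the 8 byte
 * tree header from the layout note above and a 4 KiB host page:
 *      *pcEntries = 100  ->  cEntries = 112 (16-aligned)
 *      cbBitmap        = RT_ALIGN_32(112, 64) / 8      = 16
 *      cbTreeAndBitmap = RT_ALIGN_32(8 + 16, 64)       = 64
 *      cbTable         = 112 * 64                      = 7168
 *      cbTotalAligned  = RT_ALIGN_32(64 + 7168, _16K)  = 16384
 * The roughly 143 entries worth of slack is folded back into cEntries, and
 * the trimming loop settles at 255 entries for the 16 KiB allocation.
 */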
171
172/**
173 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
174 */
175DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
176{
177 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
178 if (pRom->GCPhys == GCPhys)
179 return pRom;
180 return NULL;
181}
182
183#ifndef IN_RING3
184
185/**
186 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
187 * \#PF access handler callback for guest ROM range write access.}
188 *
189 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
190 */
191DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
192 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
193
194{
195 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
196 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
197 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
198 int rc;
199 RT_NOREF(uErrorCode, pvFault);
200
201 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
202
203 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
204 switch (pRom->aPages[iPage].enmProt)
205 {
206 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
207 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
208 {
209 /*
210 * If it's a simple instruction which doesn't change the cpu state
211 * we will simply skip it. Otherwise we'll have to defer it to REM.
212 */
213 uint32_t cbOp;
214 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
215 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
216 if ( RT_SUCCESS(rc)
217 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
218 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
219 {
220 switch (pDis->bOpCode)
221 {
222 /** @todo Find other instructions we can safely skip, possibly
223 * adding this kind of detection to DIS or EM. */
224 case OP_MOV:
225 pRegFrame->rip += cbOp;
226 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
227 return VINF_SUCCESS;
228 }
229 }
230 break;
231 }
232
233 case PGMROMPROT_READ_RAM_WRITE_RAM:
234 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
235 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
236 AssertRC(rc);
237 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
238
239 case PGMROMPROT_READ_ROM_WRITE_RAM:
240 /* Handle it in ring-3 because it's *way* easier there. */
241 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
242 break;
243
244 default:
245 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
246 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
247 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
248 }
249
250 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
251 return VINF_EM_RAW_EMULATE_INSTR;
252}
253
254#endif /* !IN_RING3 */
255
256
257/**
258 * @callback_method_impl{FNPGMPHYSHANDLER,
259 * Access handler callback for ROM write accesses.}
260 *
261 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
262 */
263DECLCALLBACK(VBOXSTRICTRC)
264pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
265 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
266{
267 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
268 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
269 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
270 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
271 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
272
273 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
274 RT_NOREF(pVCpu, pvPhys, enmOrigin);
275
276 if (enmAccessType == PGMACCESSTYPE_READ)
277 {
278 switch (pRomPage->enmProt)
279 {
280 /*
281 * Take the default action.
282 */
283 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
284 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
285 case PGMROMPROT_READ_ROM_WRITE_RAM:
286 case PGMROMPROT_READ_RAM_WRITE_RAM:
287 return VINF_PGM_HANDLER_DO_DEFAULT;
288
289 default:
290 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
291 pRom->aPages[iPage].enmProt, iPage, GCPhys),
292 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
293 }
294 }
295 else
296 {
297 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
298 switch (pRomPage->enmProt)
299 {
300 /*
301 * Ignore writes.
302 */
303 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
304 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
305 return VINF_SUCCESS;
306
307 /*
308 * Write to the RAM page.
309 */
310 case PGMROMPROT_READ_ROM_WRITE_RAM:
311 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
312 {
 313 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
314 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
315
316 /*
317 * Take the lock, do lazy allocation, map the page and copy the data.
318 *
319 * Note that we have to bypass the mapping TLB since it works on
320 * guest physical addresses and entering the shadow page would
321 * kind of screw things up...
322 */
323 PGM_LOCK_VOID(pVM);
324
325 PPGMPAGE pShadowPage = &pRomPage->Shadow;
326 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
327 {
328 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
329 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
330 }
331
332 void *pvDstPage;
333 int rc;
334#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
335 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
336 {
337 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
338 rc = VINF_SUCCESS;
339 }
340 else
341#endif
342 {
343 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
344 if (RT_SUCCESS(rc))
345 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
346 }
347 if (RT_SUCCESS(rc))
348 {
 349 memcpy(pvDstPage, pvBuf, cbBuf); /* pvDstPage already includes the in-page offset on both branches above. */
350 pRomPage->LiveSave.fWrittenTo = true;
351
352 AssertMsg( rc == VINF_SUCCESS
353 || ( rc == VINF_PGM_SYNC_CR3
354 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
355 , ("%Rrc\n", rc));
356 rc = VINF_SUCCESS;
357 }
358
359 PGM_UNLOCK(pVM);
360 return rc;
361 }
362
363 default:
364 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
365 pRom->aPages[iPage].enmProt, iPage, GCPhys),
366 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
367 }
368 }
369}
370
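/*
 * Quick reference for the ROM protection modes handled by the handlers above
 * (behaviour as the enum names suggest):
 *      PGMROMPROT_READ_ROM_WRITE_IGNORE  - reads see the ROM, writes are dropped.
 *      PGMROMPROT_READ_RAM_WRITE_IGNORE  - reads see the RAM copy, writes are dropped.
 *      PGMROMPROT_READ_ROM_WRITE_RAM     - reads see the ROM, writes land in the shadow RAM copy.
 *      PGMROMPROT_READ_RAM_WRITE_RAM     - reads see the RAM copy, writes land in it too.
 */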
371
372/**
373 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
374 */
375static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
376{
377 /*
378 * Get the MMIO2 range.
379 */
380 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
381 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
382 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
383 Assert(pMmio2->idMmio2 == hMmio2);
384 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
385 VERR_INTERNAL_ERROR_4);
386
387 /*
388 * Get the page and make sure it's an MMIO2 page.
389 */
390 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
391 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
392 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
393
394 /*
395 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
396 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
397 * page is dirty, saving the need for additional storage (bitmap).)
398 */
399 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
400
401 /*
402 * Disable the handler for this page.
403 */
404 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
405 AssertRC(rc);
406#ifndef IN_RING3
407 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
408 {
409 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
410 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
411 }
412#else
413 RT_NOREF(pVCpu, GCPtr);
414#endif
415 return VINF_SUCCESS;
416}
417
418
419#ifndef IN_RING3
420/**
421 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
422 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
423 *
424 * @remarks The @a uUser is the MMIO2 index.
425 */
426DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
427 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
428{
429 RT_NOREF(pVCpu, uErrorCode, pRegFrame);
430 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
431 if (RT_SUCCESS(rcStrict))
432 {
433 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
434 PGM_UNLOCK(pVM);
435 }
436 return rcStrict;
437}
438#endif /* !IN_RING3 */
439
440
441/**
442 * @callback_method_impl{FNPGMPHYSHANDLER,
443 * Access handler callback for MMIO2 dirty page tracing.}
444 *
445 * @remarks The @a uUser is the MMIO2 index.
446 */
447DECLCALLBACK(VBOXSTRICTRC)
448pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
449 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
450{
451 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
452 if (RT_SUCCESS(rcStrict))
453 {
454 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
455 PGM_UNLOCK(pVM);
456 if (rcStrict == VINF_SUCCESS)
457 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
458 }
459 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
460 return rcStrict;
461}
462
463
464/**
465 * Invalidates the RAM range TLBs.
466 *
467 * @param pVM The cross context VM structure.
468 */
469void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
470{
471 PGM_LOCK_VOID(pVM);
472 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
473 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
474 PGM_UNLOCK(pVM);
475}
476
477
478/**
479 * Tests if a value of type RTGCPHYS is negative if the type had been signed
480 * instead of unsigned.
481 *
482 * @returns @c true if negative, @c false if positive or zero.
483 * @param a_GCPhys The value to test.
484 * @todo Move me to iprt/types.h.
485 */
486#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
487
488
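/*
 * Example of the trick above (illustrative): with pRam->GCPhys = 0x100000 and
 * GCPhys = 0xf0000, the unsigned difference off = GCPhys - pRam->GCPhys wraps
 * around so its top bit is set, RTGCPHYS_IS_NEGATIVE(off) returns true, and
 * the tree lookups below descend to the left (lower addresses).  When GCPhys
 * is at or above the range start but off >= pRam->cb, they descend right.
 */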
489/**
490 * Slow worker for pgmPhysGetRange.
491 *
492 * @copydoc pgmPhysGetRange
493 */
494PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
495{
496 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
497
498 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
499 while (pRam)
500 {
501 RTGCPHYS off = GCPhys - pRam->GCPhys;
502 if (off < pRam->cb)
503 {
504 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
505 return pRam;
506 }
507 if (RTGCPHYS_IS_NEGATIVE(off))
508 pRam = pRam->CTX_SUFF(pLeft);
509 else
510 pRam = pRam->CTX_SUFF(pRight);
511 }
512 return NULL;
513}
514
515
516/**
517 * Slow worker for pgmPhysGetRangeAtOrAbove.
518 *
519 * @copydoc pgmPhysGetRangeAtOrAbove
520 */
521PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
522{
523 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
524
525 PPGMRAMRANGE pLastLeft = NULL;
526 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
527 while (pRam)
528 {
529 RTGCPHYS off = GCPhys - pRam->GCPhys;
530 if (off < pRam->cb)
531 {
532 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
533 return pRam;
534 }
535 if (RTGCPHYS_IS_NEGATIVE(off))
536 {
537 pLastLeft = pRam;
538 pRam = pRam->CTX_SUFF(pLeft);
539 }
540 else
541 pRam = pRam->CTX_SUFF(pRight);
542 }
543 return pLastLeft;
544}
545
546
547/**
548 * Slow worker for pgmPhysGetPage.
549 *
550 * @copydoc pgmPhysGetPage
551 */
552PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
553{
554 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
555
556 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
557 while (pRam)
558 {
559 RTGCPHYS off = GCPhys - pRam->GCPhys;
560 if (off < pRam->cb)
561 {
562 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
563 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
564 }
565
566 if (RTGCPHYS_IS_NEGATIVE(off))
567 pRam = pRam->CTX_SUFF(pLeft);
568 else
569 pRam = pRam->CTX_SUFF(pRight);
570 }
571 return NULL;
572}
573
574
575/**
576 * Slow worker for pgmPhysGetPageEx.
577 *
578 * @copydoc pgmPhysGetPageEx
579 */
580int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
581{
582 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
583
584 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
585 while (pRam)
586 {
587 RTGCPHYS off = GCPhys - pRam->GCPhys;
588 if (off < pRam->cb)
589 {
590 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
591 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
592 return VINF_SUCCESS;
593 }
594
595 if (RTGCPHYS_IS_NEGATIVE(off))
596 pRam = pRam->CTX_SUFF(pLeft);
597 else
598 pRam = pRam->CTX_SUFF(pRight);
599 }
600
601 *ppPage = NULL;
602 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
603}
604
605
606/**
607 * Slow worker for pgmPhysGetPageAndRangeEx.
608 *
609 * @copydoc pgmPhysGetPageAndRangeEx
610 */
611int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
612{
613 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
614
615 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
616 while (pRam)
617 {
618 RTGCPHYS off = GCPhys - pRam->GCPhys;
619 if (off < pRam->cb)
620 {
621 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
622 *ppRam = pRam;
623 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
624 return VINF_SUCCESS;
625 }
626
627 if (RTGCPHYS_IS_NEGATIVE(off))
628 pRam = pRam->CTX_SUFF(pLeft);
629 else
630 pRam = pRam->CTX_SUFF(pRight);
631 }
632
633 *ppRam = NULL;
634 *ppPage = NULL;
635 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
636}
637
638
639/**
640 * Checks if Address Gate 20 is enabled or not.
641 *
642 * @returns true if enabled.
643 * @returns false if disabled.
644 * @param pVCpu The cross context virtual CPU structure.
645 */
646VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
647{
648 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
649 return pVCpu->pgm.s.fA20Enabled;
650}
651
652
653/**
654 * Validates a GC physical address.
655 *
656 * @returns true if valid.
657 * @returns false if invalid.
658 * @param pVM The cross context VM structure.
659 * @param GCPhys The physical address to validate.
660 */
661VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
662{
663 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
664 return pPage != NULL;
665}
666
667
668/**
669 * Checks if a GC physical address is a normal page,
670 * i.e. not ROM, MMIO or reserved.
671 *
672 * @returns true if normal.
673 * @returns false if invalid, ROM, MMIO or reserved page.
674 * @param pVM The cross context VM structure.
675 * @param GCPhys The physical address to check.
676 */
677VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
678{
679 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
680 return pPage
681 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
682}
683
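/*
 * Illustrative sketch (hypothetical caller, not part of the build): telling
 * plain RAM apart from other guest physical addresses with the two queries
 * above.
 *
 * @code
 *     if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
 *         Log(("%RGp is not backed by any RAM range\n", GCPhys));
 *     else if (!PGMPhysIsGCPhysNormal(pVM, GCPhys))
 *         Log(("%RGp is ROM, MMIO or otherwise special\n", GCPhys));
 *     else
 *         Log(("%RGp is normal RAM\n", GCPhys));
 * @endcode
 */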
684
685/**
686 * Converts a GC physical address to a HC physical address.
687 *
688 * @returns VINF_SUCCESS on success.
 689 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
690 * page but has no physical backing.
691 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
692 * GC physical address.
693 *
694 * @param pVM The cross context VM structure.
695 * @param GCPhys The GC physical address to convert.
696 * @param pHCPhys Where to store the HC physical address on success.
697 */
698VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
699{
700 PGM_LOCK_VOID(pVM);
701 PPGMPAGE pPage;
702 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
703 if (RT_SUCCESS(rc))
704 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
705 PGM_UNLOCK(pVM);
706 return rc;
707}
708
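/*
 * Illustrative sketch (hypothetical caller): a one-off translation with
 * PGMPhysGCPhys2HCPhys.  The result is only a snapshot; the backing page can
 * change again once the PGM lock is released inside the API.
 *
 * @code
 *     RTHCPHYS HCPhys = NIL_RTHCPHYS;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("%RGp is backed by %RHp\n", GCPhys, HCPhys));
 *     else
 *         Log(("%RGp has no backing (%Rrc)\n", GCPhys, rc));
 * @endcode
 */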
709
710/**
711 * Invalidates all page mapping TLBs.
712 *
713 * @param pVM The cross context VM structure.
714 */
715void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
716{
717 PGM_LOCK_VOID(pVM);
718 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
719
720 /* Clear the R3 & R0 TLBs completely. */
721 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
722 {
723 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
724 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
725 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
726 }
727
728 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
729 {
730 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
731 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
732 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
733 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
734 }
735
736 PGM_UNLOCK(pVM);
737}
738
739
740/**
741 * Invalidates a page mapping TLB entry
742 *
743 * @param pVM The cross context VM structure.
744 * @param GCPhys GCPhys entry to flush
745 */
746void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
747{
748 PGM_LOCK_ASSERT_OWNER(pVM);
749
750 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
751
752 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
753
754 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
755 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
756 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
757
758 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
759 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
760 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
761 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
762}
763
764
765/**
766 * Makes sure that there is at least one handy page ready for use.
767 *
768 * This will also take the appropriate actions when reaching water-marks.
769 *
770 * @returns VBox status code.
771 * @retval VINF_SUCCESS on success.
772 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
773 *
774 * @param pVM The cross context VM structure.
775 *
776 * @remarks Must be called from within the PGM critical section. It may
777 * nip back to ring-3/0 in some cases.
778 */
779static int pgmPhysEnsureHandyPage(PVMCC pVM)
780{
781 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
782
783 /*
784 * Do we need to do anything special?
785 */
786#ifdef IN_RING3
787 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
788#else
789 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
790#endif
791 {
792 /*
793 * Allocate pages only if we're out of them, or in ring-3, almost out.
794 */
795#ifdef IN_RING3
796 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
797#else
798 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
799#endif
800 {
801 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
802 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
803#ifdef IN_RING3
804 int rc = PGMR3PhysAllocateHandyPages(pVM);
805#else
806 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
807#endif
808 if (RT_UNLIKELY(rc != VINF_SUCCESS))
809 {
810 if (RT_FAILURE(rc))
811 return rc;
812 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
813 if (!pVM->pgm.s.cHandyPages)
814 {
815 LogRel(("PGM: no more handy pages!\n"));
816 return VERR_EM_NO_MEMORY;
817 }
818 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
819 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
820#ifndef IN_RING3
821 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
822#endif
823 }
824 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
825 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
826 ("%u\n", pVM->pgm.s.cHandyPages),
827 VERR_PGM_HANDY_PAGE_IPE);
828 }
829 else
830 {
831 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
832 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
833#ifndef IN_RING3
834 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
835 {
836 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
837 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
838 }
839#endif
840 }
841 }
842
843 return VINF_SUCCESS;
844}
845
846
847/**
848 * Replace a zero or shared page with new page that we can write to.
849 *
850 * @returns The following VBox status codes.
851 * @retval VINF_SUCCESS on success, pPage is modified.
852 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
853 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
854 *
855 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
856 *
857 * @param pVM The cross context VM structure.
858 * @param pPage The physical page tracking structure. This will
859 * be modified on success.
860 * @param GCPhys The address of the page.
861 *
862 * @remarks Must be called from within the PGM critical section. It may
863 * nip back to ring-3/0 in some cases.
864 *
 865 * @remarks This function shouldn't really fail; however, if it does
866 * it probably means we've screwed up the size of handy pages and/or
867 * the low-water mark. Or, that some device I/O is causing a lot of
 868 * pages to be allocated while the host is in a low-memory
869 * condition. This latter should be handled elsewhere and in a more
870 * controlled manner, it's on the @bugref{3170} todo list...
871 */
872int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
873{
874 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
875
876 /*
877 * Prereqs.
878 */
879 PGM_LOCK_ASSERT_OWNER(pVM);
880 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
881 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
882
883# ifdef PGM_WITH_LARGE_PAGES
884 /*
885 * Try allocate a large page if applicable.
886 */
887 if ( PGMIsUsingLargePages(pVM)
888 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
889 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
890 {
891 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
892 PPGMPAGE pBasePage;
893
894 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
895 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
896 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
897 {
898 rc = pgmPhysAllocLargePage(pVM, GCPhys);
899 if (rc == VINF_SUCCESS)
900 return rc;
901 }
902 /* Mark the base as type page table, so we don't check over and over again. */
903 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
904
905 /* fall back to 4KB pages. */
906 }
907# endif
908
909 /*
910 * Flush any shadow page table mappings of the page.
911 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
912 */
913 bool fFlushTLBs = false;
914 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
915 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
916
917 /*
918 * Ensure that we've got a page handy, take it and use it.
919 */
920 int rc2 = pgmPhysEnsureHandyPage(pVM);
921 if (RT_FAILURE(rc2))
922 {
923 if (fFlushTLBs)
924 PGM_INVL_ALL_VCPU_TLBS(pVM);
925 Assert(rc2 == VERR_EM_NO_MEMORY);
926 return rc2;
927 }
928 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
929 PGM_LOCK_ASSERT_OWNER(pVM);
930 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
931 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
932
933 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
934 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
935 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
936 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
937 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
938 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
939
940 /*
 941 * There are one or two actions to be taken the next time we allocate handy pages:
942 * - Tell the GMM (global memory manager) what the page is being used for.
943 * (Speeds up replacement operations - sharing and defragmenting.)
944 * - If the current backing is shared, it must be freed.
945 */
946 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
947 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
948
949 void const *pvSharedPage = NULL;
950 if (PGM_PAGE_IS_SHARED(pPage))
951 {
952 /* Mark this shared page for freeing/dereferencing. */
953 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
954 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
955
956 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
957 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
958 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
959 pVM->pgm.s.cSharedPages--;
960
961 /* Grab the address of the page so we can make a copy later on. (safe) */
962 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
963 AssertRC(rc);
964 }
965 else
966 {
967 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
968 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
969 pVM->pgm.s.cZeroPages--;
970 }
971
972 /*
973 * Do the PGMPAGE modifications.
974 */
975 pVM->pgm.s.cPrivatePages++;
976 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
977 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
978 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
979 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
980 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
981
982 /* Copy the shared page contents to the replacement page. */
983 if (pvSharedPage)
984 {
985 /* Get the virtual address of the new page. */
986 PGMPAGEMAPLOCK PgMpLck;
987 void *pvNewPage;
988 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
989 if (RT_SUCCESS(rc))
990 {
 991 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
992 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
993 }
994 }
995
996 if ( fFlushTLBs
997 && rc != VINF_PGM_GCPHYS_ALIASED)
998 PGM_INVL_ALL_VCPU_TLBS(pVM);
999
1000 /*
1001 * Notify NEM about the mapping change for this page.
1002 *
1003 * Note! Shadow ROM pages are complicated as they can definitely be
1004 * allocated while not visible, so play safe.
1005 */
1006 if (VM_IS_NEM_ENABLED(pVM))
1007 {
1008 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1009 if ( enmType != PGMPAGETYPE_ROM_SHADOW
1010 || pgmPhysGetPage(pVM, GCPhys) == pPage)
1011 {
1012 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1013 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
1014 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1015 if (RT_SUCCESS(rc))
1016 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1017 else
1018 rc = rc2;
1019 }
1020 }
1021
1022 return rc;
1023}
1024
1025#ifdef PGM_WITH_LARGE_PAGES
1026
1027/**
1028 * Replace a 2 MB range of zero pages with new pages that we can write to.
1029 *
1030 * @returns The following VBox status codes.
1031 * @retval VINF_SUCCESS on success, pPage is modified.
1032 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1033 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1034 *
1035 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1036 *
1037 * @param pVM The cross context VM structure.
1038 * @param GCPhys The address of the page.
1039 *
1040 * @remarks Must be called from within the PGM critical section. It may block
1041 * on GMM and host mutexes/locks, leaving HM context.
1042 */
1043int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1044{
1045 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1046 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1047 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1048
1049 /*
1050 * Check Prereqs.
1051 */
1052 PGM_LOCK_ASSERT_OWNER(pVM);
1053 Assert(PGMIsUsingLargePages(pVM));
1054
1055 /*
1056 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1057 */
1058 PPGMPAGE pFirstPage;
1059 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1060 if ( RT_SUCCESS(rc)
1061 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1062 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1063 {
1064 /*
1065 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1066 * since they are unallocated.
1067 */
1068 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1069 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1070 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1071 {
1072 /*
 1073 * Now, make sure all the other pages in the 2 MB range are in the same state.
1074 */
1075 GCPhys = GCPhysBase;
1076 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1077 while (cLeft-- > 0)
1078 {
1079 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1080 if ( pSubPage
1081 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1082 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1083 {
1084 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1085 GCPhys += GUEST_PAGE_SIZE;
1086 }
1087 else
1088 {
1089 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1090 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1091
1092 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1093 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1094 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1095 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1096 }
1097 }
1098
1099 /*
1100 * Do the allocation.
1101 */
1102# ifdef IN_RING3
1103 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1104# elif defined(IN_RING0)
1105 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1106# else
1107# error "Port me"
1108# endif
1109 if (RT_SUCCESS(rc))
1110 {
1111 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1112 pVM->pgm.s.cLargePages++;
1113 return VINF_SUCCESS;
1114 }
1115
1116 /* If we fail once, it most likely means the host's memory is too
1117 fragmented; don't bother trying again. */
1118 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1119 return rc;
1120 }
1121 }
1122 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1123}
1124
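/*
 * Worked example for the 2 MB masking used above (illustrative): for
 * GCPhys = 0x00765432, GCPhys & X86_PDE2M_PAE_PG_MASK gives
 * GCPhysBase = 0x00600000, and the per-page scan covers all
 * _2M / GUEST_PAGE_SIZE = 512 subpages from 0x00600000 up to 0x007ff000
 * before the large page allocation is attempted.
 */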
1125
1126/**
1127 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1128 *
1129 * @returns The following VBox status codes.
1130 * @retval VINF_SUCCESS on success, the large page can be used again
1131 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1132 *
1133 * @param pVM The cross context VM structure.
1134 * @param GCPhys The address of the page.
1135 * @param pLargePage Page structure of the base page
1136 */
1137int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1138{
1139 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1140
1141 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1142
1143 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1144
1145 /* Check the base page. */
1146 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1147 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1148 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1149 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1150 {
1151 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1152 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1153 }
1154
1155 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1156 /* Check all remaining pages in the 2 MB range. */
1157 unsigned i;
1158 GCPhys += GUEST_PAGE_SIZE;
1159 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1160 {
1161 PPGMPAGE pPage;
1162 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1163 AssertRCBreak(rc);
1164
1165 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1166 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1167 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1168 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1169 {
1170 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1171 break;
1172 }
1173
1174 GCPhys += GUEST_PAGE_SIZE;
1175 }
1176 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1177
1178 if (i == _2M / GUEST_PAGE_SIZE)
1179 {
1180 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1181 pVM->pgm.s.cLargePagesDisabled--;
1182 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1183 return VINF_SUCCESS;
1184 }
1185
1186 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1187}
1188
1189#endif /* PGM_WITH_LARGE_PAGES */
1190
1191
1192/**
1193 * Deal with a write monitored page.
1194 *
1195 * @returns VBox strict status code.
1196 *
1197 * @param pVM The cross context VM structure.
1198 * @param pPage The physical page tracking structure.
1199 * @param GCPhys The guest physical address of the page.
1200 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1201 * very unlikely situation where it is okay that we let NEM
 1202 * fix the page access in a lazy fashion.
1203 *
1204 * @remarks Called from within the PGM critical section.
1205 */
1206void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1207{
1208 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1209 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1210 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1211 Assert(pVM->pgm.s.cMonitoredPages > 0);
1212 pVM->pgm.s.cMonitoredPages--;
1213 pVM->pgm.s.cWrittenToPages++;
1214
1215#ifdef VBOX_WITH_NATIVE_NEM
1216 /*
1217 * Notify NEM about the protection change so we won't spin forever.
1218 *
 1219 * Note! NEM needs to be able to lazily correct page protection as we cannot
1220 * really get it 100% right here it seems. The page pool does this too.
1221 */
1222 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1223 {
1224 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1225 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1226 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1227 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1228 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1229 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1230 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1231 }
1232#else
1233 RT_NOREF(GCPhys);
1234#endif
1235}
1236
1237
1238/**
1239 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1240 *
1241 * @returns VBox strict status code.
1242 * @retval VINF_SUCCESS on success.
1243 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
 1244 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1245 *
1246 * @param pVM The cross context VM structure.
1247 * @param pPage The physical page tracking structure.
1248 * @param GCPhys The address of the page.
1249 *
1250 * @remarks Called from within the PGM critical section.
1251 */
1252int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1253{
1254 PGM_LOCK_ASSERT_OWNER(pVM);
1255 switch (PGM_PAGE_GET_STATE(pPage))
1256 {
1257 case PGM_PAGE_STATE_WRITE_MONITORED:
1258 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1259 RT_FALL_THRU();
1260 default: /* to shut up GCC */
1261 case PGM_PAGE_STATE_ALLOCATED:
1262 return VINF_SUCCESS;
1263
1264 /*
1265 * Zero pages can be dummy pages for MMIO or reserved memory,
1266 * so we need to check the flags before joining cause with
1267 * shared page replacement.
1268 */
1269 case PGM_PAGE_STATE_ZERO:
1270 if (PGM_PAGE_IS_MMIO(pPage))
1271 return VERR_PGM_PHYS_PAGE_RESERVED;
1272 RT_FALL_THRU();
1273 case PGM_PAGE_STATE_SHARED:
1274 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1275
1276 /* Not allowed to write to ballooned pages. */
1277 case PGM_PAGE_STATE_BALLOONED:
1278 return VERR_PGM_PHYS_PAGE_BALLOONED;
1279 }
1280}
1281
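/*
 * Illustrative sketch (not part of the build): the usual calling pattern for
 * pgmPhysPageMakeWritable -- look the page up and force it into the ALLOCATED
 * state while holding the PGM lock.  Error handling is trimmed down.
 *
 * @code
 *     PGM_LOCK_VOID(pVM);
 *     PPGMPAGE pPage;
 *     int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *     if (RT_SUCCESS(rc))
 *         rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *     PGM_UNLOCK(pVM);
 * @endcode
 */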
1282
1283/**
1284 * Internal usage: Map the page specified by its GMM ID.
1285 *
 1286 * This is similar to pgmPhysPageMap.
1287 *
1288 * @returns VBox status code.
1289 *
1290 * @param pVM The cross context VM structure.
1291 * @param idPage The Page ID.
1292 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1293 * @param ppv Where to store the mapping address.
1294 *
1295 * @remarks Called from within the PGM critical section. The mapping is only
1296 * valid while you are inside this section.
1297 */
1298int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1299{
1300 /*
1301 * Validation.
1302 */
1303 PGM_LOCK_ASSERT_OWNER(pVM);
1304 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1305 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1306 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1307
1308#ifdef IN_RING0
1309# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1310 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1311# else
1312 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1313# endif
1314
1315#else
1316 /*
1317 * Find/make Chunk TLB entry for the mapping chunk.
1318 */
1319 PPGMCHUNKR3MAP pMap;
1320 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1321 if (pTlbe->idChunk == idChunk)
1322 {
1323 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1324 pMap = pTlbe->pChunk;
1325 }
1326 else
1327 {
1328 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1329
1330 /*
1331 * Find the chunk, map it if necessary.
1332 */
1333 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1334 if (pMap)
1335 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1336 else
1337 {
1338 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1339 if (RT_FAILURE(rc))
1340 return rc;
1341 }
1342
1343 /*
1344 * Enter it into the Chunk TLB.
1345 */
1346 pTlbe->idChunk = idChunk;
1347 pTlbe->pChunk = pMap;
1348 }
1349
1350 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1351 return VINF_SUCCESS;
1352#endif
1353}
1354
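/*
 * Note on the ID decoding used above (illustrative): a GMM page ID packs the
 * chunk ID in the upper bits and the page index in the lower bits, so
 * idPage >> GMM_CHUNKID_SHIFT selects the mapping chunk while
 * (idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT is the byte offset of
 * the page within that chunk's ring-3 mapping.
 */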
1355
1356/**
1357 * Maps a page into the current virtual address space so it can be accessed.
1358 *
1359 * @returns VBox status code.
1360 * @retval VINF_SUCCESS on success.
 1361 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1362 *
1363 * @param pVM The cross context VM structure.
1364 * @param pPage The physical page tracking structure.
1365 * @param GCPhys The address of the page.
1366 * @param ppMap Where to store the address of the mapping tracking structure.
1367 * @param ppv Where to store the mapping address of the page. The page
1368 * offset is masked off!
1369 *
1370 * @remarks Called from within the PGM critical section.
1371 */
1372static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1373{
1374 PGM_LOCK_ASSERT_OWNER(pVM);
1375 NOREF(GCPhys);
1376
1377 /*
1378 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1379 */
1380 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1381 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1382 {
1383 /* Decode the page id to a page in a MMIO2 ram range. */
1384 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1385 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1386 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1387 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1388 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1389 pPage->s.idPage, pPage->s.uStateY),
1390 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1391 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1392 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1393 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1394 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1395 *ppMap = NULL;
1396# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1397 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1398# elif defined(IN_RING0)
1399 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1400 return VINF_SUCCESS;
1401# else
1402 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1403 return VINF_SUCCESS;
1404# endif
1405 }
1406
1407# ifdef VBOX_WITH_PGM_NEM_MODE
1408 if (pVM->pgm.s.fNemMode)
1409 {
1410# ifdef IN_RING3
1411 /*
1412 * Find the corresponding RAM range and use that to locate the mapping address.
1413 */
1414 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1415 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
 1416 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1417 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1418 Assert(pPage == &pRam->aPages[idxPage]);
1419 *ppMap = NULL;
1420 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1421 return VINF_SUCCESS;
1422# else
1423 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1424# endif
1425 }
1426# endif
1427
1428 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1429 if (idChunk == NIL_GMM_CHUNKID)
1430 {
1431 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1432 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1433 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1434 {
1435 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1436 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
 1437 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1438 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1439 *ppv = pVM->pgm.s.abZeroPg;
1440 }
1441 else
1442 *ppv = pVM->pgm.s.abZeroPg;
1443 *ppMap = NULL;
1444 return VINF_SUCCESS;
1445 }
1446
1447# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1448 /*
1449 * Just use the physical address.
1450 */
1451 *ppMap = NULL;
1452 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1453
1454# elif defined(IN_RING0)
1455 /*
1456 * Go by page ID thru GMMR0.
1457 */
1458 *ppMap = NULL;
1459 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1460
1461# else
1462 /*
1463 * Find/make Chunk TLB entry for the mapping chunk.
1464 */
1465 PPGMCHUNKR3MAP pMap;
1466 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1467 if (pTlbe->idChunk == idChunk)
1468 {
1469 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1470 pMap = pTlbe->pChunk;
1471 AssertPtr(pMap->pv);
1472 }
1473 else
1474 {
1475 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1476
1477 /*
1478 * Find the chunk, map it if necessary.
1479 */
1480 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1481 if (pMap)
1482 {
1483 AssertPtr(pMap->pv);
1484 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1485 }
1486 else
1487 {
1488 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1489 if (RT_FAILURE(rc))
1490 return rc;
1491 AssertPtr(pMap->pv);
1492 }
1493
1494 /*
1495 * Enter it into the Chunk TLB.
1496 */
1497 pTlbe->idChunk = idChunk;
1498 pTlbe->pChunk = pMap;
1499 }
1500
1501 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1502 *ppMap = pMap;
1503 return VINF_SUCCESS;
1504# endif /* !IN_RING0 */
1505}
1506
1507
1508/**
1509 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1510 *
 1511 * This is typically used in paths where we cannot use the TLB methods (like ROM
1512 * pages) or where there is no point in using them since we won't get many hits.
1513 *
1514 * @returns VBox strict status code.
1515 * @retval VINF_SUCCESS on success.
1516 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
 1517 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1518 *
1519 * @param pVM The cross context VM structure.
1520 * @param pPage The physical page tracking structure.
1521 * @param GCPhys The address of the page.
1522 * @param ppv Where to store the mapping address of the page. The page
1523 * offset is masked off!
1524 *
1525 * @remarks Called from within the PGM critical section. The mapping is only
 1526 * valid while you are inside this section.
1527 */
1528int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1529{
1530 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1531 if (RT_SUCCESS(rc))
1532 {
1533 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1534 PPGMPAGEMAP pMapIgnore;
1535 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1536 if (RT_FAILURE(rc2)) /* preserve rc */
1537 rc = rc2;
1538 }
1539 return rc;
1540}
1541
1542
1543/**
1544 * Maps a page into the current virtual address space so it can be accessed for
1545 * both writing and reading.
1546 *
 1547 * This is typically used in paths where we cannot use the TLB methods (like ROM
1548 * pages) or where there is no point in using them since we won't get many hits.
1549 *
1550 * @returns VBox status code.
1551 * @retval VINF_SUCCESS on success.
 1552 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1553 *
1554 * @param pVM The cross context VM structure.
1555 * @param pPage The physical page tracking structure. Must be in the
1556 * allocated state.
1557 * @param GCPhys The address of the page.
1558 * @param ppv Where to store the mapping address of the page. The page
1559 * offset is masked off!
1560 *
1561 * @remarks Called from within the PGM critical section. The mapping is only
 1562 * valid while you are inside this section.
1563 */
1564int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1565{
1566 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1567 PPGMPAGEMAP pMapIgnore;
1568 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1569}
1570
1571
1572/**
1573 * Maps a page into the current virtual address space so it can be accessed for
1574 * reading.
1575 *
 1576 * This is typically used in paths where we cannot use the TLB methods (like ROM
1577 * pages) or where there is no point in using them since we won't get many hits.
1578 *
1579 * @returns VBox status code.
1580 * @retval VINF_SUCCESS on success.
 1581 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1582 *
1583 * @param pVM The cross context VM structure.
1584 * @param pPage The physical page tracking structure.
1585 * @param GCPhys The address of the page.
1586 * @param ppv Where to store the mapping address of the page. The page
1587 * offset is masked off!
1588 *
1589 * @remarks Called from within the PGM critical section. The mapping is only
1590 * valid while you are inside this section.
1591 */
1592int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1593{
1594 PPGMPAGEMAP pMapIgnore;
1595 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1596}
1597
1598
1599/**
1600 * Load a guest page into the ring-3 physical TLB.
1601 *
1602 * @returns VBox status code.
1603 * @retval VINF_SUCCESS on success
1604 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1605 * @param pVM The cross context VM structure.
1606 * @param GCPhys The guest physical address in question.
1607 */
1608int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1609{
1610 PGM_LOCK_ASSERT_OWNER(pVM);
1611
1612 /*
1613 * Find the ram range and page and hand it over to the with-page function.
1614 * 99.8% of requests are expected to be in the first range.
1615 */
1616 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1617 if (!pPage)
1618 {
1619 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1620 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1621 }
1622
1623 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1624}
1625
1626
1627/**
1628 * Load a guest page into the ring-3 physical TLB.
1629 *
1630 * @returns VBox status code.
1631 * @retval VINF_SUCCESS on success
1632 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1633 *
1634 * @param pVM The cross context VM structure.
1635 * @param pPage Pointer to the PGMPAGE structure corresponding to
1636 * GCPhys.
1637 * @param GCPhys The guest physical address in question.
1638 */
1639int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1640{
1641 PGM_LOCK_ASSERT_OWNER(pVM);
1642 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1643
1644 /*
1645 * Map the page.
1646 * Make a special case for the zero page as it is kind of special.
1647 */
1648 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1649 if ( !PGM_PAGE_IS_ZERO(pPage)
1650 && !PGM_PAGE_IS_BALLOONED(pPage))
1651 {
1652 void *pv;
1653 PPGMPAGEMAP pMap;
1654 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1655 if (RT_FAILURE(rc))
1656 return rc;
1657# ifndef IN_RING0
1658 pTlbe->pMap = pMap;
1659# endif
1660 pTlbe->pv = pv;
1661 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1662 }
1663 else
1664 {
1665 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1666# ifndef IN_RING0
1667 pTlbe->pMap = NULL;
1668# endif
1669 pTlbe->pv = pVM->pgm.s.abZeroPg;
1670 }
1671# ifdef PGM_WITH_PHYS_TLB
1672 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1673 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1674 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1675 else
1676 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1677# else
1678 pTlbe->GCPhys = NIL_RTGCPHYS;
1679# endif
1680 pTlbe->pPage = pPage;
1681 return VINF_SUCCESS;
1682}
1683
1684
1685/**
1686 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1687 * own the PGM lock and therefore not need to lock the mapped page.
1688 *
1689 * @returns VBox status code.
1690 * @retval VINF_SUCCESS on success.
1691 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1692 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1693 *
1694 * @param pVM The cross context VM structure.
1695 * @param GCPhys The guest physical address of the page that should be mapped.
1696 * @param pPage Pointer to the PGMPAGE structure for the page.
1697 * @param ppv Where to store the address corresponding to GCPhys.
1698 *
1699 * @internal
1700 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1701 */
1702int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1703{
1704 int rc;
1705 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1706 PGM_LOCK_ASSERT_OWNER(pVM);
1707 pVM->pgm.s.cDeprecatedPageLocks++;
1708
1709 /*
1710 * Make sure the page is writable.
1711 */
1712 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1713 {
1714 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1715 if (RT_FAILURE(rc))
1716 return rc;
1717 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1718 }
1719 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1720
1721 /*
1722 * Get the mapping address.
1723 */
1724 PPGMPAGEMAPTLBE pTlbe;
1725 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1726 if (RT_FAILURE(rc))
1727 return rc;
1728 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1729 return VINF_SUCCESS;
1730}
1731
1732
1733/**
1734 * Locks a page mapping for writing.
1735 *
1736 * @param pVM The cross context VM structure.
1737 * @param pPage The page.
1738 * @param pTlbe The mapping TLB entry for the page.
1739 * @param pLock The lock structure (output).
1740 */
1741DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1742{
1743# ifndef IN_RING0
1744 PPGMPAGEMAP pMap = pTlbe->pMap;
1745 if (pMap)
1746 pMap->cRefs++;
1747# else
1748 RT_NOREF(pTlbe);
1749# endif
1750
1751 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1752 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1753 {
1754 if (cLocks == 0)
1755 pVM->pgm.s.cWriteLockedPages++;
1756 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1757 }
1758 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1759 {
1760 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1761 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1762# ifndef IN_RING0
1763 if (pMap)
1764 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1765# endif
1766 }
1767
1768 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1769# ifndef IN_RING0
1770 pLock->pvMap = pMap;
1771# else
1772 pLock->pvMap = NULL;
1773# endif
1774}
1775
1776/**
1777 * Locks a page mapping for reading.
1778 *
1779 * @param pVM The cross context VM structure.
1780 * @param pPage The page.
1781 * @param pTlbe The mapping TLB entry for the page.
1782 * @param pLock The lock structure (output).
1783 */
1784DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1785{
1786# ifndef IN_RING0
1787 PPGMPAGEMAP pMap = pTlbe->pMap;
1788 if (pMap)
1789 pMap->cRefs++;
1790# else
1791 RT_NOREF(pTlbe);
1792# endif
1793
1794 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1795 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1796 {
1797 if (cLocks == 0)
1798 pVM->pgm.s.cReadLockedPages++;
1799 PGM_PAGE_INC_READ_LOCKS(pPage);
1800 }
1801 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1802 {
1803 PGM_PAGE_INC_READ_LOCKS(pPage);
1804 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1805# ifndef IN_RING0
1806 if (pMap)
1807 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1808# endif
1809 }
1810
1811 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1812# ifndef IN_RING0
1813 pLock->pvMap = pMap;
1814# else
1815 pLock->pvMap = NULL;
1816# endif
1817}
1818
1819
1820/**
1821 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1822 * own the PGM lock and have access to the page structure.
1823 *
1824 * @returns VBox status code.
1825 * @retval VINF_SUCCESS on success.
1826 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1827 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1828 *
1829 * @param pVM The cross context VM structure.
1830 * @param GCPhys The guest physical address of the page that should be mapped.
1831 * @param pPage Pointer to the PGMPAGE structure for the page.
1832 * @param ppv Where to store the address corresponding to GCPhys.
1833 * @param pLock Where to store the lock information that
1834 * pgmPhysReleaseInternalPageMappingLock needs.
1835 *
1836 * @internal
1837 */
1838int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1839{
1840 int rc;
1841 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1842 PGM_LOCK_ASSERT_OWNER(pVM);
1843
1844 /*
1845 * Make sure the page is writable.
1846 */
1847 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1848 {
1849 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1850 if (RT_FAILURE(rc))
1851 return rc;
1852 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1853 }
1854 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1855
1856 /*
1857 * Do the job.
1858 */
1859 PPGMPAGEMAPTLBE pTlbe;
1860 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1861 if (RT_FAILURE(rc))
1862 return rc;
1863 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1864 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1865 return VINF_SUCCESS;
1866}
1867
1868
1869/**
1870 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1871 * own the PGM lock and have access to the page structure.
1872 *
1873 * @returns VBox status code.
1874 * @retval VINF_SUCCESS on success.
1875 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1876 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1877 *
1878 * @param pVM The cross context VM structure.
1879 * @param GCPhys The guest physical address of the page that should be mapped.
1880 * @param pPage Pointer to the PGMPAGE structure for the page.
1881 * @param ppv Where to store the address corresponding to GCPhys.
1882 * @param pLock Where to store the lock information that
1883 * pgmPhysReleaseInternalPageMappingLock needs.
1884 *
1885 * @internal
1886 */
1887int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1888{
1889 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1890 PGM_LOCK_ASSERT_OWNER(pVM);
1891 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1892
1893 /*
1894 * Do the job.
1895 */
1896 PPGMPAGEMAPTLBE pTlbe;
1897 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1898 if (RT_FAILURE(rc))
1899 return rc;
1900 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1901 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1902 return VINF_SUCCESS;
1903}
1904
1905
1906/**
1907 * Requests the mapping of a guest page into the current context.
1908 *
1909 * This API should only be used for a very short time, as it will consume scarce
1910 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1911 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1912 *
1913 * This API will assume your intention is to write to the page, and will
1914 * therefore replace shared and zero pages. If you do not intend to modify
1915 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1916 *
1917 * @returns VBox status code.
1918 * @retval VINF_SUCCESS on success.
1919 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1920 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1921 *
1922 * @param pVM The cross context VM structure.
1923 * @param GCPhys The guest physical address of the page that should be
1924 * mapped.
1925 * @param ppv Where to store the address corresponding to GCPhys.
1926 * @param pLock Where to store the lock information that
1927 * PGMPhysReleasePageMappingLock needs.
1928 *
1929 * @remarks The caller is responsible for dealing with access handlers.
1930 * @todo Add an informational return code for pages with access handlers?
1931 *
1932 * @remark Avoid calling this API from within critical sections (other than
1933 * the PGM one) because of the deadlock risk. External threads may
1934 * need to delegate jobs to the EMTs.
1935 * @remarks Only one page is mapped! Make no assumption about what's after or
1936 * before the returned page!
1937 * @thread Any thread.
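 *
 * @par Example
 *      A minimal usage sketch (illustrative only, not taken from real callers);
 *      GCPhysGuest and bValue are hypothetical names supplied by the caller:
 *      @code
 *          PGMPAGEMAPLOCK Lock;
 *          void          *pvPage;
 *          int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysGuest, &pvPage, &Lock);
 *          if (RT_SUCCESS(rc))
 *          {
 *              // Only this one page is mapped; stay within GUEST_PAGE_SIZE bytes.
 *              *(uint8_t *)pvPage = bValue;
 *              PGMPhysReleasePageMappingLock(pVM, &Lock); // release ASAP
 *          }
 *      @endcode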
1938 */
1939VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1940{
1941 int rc = PGM_LOCK(pVM);
1942 AssertRCReturn(rc, rc);
1943
1944 /*
1945 * Query the Physical TLB entry for the page (may fail).
1946 */
1947 PPGMPAGEMAPTLBE pTlbe;
1948 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1949 if (RT_SUCCESS(rc))
1950 {
1951 /*
1952 * If the page is shared, the zero page, or being write monitored
1953 * it must be converted to a page that's writable if possible.
1954 */
1955 PPGMPAGE pPage = pTlbe->pPage;
1956 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1957 {
1958 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1959 if (RT_SUCCESS(rc))
1960 {
1961 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1962 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1963 }
1964 }
1965 if (RT_SUCCESS(rc))
1966 {
1967 /*
1968 * Now, just perform the locking and calculate the return address.
1969 */
1970 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1971 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1972 }
1973 }
1974
1975 PGM_UNLOCK(pVM);
1976 return rc;
1977}
1978
1979
1980/**
1981 * Requests the mapping of a guest page into the current context.
1982 *
1983 * This API should only be used for a very short time, as it will consume scarce
1984 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1985 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1986 *
1987 * @returns VBox status code.
1988 * @retval VINF_SUCCESS on success.
1989 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1990 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1991 *
1992 * @param pVM The cross context VM structure.
1993 * @param GCPhys The guest physical address of the page that should be
1994 * mapped.
1995 * @param ppv Where to store the address corresponding to GCPhys.
1996 * @param pLock Where to store the lock information that
1997 * PGMPhysReleasePageMappingLock needs.
1998 *
1999 * @remarks The caller is responsible for dealing with access handlers.
2000 * @todo Add an informational return code for pages with access handlers?
2001 *
2002 * @remarks Avoid calling this API from within critical sections (other than
2003 * the PGM one) because of the deadlock risk.
2004 * @remarks Only one page is mapped! Make no assumption about what's after or
2005 * before the returned page!
2006 * @thread Any thread.
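 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPhysGuest is a hypothetical
 *      guest physical address supplied by the caller:
 *      @code
 *          PGMPAGEMAPLOCK Lock;
 *          void const    *pvPage;
 *          int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysGuest, &pvPage, &Lock);
 *          if (RT_SUCCESS(rc))
 *          {
 *              uint8_t const bFirst = *(uint8_t const *)pvPage; // read-only access to this one page
 *              PGMPhysReleasePageMappingLock(pVM, &Lock);
 *              NOREF(bFirst);
 *          }
 *      @endcode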
2007 */
2008VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
2009{
2010 int rc = PGM_LOCK(pVM);
2011 AssertRCReturn(rc, rc);
2012
2013 /*
2014 * Query the Physical TLB entry for the page (may fail).
2015 */
2016 PPGMPAGEMAPTLBE pTlbe;
2017 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
2018 if (RT_SUCCESS(rc))
2019 {
2020 /* MMIO pages don't have any readable backing. */
2021 PPGMPAGE pPage = pTlbe->pPage;
2022 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
2023 rc = VERR_PGM_PHYS_PAGE_RESERVED;
2024 else
2025 {
2026 /*
2027 * Now, just perform the locking and calculate the return address.
2028 */
2029 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2030 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2031 }
2032 }
2033
2034 PGM_UNLOCK(pVM);
2035 return rc;
2036}
2037
2038
2039/**
2040 * Requests the mapping of a guest page given by virtual address into the current context.
2041 *
2042 * This API should only be used for a very short time, as it will consume
2043 * scarce resources (R0 and GC) in the mapping cache. When you're done
2044 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2045 *
2046 * This API will assume your intention is to write to the page, and will
2047 * therefore replace shared and zero pages. If you do not intend to modify
2048 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2049 *
2050 * @returns VBox status code.
2051 * @retval VINF_SUCCESS on success.
2052 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2053 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2054 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2055 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2056 *
2057 * @param pVCpu The cross context virtual CPU structure.
2058 * @param GCPtr The guest virtual address of the page that should be
2059 * mapped.
2060 * @param ppv Where to store the address corresponding to GCPtr.
2061 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2062 *
2063 * @remark Avoid calling this API from within critical sections (other than
2064 * the PGM one) because of the deadlock risk.
2065 * @thread EMT
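 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPtrGuest is a hypothetical
 *      guest virtual address resolved using the current paging mode:
 *      @code
 *          PGMPAGEMAPLOCK Lock;
 *          void          *pvPage;
 *          int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrGuest, &pvPage, &Lock);
 *          if (RT_SUCCESS(rc))
 *          {
 *              ((uint8_t *)pvPage)[0] ^= 1; // writable mapping of the backing page
 *              PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *          }
 *      @endcode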
2066 */
2067VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2068{
2069 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2070 RTGCPHYS GCPhys;
2071 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2072 if (RT_SUCCESS(rc))
2073 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2074 return rc;
2075}
2076
2077
2078/**
2079 * Requests the mapping of a guest page given by virtual address into the current context.
2080 *
2081 * This API should only be used for a very short time, as it will consume
2082 * scarce resources (R0 and GC) in the mapping cache. When you're done
2083 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2084 *
2085 * @returns VBox status code.
2086 * @retval VINF_SUCCESS on success.
2087 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2088 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2089 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2090 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2091 *
2092 * @param pVCpu The cross context virtual CPU structure.
2093 * @param GCPtr The guest virtual address of the page that should be
2094 * mapped.
2095 * @param ppv Where to store the address corresponding to GCPtr.
2096 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2097 *
2098 * @remark Avoid calling this API from within critical sections (other than
2099 * the PGM one) because of the deadlock risk.
2100 * @thread EMT
2101 */
2102VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2103{
2104 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2105 RTGCPHYS GCPhys;
2106 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2107 if (RT_SUCCESS(rc))
2108 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2109 return rc;
2110}
2111
2112
2113/**
2114 * Release the mapping of a guest page.
2115 *
2116 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2117 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2118 *
2119 * @param pVM The cross context VM structure.
2120 * @param pLock The lock structure initialized by the mapping function.
2121 */
2122VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2123{
2124# ifndef IN_RING0
2125 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2126# endif
2127 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2128 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2129
2130 pLock->uPageAndType = 0;
2131 pLock->pvMap = NULL;
2132
2133 PGM_LOCK_VOID(pVM);
2134 if (fWriteLock)
2135 {
2136 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2137 Assert(cLocks > 0);
2138 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2139 {
2140 if (cLocks == 1)
2141 {
2142 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2143 pVM->pgm.s.cWriteLockedPages--;
2144 }
2145 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2146 }
2147
2148 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2149 { /* probably extremely likely */ }
2150 else
2151 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2152 }
2153 else
2154 {
2155 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2156 Assert(cLocks > 0);
2157 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2158 {
2159 if (cLocks == 1)
2160 {
2161 Assert(pVM->pgm.s.cReadLockedPages > 0);
2162 pVM->pgm.s.cReadLockedPages--;
2163 }
2164 PGM_PAGE_DEC_READ_LOCKS(pPage);
2165 }
2166 }
2167
2168# ifndef IN_RING0
2169 if (pMap)
2170 {
2171 Assert(pMap->cRefs >= 1);
2172 pMap->cRefs--;
2173 }
2174# endif
2175 PGM_UNLOCK(pVM);
2176}
2177
2178
2179#ifdef IN_RING3
2180/**
2181 * Release the mapping of multiple guest pages.
2182 *
2183 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2184 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2185 *
2186 * @param pVM The cross context VM structure.
2187 * @param cPages Number of pages to unlock.
2188 * @param paLocks Array of lock structures initialized by the mapping
2189 * function.
2190 */
2191VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2192{
2193 Assert(cPages > 0);
2194 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2195#ifdef VBOX_STRICT
2196 for (uint32_t i = 1; i < cPages; i++)
2197 {
2198 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2199 AssertPtr(paLocks[i].uPageAndType);
2200 }
2201#endif
2202
2203 PGM_LOCK_VOID(pVM);
2204 if (fWriteLock)
2205 {
2206 /*
2207 * Write locks:
2208 */
2209 for (uint32_t i = 0; i < cPages; i++)
2210 {
2211 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2212 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2213 Assert(cLocks > 0);
2214 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2215 {
2216 if (cLocks == 1)
2217 {
2218 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2219 pVM->pgm.s.cWriteLockedPages--;
2220 }
2221 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2222 }
2223
2224 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2225 { /* probably extremely likely */ }
2226 else
2227 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2228
2229 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2230 if (pMap)
2231 {
2232 Assert(pMap->cRefs >= 1);
2233 pMap->cRefs--;
2234 }
2235
2236 /* Yield the lock: */
2237 if ((i & 1023) == 1023 && i + 1 < cPages)
2238 {
2239 PGM_UNLOCK(pVM);
2240 PGM_LOCK_VOID(pVM);
2241 }
2242 }
2243 }
2244 else
2245 {
2246 /*
2247 * Read locks:
2248 */
2249 for (uint32_t i = 0; i < cPages; i++)
2250 {
2251 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2252 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2253 Assert(cLocks > 0);
2254 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2255 {
2256 if (cLocks == 1)
2257 {
2258 Assert(pVM->pgm.s.cReadLockedPages > 0);
2259 pVM->pgm.s.cReadLockedPages--;
2260 }
2261 PGM_PAGE_DEC_READ_LOCKS(pPage);
2262 }
2263
2264 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2265 if (pMap)
2266 {
2267 Assert(pMap->cRefs >= 1);
2268 pMap->cRefs--;
2269 }
2270
2271 /* Yield the lock: */
2272 if ((i & 1023) == 1023 && i + 1 < cPages)
2273 {
2274 PGM_UNLOCK(pVM);
2275 PGM_LOCK_VOID(pVM);
2276 }
2277 }
2278 }
2279 PGM_UNLOCK(pVM);
2280
2281 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2282}
2283#endif /* IN_RING3 */
2284
2285
2286/**
2287 * Release the internal mapping of a guest page.
2288 *
2289 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2290 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2291 *
2292 * @param pVM The cross context VM structure.
2293 * @param pLock The lock structure initialized by the mapping function.
2294 *
2295 * @remarks Caller must hold the PGM lock.
2296 */
2297void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2298{
2299 PGM_LOCK_ASSERT_OWNER(pVM);
2300 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2301}
2302
2303
2304/**
2305 * Converts a GC physical address to a HC ring-3 pointer.
2306 *
2307 * @returns VINF_SUCCESS on success.
2308 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2309 * page but has no physical backing.
2310 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2311 * GC physical address.
2312 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2313 * a dynamic ram chunk boundary
2314 *
2315 * @param pVM The cross context VM structure.
2316 * @param GCPhys The GC physical address to convert.
2317 * @param pR3Ptr Where to store the R3 pointer on success.
2318 *
2319 * @deprecated Avoid when possible!
2320 */
2321int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2322{
2323/** @todo this is kind of hacky and needs some more work. */
2324#ifndef DEBUG_sandervl
2325 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2326#endif
2327
2328 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2329 PGM_LOCK_VOID(pVM);
2330
2331 PPGMRAMRANGE pRam;
2332 PPGMPAGE pPage;
2333 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2334 if (RT_SUCCESS(rc))
2335 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2336
2337 PGM_UNLOCK(pVM);
2338 Assert(rc <= VINF_SUCCESS);
2339 return rc;
2340}
2341
2342
2343/**
2344 * Converts a guest pointer to a GC physical address.
2345 *
2346 * This uses the current CR3/CR0/CR4 of the guest.
2347 *
2348 * @returns VBox status code.
2349 * @param pVCpu The cross context virtual CPU structure.
2350 * @param GCPtr The guest pointer to convert.
2351 * @param pGCPhys Where to store the GC physical address.
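 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPtrGuest is a hypothetical
 *      guest virtual address:
 *      @code
 *          RTGCPHYS GCPhys = NIL_RTGCPHYS;
 *          int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *          if (RT_SUCCESS(rc))
 *              Log(("%RGv -> %RGp\n", GCPtrGuest, GCPhys));
 *      @endcode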
2352 */
2353VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2354{
2355 PGMPTWALK Walk;
2356 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2357 if (pGCPhys && RT_SUCCESS(rc))
2358 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2359 return rc;
2360}
2361
2362
2363/**
2364 * Converts a guest pointer to a HC physical address.
2365 *
2366 * This uses the current CR3/CR0/CR4 of the guest.
2367 *
2368 * @returns VBox status code.
2369 * @param pVCpu The cross context virtual CPU structure.
2370 * @param GCPtr The guest pointer to convert.
2371 * @param pHCPhys Where to store the HC physical address.
2372 */
2373VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2374{
2375 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2376 PGMPTWALK Walk;
2377 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2378 if (RT_SUCCESS(rc))
2379 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2380 return rc;
2381}
2382
2383
2384
2385#undef LOG_GROUP
2386#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2387
2388
2389#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2390/**
2391 * Cache PGMPhys memory access
2392 *
2393 * @param pVM The cross context VM structure.
2394 * @param pCache Cache structure pointer
2395 * @param GCPhys GC physical address
2396 * @param pbR3 HC pointer corresponding to physical page
2397 *
2398 * @thread EMT.
2399 */
2400static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2401{
2402 uint32_t iCacheIndex;
2403
2404 Assert(VM_IS_EMT(pVM));
2405
2406 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2407 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
2408
2409 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2410
2411 ASMBitSet(&pCache->aEntries, iCacheIndex);
2412
2413 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2414 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2415}
2416#endif /* IN_RING3 */
2417
2418
2419/**
2420 * Deals with reading from a page with one or more ALL access handlers.
2421 *
2422 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2423 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2424 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2425 *
2426 * @param pVM The cross context VM structure.
2427 * @param pPage The page descriptor.
2428 * @param GCPhys The physical address to start reading at.
2429 * @param pvBuf Where to put the bits we read.
2430 * @param cb How much to read - less than or equal to a page.
2431 * @param enmOrigin The origin of this call.
2432 */
2433static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2434 PGMACCESSORIGIN enmOrigin)
2435{
2436 /*
2437 * The most frequent access here is MMIO and shadowed ROM.
2438 * The current code ASSUMES all these access handlers cover full pages!
2439 */
2440
2441 /*
2442 * Whatever we do we need the source page, map it first.
2443 */
2444 PGMPAGEMAPLOCK PgMpLck;
2445 const void *pvSrc = NULL;
2446 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2447/** @todo Check how this can work for MMIO pages? */
2448 if (RT_FAILURE(rc))
2449 {
2450 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2451 GCPhys, pPage, rc));
2452 memset(pvBuf, 0xff, cb);
2453 return VINF_SUCCESS;
2454 }
2455
2456 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2457
2458 /*
2459 * Deal with any physical handlers.
2460 */
2461 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2462 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2463 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2464 {
2465 PPGMPHYSHANDLER pCur;
2466 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2467 if (RT_SUCCESS(rc))
2468 {
2469 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2470 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
2471 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2472#ifndef IN_RING3
2473 if (enmOrigin != PGMACCESSORIGIN_IEM)
2474 {
2475 /* Cannot reliably handle informational status codes in this context */
2476 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2477 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2478 }
2479#endif
2480 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2481 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2482 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2483 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2484
2485 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2486 STAM_PROFILE_START(&pCur->Stat, h);
2487 PGM_LOCK_ASSERT_OWNER(pVM);
2488
2489 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2490 PGM_UNLOCK(pVM);
2491 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2492 PGM_LOCK_VOID(pVM);
2493
2494 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2495 pCur = NULL; /* might not be valid anymore. */
2496 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2497 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2498 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2499 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2500 {
2501 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2502 return rcStrict;
2503 }
2504 }
2505 else if (rc == VERR_NOT_FOUND)
2506 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
2507 else
2508 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
2509 }
2510
2511 /*
2512 * Take the default action.
2513 */
2514 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2515 {
2516 memcpy(pvBuf, pvSrc, cb);
2517 rcStrict = VINF_SUCCESS;
2518 }
2519 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2520 return rcStrict;
2521}
2522
2523
2524/**
2525 * Read physical memory.
2526 *
2527 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2528 * want to ignore those.
2529 *
2530 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2531 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2532 * @retval VINF_SUCCESS in all contexts - read completed.
2533 *
2534 * @retval VINF_EM_OFF in RC and R0 - read completed.
2535 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2536 * @retval VINF_EM_RESET in RC and R0 - read completed.
2537 * @retval VINF_EM_HALT in RC and R0 - read completed.
2538 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2539 *
2540 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2541 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2542 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2543 *
2544 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2545 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2546 *
2547 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2548 *
2549 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2550 * haven't been cleared for strict status codes yet.
2551 *
2552 * @param pVM The cross context VM structure.
2553 * @param GCPhys Physical address start reading from.
2554 * @param pvBuf Where to put the read bits.
2555 * @param cbRead How many bytes to read.
2556 * @param enmOrigin The origin of this call.
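 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPhysSrc is a hypothetical
 *      source address and the IEM origin is just one possible caller:
 *      @code
 *          uint8_t abBuf[64];
 *          VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhysSrc, abBuf, sizeof(abBuf), PGMACCESSORIGIN_IEM);
 *          if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          {
 *              // abBuf is now filled; rcStrict may still be an informational status
 *              // that ring-0/raw-mode callers have to propagate.
 *          }
 *      @endcode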
2557 */
2558VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2559{
2560 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2561 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2562
2563 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2564 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2565
2566 PGM_LOCK_VOID(pVM);
2567
2568 /*
2569 * Copy loop on ram ranges.
2570 */
2571 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2572 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2573 for (;;)
2574 {
2575 /* Inside range or not? */
2576 if (pRam && GCPhys >= pRam->GCPhys)
2577 {
2578 /*
2579 * Must work our way thru this page by page.
2580 */
2581 RTGCPHYS off = GCPhys - pRam->GCPhys;
2582 while (off < pRam->cb)
2583 {
2584 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2585 PPGMPAGE pPage = &pRam->aPages[iPage];
2586 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2587 if (cb > cbRead)
2588 cb = cbRead;
2589
2590 /*
2591 * Normal page? Get the pointer to it.
2592 */
2593 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2594 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2595 {
2596 /*
2597 * Get the pointer to the page.
2598 */
2599 PGMPAGEMAPLOCK PgMpLck;
2600 const void *pvSrc;
2601 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2602 if (RT_SUCCESS(rc))
2603 {
2604 memcpy(pvBuf, pvSrc, cb);
2605 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2606 }
2607 else
2608 {
2609 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2610 pRam->GCPhys + off, pPage, rc));
2611 memset(pvBuf, 0xff, cb);
2612 }
2613 }
2614 /*
2615 * Have ALL/MMIO access handlers.
2616 */
2617 else
2618 {
2619 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2620 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2621 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2622 else
2623 {
2624 memset(pvBuf, 0xff, cb);
2625 PGM_UNLOCK(pVM);
2626 return rcStrict2;
2627 }
2628 }
2629
2630 /* next page */
2631 if (cb >= cbRead)
2632 {
2633 PGM_UNLOCK(pVM);
2634 return rcStrict;
2635 }
2636 cbRead -= cb;
2637 off += cb;
2638 pvBuf = (char *)pvBuf + cb;
2639 } /* walk pages in ram range. */
2640
2641 GCPhys = pRam->GCPhysLast + 1;
2642 }
2643 else
2644 {
2645 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2646
2647 /*
2648 * Unassigned address space.
2649 */
2650 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2651 if (cb >= cbRead)
2652 {
2653 memset(pvBuf, 0xff, cbRead);
2654 break;
2655 }
2656 memset(pvBuf, 0xff, cb);
2657
2658 cbRead -= cb;
2659 pvBuf = (char *)pvBuf + cb;
2660 GCPhys += cb;
2661 }
2662
2663 /* Advance range if necessary. */
2664 while (pRam && GCPhys > pRam->GCPhysLast)
2665 pRam = pRam->CTX_SUFF(pNext);
2666 } /* Ram range walk */
2667
2668 PGM_UNLOCK(pVM);
2669 return rcStrict;
2670}
2671
2672
2673/**
2674 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2675 *
2676 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2677 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2678 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2679 *
2680 * @param pVM The cross context VM structure.
2681 * @param pPage The page descriptor.
2682 * @param GCPhys The physical address to start writing at.
2683 * @param pvBuf What to write.
2684 * @param cbWrite How much to write - less than or equal to a page.
2685 * @param enmOrigin The origin of this call.
2686 */
2687static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2688 PGMACCESSORIGIN enmOrigin)
2689{
2690 PGMPAGEMAPLOCK PgMpLck;
2691 void *pvDst = NULL;
2692 VBOXSTRICTRC rcStrict;
2693
2694 /*
2695 * Give priority to physical handlers (like #PF does).
2696 *
2697 * Hope for a lonely physical handler first that covers the whole write
2698 * area. This should be a pretty frequent case with MMIO and the heavy
2699 * usage of full page handlers in the page pool.
2700 */
2701 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2702 PPGMPHYSHANDLER pCur;
2703 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2704 if (RT_SUCCESS(rcStrict))
2705 {
2706 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2707#ifndef IN_RING3
2708 if (enmOrigin != PGMACCESSORIGIN_IEM)
2709 /* Cannot reliably handle informational status codes in this context */
2710 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2711#endif
2712 size_t cbRange = pCur->KeyLast - GCPhys + 1;
2713 if (cbRange > cbWrite)
2714 cbRange = cbWrite;
2715
2716 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2717 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2718 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2719 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2720 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2721 else
2722 rcStrict = VINF_SUCCESS;
2723 if (RT_SUCCESS(rcStrict))
2724 {
2725 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2726 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2727 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2728 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2729 STAM_PROFILE_START(&pCur->Stat, h);
2730
2731 /* Most handlers will want to release the PGM lock for deadlock prevention
2732 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2733 dirty page trackers will want to keep it for performance reasons. */
2734 PGM_LOCK_ASSERT_OWNER(pVM);
2735 if (pCurType->fKeepPgmLock)
2736 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2737 else
2738 {
2739 PGM_UNLOCK(pVM);
2740 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2741 PGM_LOCK_VOID(pVM);
2742 }
2743
2744 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2745 pCur = NULL; /* might not be valid anymore. */
2746 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2747 {
2748 if (pvDst)
2749 memcpy(pvDst, pvBuf, cbRange);
2750 rcStrict = VINF_SUCCESS;
2751 }
2752 else
2753 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2754 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2755 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2756 }
2757 else
2758 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2759 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2760 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2761 {
2762 if (pvDst)
2763 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2764 return rcStrict;
2765 }
2766
2767 /* more fun to be had below */
2768 cbWrite -= cbRange;
2769 GCPhys += cbRange;
2770 pvBuf = (uint8_t *)pvBuf + cbRange;
2771 pvDst = (uint8_t *)pvDst + cbRange;
2772 }
2773 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
2774 rcStrict = VINF_SUCCESS;
2775 else
2776 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2777 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2778
2779 /*
2780 * Deal with all the odd ends (used to be deal with virt+phys).
2781 */
2782 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2783
2784 /* We need a writable destination page. */
2785 if (!pvDst)
2786 {
2787 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2788 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2789 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2790 rc2);
2791 }
2792
2793 /** @todo clean up this code some more now there are no virtual handlers any
2794 * more. */
2795 /* The loop state (big + ugly). */
2796 PPGMPHYSHANDLER pPhys = NULL;
2797 uint32_t offPhys = GUEST_PAGE_SIZE;
2798 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2799 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2800
2801 /* The loop. */
2802 for (;;)
2803 {
2804 if (fMorePhys && !pPhys)
2805 {
2806 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
2807 if (RT_SUCCESS_NP(rcStrict))
2808 {
2809 offPhys = 0;
2810 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2811 }
2812 else
2813 {
2814 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2815
2816 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2817 GCPhys, &pPhys);
2818 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
2819 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2820
2821 if ( RT_SUCCESS(rcStrict)
2822 && pPhys->Key <= GCPhys + (cbWrite - 1))
2823 {
2824 offPhys = pPhys->Key - GCPhys;
2825 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2826 Assert(pPhys->KeyLast - pPhys->Key < _4G);
2827 }
2828 else
2829 {
2830 pPhys = NULL;
2831 fMorePhys = false;
2832 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2833 }
2834 }
2835 }
2836
2837 /*
2838 * Handle access to space without handlers (that's easy).
2839 */
2840 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2841 uint32_t cbRange = (uint32_t)cbWrite;
2842 Assert(cbRange == cbWrite);
2843
2844 /*
2845 * Physical handler.
2846 */
2847 if (!offPhys)
2848 {
2849#ifndef IN_RING3
2850 if (enmOrigin != PGMACCESSORIGIN_IEM)
2851 /* Cannot reliably handle informational status codes in this context */
2852 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2853#endif
2854 if (cbRange > offPhysLast + 1)
2855 cbRange = offPhysLast + 1;
2856
2857 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2858 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2859 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2860 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2861
2862 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2863 STAM_PROFILE_START(&pPhys->Stat, h);
2864
2865 /* Most handlers will want to release the PGM lock for deadlock prevention
2866 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2867 dirty page trackers will want to keep it for performance reasons. */
2868 PGM_LOCK_ASSERT_OWNER(pVM);
2869 if (pCurType->fKeepPgmLock)
2870 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2871 else
2872 {
2873 PGM_UNLOCK(pVM);
2874 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2875 PGM_LOCK_VOID(pVM);
2876 }
2877
2878 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2879 pPhys = NULL; /* might not be valid anymore. */
2880 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2881 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2882 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2883 }
2884
2885 /*
2886 * Execute the default action and merge the status codes.
2887 */
2888 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2889 {
2890 memcpy(pvDst, pvBuf, cbRange);
2891 rcStrict2 = VINF_SUCCESS;
2892 }
2893 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2894 {
2895 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2896 return rcStrict2;
2897 }
2898 else
2899 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2900
2901 /*
2902 * Advance if we've got more stuff to do.
2903 */
2904 if (cbRange >= cbWrite)
2905 {
2906 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2907 return rcStrict;
2908 }
2909
2910
2911 cbWrite -= cbRange;
2912 GCPhys += cbRange;
2913 pvBuf = (uint8_t *)pvBuf + cbRange;
2914 pvDst = (uint8_t *)pvDst + cbRange;
2915
2916 offPhys -= cbRange;
2917 offPhysLast -= cbRange;
2918 }
2919}
2920
2921
2922/**
2923 * Write to physical memory.
2924 *
2925 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2926 * want to ignore those.
2927 *
2928 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2929 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2930 * @retval VINF_SUCCESS in all contexts - write completed.
2931 *
2932 * @retval VINF_EM_OFF in RC and R0 - write completed.
2933 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2934 * @retval VINF_EM_RESET in RC and R0 - write completed.
2935 * @retval VINF_EM_HALT in RC and R0 - write completed.
2936 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2937 *
2938 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2939 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2940 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2941 *
2942 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2943 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2944 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2945 *
2946 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2947 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2948 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2949 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2950 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2951 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2952 *
2953 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2954 * haven't been cleared for strict status codes yet.
2955 *
2956 *
2957 * @param pVM The cross context VM structure.
2958 * @param GCPhys Physical address to write to.
2959 * @param pvBuf What to write.
2960 * @param cbWrite How many bytes to write.
2961 * @param enmOrigin Who is calling.
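 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPhysDst is a hypothetical
 *      destination address and the IEM origin is just one possible caller:
 *      @code
 *          uint32_t const uValue = UINT32_C(0x12345678);
 *          VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhysDst, &uValue, sizeof(uValue), PGMACCESSORIGIN_IEM);
 *          if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *              return rcStrict; // e.g. VINF_IOM_R3_MMIO_WRITE must be handed back to ring-3
 *      @endcode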
2962 */
2963VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2964{
2965 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2966 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2967 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2968
2969 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2970 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2971
2972 PGM_LOCK_VOID(pVM);
2973
2974 /*
2975 * Copy loop on ram ranges.
2976 */
2977 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2978 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2979 for (;;)
2980 {
2981 /* Inside range or not? */
2982 if (pRam && GCPhys >= pRam->GCPhys)
2983 {
2984 /*
2985 * Must work our way thru this page by page.
2986 */
2987 RTGCPTR off = GCPhys - pRam->GCPhys;
2988 while (off < pRam->cb)
2989 {
2990 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
2991 PPGMPAGE pPage = &pRam->aPages[iPage];
2992 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2993 if (cb > cbWrite)
2994 cb = cbWrite;
2995
2996 /*
2997 * Normal page? Get the pointer to it.
2998 */
2999 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3000 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3001 {
3002 PGMPAGEMAPLOCK PgMpLck;
3003 void *pvDst;
3004 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3005 if (RT_SUCCESS(rc))
3006 {
3007 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3008 memcpy(pvDst, pvBuf, cb);
3009 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3010 }
3011 /* Ignore writes to ballooned pages. */
3012 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3013 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3014 pRam->GCPhys + off, pPage, rc));
3015 }
3016 /*
3017 * Active WRITE or ALL access handlers.
3018 */
3019 else
3020 {
3021 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3022 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3023 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3024 else
3025 {
3026 PGM_UNLOCK(pVM);
3027 return rcStrict2;
3028 }
3029 }
3030
3031 /* next page */
3032 if (cb >= cbWrite)
3033 {
3034 PGM_UNLOCK(pVM);
3035 return rcStrict;
3036 }
3037
3038 cbWrite -= cb;
3039 off += cb;
3040 pvBuf = (const char *)pvBuf + cb;
3041 } /* walk pages in ram range */
3042
3043 GCPhys = pRam->GCPhysLast + 1;
3044 }
3045 else
3046 {
3047 /*
3048 * Unassigned address space, skip it.
3049 */
3050 if (!pRam)
3051 break;
3052 size_t cb = pRam->GCPhys - GCPhys;
3053 if (cb >= cbWrite)
3054 break;
3055 cbWrite -= cb;
3056 pvBuf = (const char *)pvBuf + cb;
3057 GCPhys += cb;
3058 }
3059
3060 /* Advance range if necessary. */
3061 while (pRam && GCPhys > pRam->GCPhysLast)
3062 pRam = pRam->CTX_SUFF(pNext);
3063 } /* Ram range walk */
3064
3065 PGM_UNLOCK(pVM);
3066 return rcStrict;
3067}
3068
3069
3070/**
3071 * Read from guest physical memory by GC physical address, bypassing
3072 * MMIO and access handlers.
3073 *
3074 * @returns VBox status code.
3075 * @param pVM The cross context VM structure.
3076 * @param pvDst The destination address.
3077 * @param GCPhysSrc The source address (GC physical address).
3078 * @param cb The number of bytes to read.
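 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPhysTable is a hypothetical
 *      guest physical address of some structure the caller trusts:
 *      @code
 *          uint8_t abHdr[16];
 *          int rc = PGMPhysSimpleReadGCPhys(pVM, abHdr, GCPhysTable, sizeof(abHdr));
 *          AssertRCReturn(rc, rc); // access handlers and MMIO are bypassed entirely
 *      @endcode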
3079 */
3080VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3081{
3082 /*
3083 * Treat the first page as a special case.
3084 */
3085 if (!cb)
3086 return VINF_SUCCESS;
3087
3088 /* map the 1st page */
3089 void const *pvSrc;
3090 PGMPAGEMAPLOCK Lock;
3091 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3092 if (RT_FAILURE(rc))
3093 return rc;
3094
3095 /* optimize for the case where access is completely within the first page. */
3096 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3097 if (RT_LIKELY(cb <= cbPage))
3098 {
3099 memcpy(pvDst, pvSrc, cb);
3100 PGMPhysReleasePageMappingLock(pVM, &Lock);
3101 return VINF_SUCCESS;
3102 }
3103
3104 /* copy to the end of the page. */
3105 memcpy(pvDst, pvSrc, cbPage);
3106 PGMPhysReleasePageMappingLock(pVM, &Lock);
3107 GCPhysSrc += cbPage;
3108 pvDst = (uint8_t *)pvDst + cbPage;
3109 cb -= cbPage;
3110
3111 /*
3112 * Page by page.
3113 */
3114 for (;;)
3115 {
3116 /* map the page */
3117 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3118 if (RT_FAILURE(rc))
3119 return rc;
3120
3121 /* last page? */
3122 if (cb <= GUEST_PAGE_SIZE)
3123 {
3124 memcpy(pvDst, pvSrc, cb);
3125 PGMPhysReleasePageMappingLock(pVM, &Lock);
3126 return VINF_SUCCESS;
3127 }
3128
3129 /* copy the entire page and advance */
3130 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3131 PGMPhysReleasePageMappingLock(pVM, &Lock);
3132 GCPhysSrc += GUEST_PAGE_SIZE;
3133 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3134 cb -= GUEST_PAGE_SIZE;
3135 }
3136 /* won't ever get here. */
3137}
3138
3139
3140/**
3141 * Write memory to a GC physical address in guest physical memory.
3143 *
3144 * This will bypass MMIO and access handlers.
3145 *
3146 * @returns VBox status code.
3147 * @param pVM The cross context VM structure.
3148 * @param GCPhysDst The GC physical address of the destination.
3149 * @param pvSrc The source buffer.
3150 * @param cb The number of bytes to write.
3151 */
3152VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3153{
3154 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3155
3156 /*
3157 * Treat the first page as a special case.
3158 */
3159 if (!cb)
3160 return VINF_SUCCESS;
3161
3162 /* map the 1st page */
3163 void *pvDst;
3164 PGMPAGEMAPLOCK Lock;
3165 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3166 if (RT_FAILURE(rc))
3167 return rc;
3168
3169 /* optimize for the case where access is completely within the first page. */
3170 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3171 if (RT_LIKELY(cb <= cbPage))
3172 {
3173 memcpy(pvDst, pvSrc, cb);
3174 PGMPhysReleasePageMappingLock(pVM, &Lock);
3175 return VINF_SUCCESS;
3176 }
3177
3178 /* copy to the end of the page. */
3179 memcpy(pvDst, pvSrc, cbPage);
3180 PGMPhysReleasePageMappingLock(pVM, &Lock);
3181 GCPhysDst += cbPage;
3182 pvSrc = (const uint8_t *)pvSrc + cbPage;
3183 cb -= cbPage;
3184
3185 /*
3186 * Page by page.
3187 */
3188 for (;;)
3189 {
3190 /* map the page */
3191 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3192 if (RT_FAILURE(rc))
3193 return rc;
3194
3195 /* last page? */
3196 if (cb <= GUEST_PAGE_SIZE)
3197 {
3198 memcpy(pvDst, pvSrc, cb);
3199 PGMPhysReleasePageMappingLock(pVM, &Lock);
3200 return VINF_SUCCESS;
3201 }
3202
3203 /* copy the entire page and advance */
3204 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3205 PGMPhysReleasePageMappingLock(pVM, &Lock);
3206 GCPhysDst += GUEST_PAGE_SIZE;
3207 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3208 cb -= GUEST_PAGE_SIZE;
3209 }
3210 /* won't ever get here. */
3211}
3212
3213
3214/**
3215 * Read from guest physical memory referenced by GC pointer.
3216 *
3217 * This function uses the current CR3/CR0/CR4 of the guest and will
3218 * bypass access handlers and not set any accessed bits.
3219 *
3220 * @returns VBox status code.
3221 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3222 * @param pvDst The destination address.
3223 * @param GCPtrSrc The source address (GC pointer).
3224 * @param cb The number of bytes to read.
3225 */
3226VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3227{
3228 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3229/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3230
3231 /*
3232 * Treat the first page as a special case.
3233 */
3234 if (!cb)
3235 return VINF_SUCCESS;
3236
3237 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3238 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3239
3240 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3241 * when many VCPUs are fighting for the lock.
3242 */
3243 PGM_LOCK_VOID(pVM);
3244
3245 /* map the 1st page */
3246 void const *pvSrc;
3247 PGMPAGEMAPLOCK Lock;
3248 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3249 if (RT_FAILURE(rc))
3250 {
3251 PGM_UNLOCK(pVM);
3252 return rc;
3253 }
3254
3255 /* optimize for the case where access is completely within the first page. */
3256 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3257 if (RT_LIKELY(cb <= cbPage))
3258 {
3259 memcpy(pvDst, pvSrc, cb);
3260 PGMPhysReleasePageMappingLock(pVM, &Lock);
3261 PGM_UNLOCK(pVM);
3262 return VINF_SUCCESS;
3263 }
3264
3265 /* copy to the end of the page. */
3266 memcpy(pvDst, pvSrc, cbPage);
3267 PGMPhysReleasePageMappingLock(pVM, &Lock);
3268 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3269 pvDst = (uint8_t *)pvDst + cbPage;
3270 cb -= cbPage;
3271
3272 /*
3273 * Page by page.
3274 */
3275 for (;;)
3276 {
3277 /* map the page */
3278 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3279 if (RT_FAILURE(rc))
3280 {
3281 PGM_UNLOCK(pVM);
3282 return rc;
3283 }
3284
3285 /* last page? */
3286 if (cb <= GUEST_PAGE_SIZE)
3287 {
3288 memcpy(pvDst, pvSrc, cb);
3289 PGMPhysReleasePageMappingLock(pVM, &Lock);
3290 PGM_UNLOCK(pVM);
3291 return VINF_SUCCESS;
3292 }
3293
3294 /* copy the entire page and advance */
3295 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3296 PGMPhysReleasePageMappingLock(pVM, &Lock);
3297 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3298 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3299 cb -= GUEST_PAGE_SIZE;
3300 }
3301 /* won't ever get here. */
3302}
3303
3304
3305/**
3306 * Write to guest physical memory referenced by GC pointer.
3307 *
3308 * This function uses the current CR3/CR0/CR4 of the guest and will
3309 * bypass access handlers and not set dirty or accessed bits.
3310 *
3311 * @returns VBox status code.
3312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3313 * @param GCPtrDst The destination address (GC pointer).
3314 * @param pvSrc The source address.
3315 * @param cb The number of bytes to write.
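 *
 * @par Example
 *      A minimal usage sketch (illustrative only); GCPtrDstVar is a hypothetical
 *      guest virtual address of a variable the caller wants to patch:
 *      @code
 *          uint32_t const uValue = 0;
 *          int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDstVar, &uValue, sizeof(uValue));
 *          AssertRCReturn(rc, rc); // neither dirty nor accessed bits are touched
 *      @endcode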
3316 */
3317VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3318{
3319 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3320 VMCPU_ASSERT_EMT(pVCpu);
3321
3322 /*
3323 * Treat the first page as a special case.
3324 */
3325 if (!cb)
3326 return VINF_SUCCESS;
3327
3328 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3329 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3330
3331 /* map the 1st page */
3332 void *pvDst;
3333 PGMPAGEMAPLOCK Lock;
3334 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3335 if (RT_FAILURE(rc))
3336 return rc;
3337
3338 /* optimize for the case where access is completely within the first page. */
3339 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3340 if (RT_LIKELY(cb <= cbPage))
3341 {
3342 memcpy(pvDst, pvSrc, cb);
3343 PGMPhysReleasePageMappingLock(pVM, &Lock);
3344 return VINF_SUCCESS;
3345 }
3346
3347 /* copy to the end of the page. */
3348 memcpy(pvDst, pvSrc, cbPage);
3349 PGMPhysReleasePageMappingLock(pVM, &Lock);
3350 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3351 pvSrc = (const uint8_t *)pvSrc + cbPage;
3352 cb -= cbPage;
3353
3354 /*
3355 * Page by page.
3356 */
3357 for (;;)
3358 {
3359 /* map the page */
3360 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3361 if (RT_FAILURE(rc))
3362 return rc;
3363
3364 /* last page? */
3365 if (cb <= GUEST_PAGE_SIZE)
3366 {
3367 memcpy(pvDst, pvSrc, cb);
3368 PGMPhysReleasePageMappingLock(pVM, &Lock);
3369 return VINF_SUCCESS;
3370 }
3371
3372 /* copy the entire page and advance */
3373 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3374 PGMPhysReleasePageMappingLock(pVM, &Lock);
3375 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3376 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3377 cb -= GUEST_PAGE_SIZE;
3378 }
3379 /* won't ever get here. */
3380}
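
/*
 * A usage sketch for PGMPhysSimpleWriteGCPtr (the helper name is hypothetical):
 * poke a 32-bit value into guest virtual memory.  Access handlers are bypassed
 * and no accessed/dirty bits are set, so the pattern suits internal fixups
 * rather than writes the guest is supposed to observe as its own.
 */
static int pgmPhysExampleSimpleWriteU32(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t uValue)
{
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}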
3381
3382
3383/**
3384 * Write to guest physical memory referenced by GC pointer and update the PTE.
3385 *
3386 * This function uses the current CR3/CR0/CR4 of the guest and will
3387 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3388 *
3389 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3390 *
3391 * @returns VBox status code.
3392 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3393 * @param GCPtrDst The destination address (GC pointer).
3394 * @param pvSrc The source address.
3395 * @param cb The number of bytes to write.
3396 */
3397VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3398{
3399 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3400 VMCPU_ASSERT_EMT(pVCpu);
3401
3402 /*
3403 * Treat the first page as a special case.
3404     * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3405 */
3406 if (!cb)
3407 return VINF_SUCCESS;
3408
3409 /* map the 1st page */
3410 void *pvDst;
3411 PGMPAGEMAPLOCK Lock;
3412 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3413 if (RT_FAILURE(rc))
3414 return rc;
3415
3416 /* optimize for the case where access is completely within the first page. */
3417 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3418 if (RT_LIKELY(cb <= cbPage))
3419 {
3420 memcpy(pvDst, pvSrc, cb);
3421 PGMPhysReleasePageMappingLock(pVM, &Lock);
3422 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3423 return VINF_SUCCESS;
3424 }
3425
3426 /* copy to the end of the page. */
3427 memcpy(pvDst, pvSrc, cbPage);
3428 PGMPhysReleasePageMappingLock(pVM, &Lock);
3429 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3430 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3431 pvSrc = (const uint8_t *)pvSrc + cbPage;
3432 cb -= cbPage;
3433
3434 /*
3435 * Page by page.
3436 */
3437 for (;;)
3438 {
3439 /* map the page */
3440 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3441 if (RT_FAILURE(rc))
3442 return rc;
3443
3444 /* last page? */
3445 if (cb <= GUEST_PAGE_SIZE)
3446 {
3447 memcpy(pvDst, pvSrc, cb);
3448 PGMPhysReleasePageMappingLock(pVM, &Lock);
3449 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3450 return VINF_SUCCESS;
3451 }
3452
3453 /* copy the entire page and advance */
3454 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3455 PGMPhysReleasePageMappingLock(pVM, &Lock);
3456 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3457 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3458 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3459 cb -= GUEST_PAGE_SIZE;
3460 }
3461 /* won't ever get here. */
3462}
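
/*
 * A sketch contrasting the two simple write variants (the helper name and the
 * fKeepPteClean parameter are hypothetical): the dirty variant additionally
 * sets X86_PTE_A/X86_PTE_D via PGMGstModifyPage, which matters when the write
 * emulates something the guest itself would have done.
 */
static int pgmPhysExampleWriteMaybeDirty(PVMCPUCC pVCpu, RTGCPTR GCPtrDst,
                                         const void *pvSrc, size_t cb, bool fKeepPteClean)
{
    if (fKeepPteClean)
        return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb);
}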
3463
3464
3465/**
3466 * Read from guest physical memory referenced by GC pointer.
3467 *
3468 * This function uses the current CR3/CR0/CR4 of the guest and will
3469 * respect access handlers and set accessed bits.
3470 *
3471 * @returns Strict VBox status, see PGMPhysRead for details.
3472 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3473 * specified virtual address.
3474 *
3475 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3476 * @param pvDst The destination address.
3477 * @param GCPtrSrc The source address (GC pointer).
3478 * @param cb The number of bytes to read.
3479 * @param enmOrigin Who is calling.
3480 * @thread EMT(pVCpu)
3481 */
3482VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3483{
3484 int rc;
3485 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3486 VMCPU_ASSERT_EMT(pVCpu);
3487
3488 /*
3489 * Anything to do?
3490 */
3491 if (!cb)
3492 return VINF_SUCCESS;
3493
3494 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3495
3496 /*
3497 * Optimize reads within a single page.
3498 */
3499 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3500 {
3501 /* Convert virtual to physical address + flags */
3502 PGMPTWALK Walk;
3503 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3504 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3505 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3506
3507 /* mark the guest page as accessed. */
3508 if (!(Walk.fEffective & X86_PTE_A))
3509 {
3510 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3511 AssertRC(rc);
3512 }
3513
3514 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3515 }
3516
3517 /*
3518 * Page by page.
3519 */
3520 for (;;)
3521 {
3522 /* Convert virtual to physical address + flags */
3523 PGMPTWALK Walk;
3524 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3525 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3526 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3527
3528 /* mark the guest page as accessed. */
3529 if (!(Walk.fEffective & X86_PTE_A))
3530 {
3531 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3532 AssertRC(rc);
3533 }
3534
3535 /* copy */
3536 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3537 if (cbRead < cb)
3538 {
3539 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3540 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3541 { /* likely */ }
3542 else
3543 return rcStrict;
3544 }
3545 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3546 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3547
3548 /* next */
3549 Assert(cb > cbRead);
3550 cb -= cbRead;
3551 pvDst = (uint8_t *)pvDst + cbRead;
3552 GCPtrSrc += cbRead;
3553 }
3554}
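
/*
 * A usage sketch for PGMPhysReadGCPtr (the helper name is hypothetical and
 * enmOrigin is simply passed through): unlike the simple variant, this path
 * respects access handlers, so the strict status may be informational (for
 * instance a ring-3 redirect in ring-0) and must be propagated, not asserted.
 */
static VBOXSTRICTRC pgmPhysExampleHandledRead(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc,
                                              size_t cb, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, enmOrigin);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
        LogFlow(("pgmPhysExampleHandledRead: %RGv/%zu -> %Rrc\n", GCPtrSrc, cb, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}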
3555
3556
3557/**
3558 * Write to guest physical memory referenced by GC pointer.
3559 *
3560 * This function uses the current CR3/CR0/CR4 of the guest and will
3561 * respect access handlers and set dirty and accessed bits.
3562 *
3563 * @returns Strict VBox status, see PGMPhysWrite for details.
3564 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3565 * specified virtual address.
3566 *
3567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3568 * @param GCPtrDst The destination address (GC pointer).
3569 * @param pvSrc The source address.
3570 * @param cb The number of bytes to write.
3571 * @param enmOrigin Who is calling.
3572 */
3573VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3574{
3575 int rc;
3576 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3577 VMCPU_ASSERT_EMT(pVCpu);
3578
3579 /*
3580 * Anything to do?
3581 */
3582 if (!cb)
3583 return VINF_SUCCESS;
3584
3585 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3586
3587 /*
3588 * Optimize writes within a single page.
3589 */
3590 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3591 {
3592 /* Convert virtual to physical address + flags */
3593 PGMPTWALK Walk;
3594 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3595 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3596 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3597
3598 /* Mention when we ignore X86_PTE_RW... */
3599 if (!(Walk.fEffective & X86_PTE_RW))
3600 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3601
3602 /* Mark the guest page as accessed and dirty if necessary. */
3603 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3604 {
3605 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3606 AssertRC(rc);
3607 }
3608
3609 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3610 }
3611
3612 /*
3613 * Page by page.
3614 */
3615 for (;;)
3616 {
3617 /* Convert virtual to physical address + flags */
3618 PGMPTWALK Walk;
3619 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3620 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3621 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3622
3623 /* Mention when we ignore X86_PTE_RW... */
3624 if (!(Walk.fEffective & X86_PTE_RW))
3625 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3626
3627 /* Mark the guest page as accessed and dirty if necessary. */
3628 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3629 {
3630 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3631 AssertRC(rc);
3632 }
3633
3634 /* copy */
3635 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3636 if (cbWrite < cb)
3637 {
3638 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3639 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3640 { /* likely */ }
3641 else
3642 return rcStrict;
3643 }
3644 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3645 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3646
3647 /* next */
3648 Assert(cb > cbWrite);
3649 cb -= cbWrite;
3650 pvSrc = (uint8_t *)pvSrc + cbWrite;
3651 GCPtrDst += cbWrite;
3652 }
3653}
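
/*
 * A read-modify-write sketch built on PGMPhysReadGCPtr and PGMPhysWriteGCPtr
 * (the helper name and the OR-mask operation are hypothetical).  Both calls
 * honour access handlers and update the accessed/dirty bits, and either one
 * may return an informational status that has to be handed back to the caller.
 */
static VBOXSTRICTRC pgmPhysExampleOrU32(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOrMask,
                                        PGMACCESSORIGIN enmOrigin)
{
    uint32_t uValue = 0;
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &uValue, GCPtr, sizeof(uValue), enmOrigin);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uValue |= fOrMask;
    return PGMPhysWriteGCPtr(pVCpu, GCPtr, &uValue, sizeof(uValue), enmOrigin);
}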
3654
3655
3656/**
3657 * Return the page type of the specified physical address.
3658 *
3659 * @returns The page type.
3660 * @param pVM The cross context VM structure.
3661 * @param GCPhys Guest physical address
3662 */
3663VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3664{
3665 PGM_LOCK_VOID(pVM);
3666 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3667 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3668 PGM_UNLOCK(pVM);
3669
3670 return enmPgType;
3671}
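
/*
 * A usage sketch for PGMPhysGetPageType (the helper name is hypothetical): the
 * function takes and releases the PGM lock itself, so a caller can cheaply
 * check whether a physical address is backed by anything before doing heavier
 * work on it.
 */
static bool pgmPhysExampleIsBacked(PVMCC pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}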
3672
3673
3674/**
3675 * Converts a GC physical address to a HC ring-3 pointer, with some
3676 * additional checks.
3677 *
3678 * @returns VBox status code (no informational statuses).
3679 *
3680 * @param pVM The cross context VM structure.
3681 * @param pVCpu The cross context virtual CPU structure of the
3682 * calling EMT.
3683 * @param   GCPhys          The GC physical address to convert. This API masks
3684 * the A20 line when necessary.
3685 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3686 * be done while holding the PGM lock.
3687 * @param ppb Where to store the pointer corresponding to GCPhys
3688 * on success.
3689 * @param pfTlb The TLB flags and revision. We only add stuff.
3690 *
3691 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3692 * PGMPhysIemGCPhys2Ptr.
3693 *
3694 * @thread EMT(pVCpu).
3695 */
3696VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3697 R3R0PTRTYPE(uint8_t *) *ppb,
3698 uint64_t *pfTlb)
3699{
3700 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3701 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3702
3703 PGM_LOCK_VOID(pVM);
3704
3705 PPGMRAMRANGE pRam;
3706 PPGMPAGE pPage;
3707 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3708 if (RT_SUCCESS(rc))
3709 {
3710 if (!PGM_PAGE_IS_BALLOONED(pPage))
3711 {
3712 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3713 {
3714 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3715 {
3716 /*
3717 * No access handler.
3718 */
3719 switch (PGM_PAGE_GET_STATE(pPage))
3720 {
3721 case PGM_PAGE_STATE_ALLOCATED:
3722 *pfTlb |= *puTlbPhysRev;
3723 break;
3724 case PGM_PAGE_STATE_BALLOONED:
3725 AssertFailed();
3726 RT_FALL_THRU();
3727 case PGM_PAGE_STATE_ZERO:
3728 case PGM_PAGE_STATE_SHARED:
3729 case PGM_PAGE_STATE_WRITE_MONITORED:
3730 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3731 break;
3732 }
3733
3734 PPGMPAGEMAPTLBE pTlbe;
3735 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3736 AssertLogRelRCReturn(rc, rc);
3737 *ppb = (uint8_t *)pTlbe->pv;
3738 }
3739 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3740 {
3741 /*
3742 * MMIO or similar all access handler: Catch all access.
3743 */
3744 *pfTlb |= *puTlbPhysRev
3745 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3746 *ppb = NULL;
3747 }
3748 else
3749 {
3750 /*
3751 * Write access handler: Catch write accesses if active.
3752 */
3753 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3754 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3755 else
3756 switch (PGM_PAGE_GET_STATE(pPage))
3757 {
3758 case PGM_PAGE_STATE_ALLOCATED:
3759 *pfTlb |= *puTlbPhysRev;
3760 break;
3761 case PGM_PAGE_STATE_BALLOONED:
3762 AssertFailed();
3763 RT_FALL_THRU();
3764 case PGM_PAGE_STATE_ZERO:
3765 case PGM_PAGE_STATE_SHARED:
3766 case PGM_PAGE_STATE_WRITE_MONITORED:
3767 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3768 break;
3769 }
3770
3771 PPGMPAGEMAPTLBE pTlbe;
3772 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3773 AssertLogRelRCReturn(rc, rc);
3774 *ppb = (uint8_t *)pTlbe->pv;
3775 }
3776 }
3777 else
3778 {
3779 /* Alias MMIO: For now, we catch all access. */
3780 *pfTlb |= *puTlbPhysRev
3781 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3782 *ppb = NULL;
3783 }
3784 }
3785 else
3786 {
3787            /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
3788 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3789 *ppb = NULL;
3790 }
3791 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3792 }
3793 else
3794 {
3795 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3796 *ppb = NULL;
3797 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3798 }
3799
3800 PGM_UNLOCK(pVM);
3801 return VINF_SUCCESS;
3802}
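
/*
 * A sketch of consuming the *pfTlb flags produced by PGMPhysIemGCPhys2PtrNoLock
 * (the helper name is hypothetical; GCPhysPage is assumed page aligned and the
 * TLB revision pointer comes from the caller's own IEM-style TLB).  Note that
 * the function returns VINF_SUCCESS even for unassigned pages and encodes the
 * access restrictions entirely in the flags and the optional pointer.
 */
static int pgmPhysExampleQueryDirect(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPage,
                                     uint64_t const volatile *puTlbPhysRev, bool fWrite)
{
    R3R0PTRTYPE(uint8_t *) pb   = NULL;
    uint64_t               fTlb = 0;
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, puTlbPhysRev, &pb, &fTlb);
    if (RT_FAILURE(rc))
        return rc;
    if (fWrite && (fTlb & PGMIEMGCPHYS2PTR_F_NO_WRITE))
        return VERR_PGM_PHYS_TLB_CATCH_WRITE;   /* writes must go through PGMPhysWrite */
    if (fTlb & PGMIEMGCPHYS2PTR_F_NO_READ)
        return VERR_PGM_PHYS_TLB_CATCH_ALL;     /* reads too must go through PGMPhysRead */
    /* ... pb and fTlb can now be cached for direct access ... */
    return VINF_SUCCESS;
}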
3803
3804
3805/**
3806 * Converts a GC physical address to a HC ring-3 pointer, with some
3807 * additional checks.
3808 *
3809 * @returns VBox status code (no informational statuses).
3810 * @retval VINF_SUCCESS on success.
3811 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3812 * access handler of some kind.
3813 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3814 * accesses or is odd in any way.
3815 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3816 *
3817 * @param pVM The cross context VM structure.
3818 * @param pVCpu The cross context virtual CPU structure of the
3819 * calling EMT.
3820 * @param   GCPhys          The GC physical address to convert. This API masks
3821 * the A20 line when necessary.
3822 * @param fWritable Whether write access is required.
3823 * @param fByPassHandlers Whether to bypass access handlers.
3824 * @param ppv Where to store the pointer corresponding to GCPhys
3825 * on success.
3826 * @param   pLock           Where to store the page mapping lock; release it with PGMPhysReleasePageMappingLock().
3827 *
3828 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3829 * @thread EMT(pVCpu).
3830 */
3831VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3832 void **ppv, PPGMPAGEMAPLOCK pLock)
3833{
3834 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3835
3836 PGM_LOCK_VOID(pVM);
3837
3838 PPGMRAMRANGE pRam;
3839 PPGMPAGE pPage;
3840 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3841 if (RT_SUCCESS(rc))
3842 {
3843 if (PGM_PAGE_IS_BALLOONED(pPage))
3844 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3845 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3846 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3847 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3848 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3849 rc = VINF_SUCCESS;
3850 else
3851 {
3852 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3853 {
3854 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3855 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3856 }
3857 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3858 {
3859 Assert(!fByPassHandlers);
3860 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3861 }
3862 }
3863 if (RT_SUCCESS(rc))
3864 {
3865 int rc2;
3866
3867 /* Make sure what we return is writable. */
3868 if (fWritable)
3869 switch (PGM_PAGE_GET_STATE(pPage))
3870 {
3871 case PGM_PAGE_STATE_ALLOCATED:
3872 break;
3873 case PGM_PAGE_STATE_BALLOONED:
3874 AssertFailed();
3875 break;
3876 case PGM_PAGE_STATE_ZERO:
3877 case PGM_PAGE_STATE_SHARED:
3878 case PGM_PAGE_STATE_WRITE_MONITORED:
3879 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3880 AssertLogRelRCReturn(rc2, rc2);
3881 break;
3882 }
3883
3884 /* Get a ring-3 mapping of the address. */
3885 PPGMPAGEMAPTLBE pTlbe;
3886 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3887 AssertLogRelRCReturn(rc2, rc2);
3888
3889 /* Lock it and calculate the address. */
3890 if (fWritable)
3891 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3892 else
3893 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3894 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3895
3896 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3897 }
3898 else
3899 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3900
3901 /* else: handler catching all access, no pointer returned. */
3902 }
3903 else
3904 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3905
3906 PGM_UNLOCK(pVM);
3907 return rc;
3908}
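
/*
 * A usage sketch for PGMPhysIemGCPhys2Ptr (the helper name is hypothetical):
 * map a guest physical byte, read it directly, and release the mapping lock.
 * The VERR_PGM_PHYS_TLB_* statuses tell the caller to fall back to the
 * handler-aware PGMPhysRead/PGMPhysWrite paths instead of failing hard.
 */
static int pgmPhysExamplePeekByte(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void          *pv = NULL;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, false /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}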
3909
3910
3911/**
3912 * Checks if the given GCPhys page requires special handling for the given access
3913 * because it's MMIO or otherwise monitored.
3914 *
3915 * @returns VBox status code (no informational statuses).
3916 * @retval VINF_SUCCESS on success.
3917 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3918 * access handler of some kind.
3919 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3920 * accesses or is odd in any way.
3921 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3922 *
3923 * @param pVM The cross context VM structure.
3924 * @param GCPhys The GC physical address to convert. Since this is
3925 * only used for filling the REM TLB, the A20 mask must
3926 * be applied before calling this API.
3927 * @param fWritable Whether write access is required.
3928 * @param fByPassHandlers Whether to bypass access handlers.
3929 *
3930 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3931 * a stop gap thing that should be removed once there is a better TLB
3932 * for virtual address accesses.
3933 */
3934VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3935{
3936 PGM_LOCK_VOID(pVM);
3937 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3938
3939 PPGMRAMRANGE pRam;
3940 PPGMPAGE pPage;
3941 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3942 if (RT_SUCCESS(rc))
3943 {
3944 if (PGM_PAGE_IS_BALLOONED(pPage))
3945 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3946 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3947 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3948 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3949 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3950 rc = VINF_SUCCESS;
3951 else
3952 {
3953 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3954 {
3955 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3956 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3957 }
3958 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3959 {
3960 Assert(!fByPassHandlers);
3961 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3962 }
3963 }
3964 }
3965
3966 PGM_UNLOCK(pVM);
3967 return rc;
3968}
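
/*
 * A sketch of interpreting PGMPhysIemQueryAccess (the helper name is
 * hypothetical): VINF_SUCCESS means the page can be accessed directly, while
 * the VERR_PGM_PHYS_TLB_* codes mean the access must be routed through the
 * handler-aware PGMPhysRead/PGMPhysWrite paths.
 */
static bool pgmPhysExampleCanAccessDirectly(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable)
{
    return PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/) == VINF_SUCCESS;
}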
3969
3970#ifdef VBOX_WITH_NATIVE_NEM
3971
3972/**
3973 * Interface used by NEM to check what to do on a memory access exit.
3974 *
3975 * @returns VBox status code.
3976 * @param pVM The cross context VM structure.
3977 * @param pVCpu The cross context per virtual CPU structure.
3978 * Optional.
3979 * @param GCPhys The guest physical address.
3980 * @param   fMakeWritable   Whether to try to make the page writable or not. If it
3981 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
3982 *                          be returned and the return code will be unaffected.
3983 * @param pInfo Where to return the page information. This is
3984 * initialized even on failure.
3985 * @param pfnChecker Page in-sync checker callback. Optional.
3986 * @param pvUser User argument to pass to pfnChecker.
3987 */
3988VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
3989 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
3990{
3991 PGM_LOCK_VOID(pVM);
3992
3993 PPGMPAGE pPage;
3994 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
3995 if (RT_SUCCESS(rc))
3996 {
3997 /* Try make it writable if requested. */
3998 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
3999 if (fMakeWritable)
4000 switch (PGM_PAGE_GET_STATE(pPage))
4001 {
4002 case PGM_PAGE_STATE_SHARED:
4003 case PGM_PAGE_STATE_WRITE_MONITORED:
4004 case PGM_PAGE_STATE_ZERO:
4005 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4006 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4007 rc = VINF_SUCCESS;
4008 break;
4009 }
4010
4011 /* Fill in the info. */
4012 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4013 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4014 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4015 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4016 pInfo->enmType = enmType;
4017 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4018 switch (PGM_PAGE_GET_STATE(pPage))
4019 {
4020 case PGM_PAGE_STATE_ALLOCATED:
4021 pInfo->fZeroPage = 0;
4022 break;
4023
4024 case PGM_PAGE_STATE_ZERO:
4025 pInfo->fZeroPage = 1;
4026 break;
4027
4028 case PGM_PAGE_STATE_WRITE_MONITORED:
4029 pInfo->fZeroPage = 0;
4030 break;
4031
4032 case PGM_PAGE_STATE_SHARED:
4033 pInfo->fZeroPage = 0;
4034 break;
4035
4036 case PGM_PAGE_STATE_BALLOONED:
4037 pInfo->fZeroPage = 1;
4038 break;
4039
4040 default:
4041 pInfo->fZeroPage = 1;
4042 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4043 }
4044
4045 /* Call the checker and update NEM state. */
4046 if (pfnChecker)
4047 {
4048 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4049 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4050 }
4051
4052 /* Done. */
4053 PGM_UNLOCK(pVM);
4054 }
4055 else
4056 {
4057 PGM_UNLOCK(pVM);
4058
4059 pInfo->HCPhys = NIL_RTHCPHYS;
4060 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4061 pInfo->u2NemState = 0;
4062 pInfo->fHasHandlers = 0;
4063 pInfo->fZeroPage = 0;
4064 pInfo->enmType = PGMPAGETYPE_INVALID;
4065 }
4066
4067 return rc;
4068}
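
/*
 * A usage sketch for PGMPhysNemPageInfoChecker with the optional checker
 * callback omitted (the helper name is hypothetical): query the host physical
 * address and NEM protection for a page, asking PGM to make it writable first.
 */
static int pgmPhysExampleNemQueryPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo)
{
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, true /*fMakeWritable*/, pInfo,
                                       NULL /*pfnChecker*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
    {
        /* pInfo->HCPhys, pInfo->fNemProt and pInfo->u2NemState now describe the page. */
        Log6(("pgmPhysExampleNemQueryPage: %RGp -> HCPhys=%RHp\n", GCPhys, pInfo->HCPhys));
    }
    return rc;
}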
4069
4070
4071/**
4072 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4073 * or higher.
4074 *
4075 * @returns VBox status code from callback.
4076 * @param pVM The cross context VM structure.
4077 * @param pVCpu The cross context per CPU structure. This is
4078 * optional as its only for passing to callback.
4079 * @param uMinState The minimum NEM state value to call on.
4080 * @param pfnCallback The callback function.
4081 * @param pvUser User argument for the callback.
4082 */
4083VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4084 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4085{
4086 /*
4087 * Just brute force this problem.
4088 */
4089 PGM_LOCK_VOID(pVM);
4090 int rc = VINF_SUCCESS;
4091 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4092 {
4093 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4094 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4095 {
4096 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4097 if (u2State < uMinState)
4098 { /* likely */ }
4099 else
4100 {
4101 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4102 if (RT_SUCCESS(rc))
4103 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4104 else
4105 break;
4106 }
4107 }
4108 }
4109 PGM_UNLOCK(pVM);
4110
4111 return rc;
4112}
4113
4114
4115/**
4116 * Helper for setting the NEM state for a range of pages.
4117 *
4118 * @param paPages Array of pages to modify.
4119 * @param cPages How many pages to modify.
4120 * @param u2State The new state value.
4121 */
4122void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4123{
4124 PPGMPAGE pPage = paPages;
4125 while (cPages-- > 0)
4126 {
4127 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4128 pPage++;
4129 }
4130}
4131
4132#endif /* VBOX_WITH_NATIVE_NEM */
4133