VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 97441

Last change on this file since 97441 was 97197, checked in by vboxsync, 2 years ago

VMM/PGM,IEM,EM: Changed FNPGMRZPHYSPFHANDLER, PGMTrap0eHandler and PGMR0Trap0eHandlerNPMisconfig to take PCPUMCTX instead of PCPUMCTXCORE parameters; dropped PCPUMCTXCORE parameters from IEMExecOneBypassEx, PGMInterpretInstruction and EMInterpretInstruction together with some associated cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 146.6 KB
1/* $Id: PGMAllPhys.cpp 97197 2022-10-18 11:09:55Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include "PGMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include "PGMInline.h"
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <VBox/log.h>
49#ifdef IN_RING3
50# include <iprt/thread.h>
51#endif
52
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57/** Enable the physical TLB. */
58#define PGM_WITH_PHYS_TLB
59
60/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
61 * Checks if valid physical access handler return code (normal handler, not PF).
62 *
63 * Checks if the given strict status code is one of the expected ones for a
64 * physical access handler in the current context.
65 *
66 * @returns true or false.
67 * @param a_rcStrict The status code.
68 * @param a_fWrite Whether it is a write or read being serviced.
69 *
70 * @remarks We wish to keep the list of statuses here as short as possible.
71 * When changing, please make sure to update the PGMPhysRead,
72 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
73 */
74#ifdef IN_RING3
75# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
76 ( (a_rcStrict) == VINF_SUCCESS \
77 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
78#elif defined(IN_RING0)
79#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
80 ( (a_rcStrict) == VINF_SUCCESS \
81 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
82 \
83 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
84 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
85 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
86 \
87 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
88 || (a_rcStrict) == VINF_EM_DBG_STOP \
89 || (a_rcStrict) == VINF_EM_DBG_EVENT \
90 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
91 || (a_rcStrict) == VINF_EM_OFF \
92 || (a_rcStrict) == VINF_EM_SUSPEND \
93 || (a_rcStrict) == VINF_EM_RESET \
94 )
95#else
96# error "Context?"
97#endif
98
99/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
100 * Checks if valid virtual access handler return code (normal handler, not PF).
101 *
102 * Checks if the given strict status code is one of the expected ones for a
103 * virtual access handler in the current context.
104 *
105 * @returns true or false.
106 * @param a_rcStrict The status code.
107 * @param a_fWrite Whether it is a write or read being serviced.
108 *
109 * @remarks We wish to keep the list of statuses here as short as possible.
110 * When changing, please make sure to update the PGMPhysRead,
111 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
112 */
113#ifdef IN_RING3
114# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
115 ( (a_rcStrict) == VINF_SUCCESS \
116 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
117#elif defined(IN_RING0)
118# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
119 (false /* no virtual handlers in ring-0! */ )
120#else
121# error "Context?"
122#endif
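/* Illustrative usage sketch (added for clarity, not part of the original file;
 * pfnHandler stands for an arbitrary FNPGMPHYSHANDLER callback):
 *
 *     VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                        PGMACCESSTYPE_WRITE, enmOrigin, uUser);
 *     AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
 *               ("Unexpected handler status %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 */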
123
124
125
126/**
127 * Calculate the actual table size.
128 *
129 * The memory is laid out like this:
130 * - PGMPHYSHANDLERTREE (8 bytes)
131 * - Allocation bitmap (8-byte size align)
132 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
133 */
134uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
135{
136 /*
137 * A minimum of 64 entries and a maximum of ~64K.
138 */
139 uint32_t cEntries = *pcEntries;
140 if (cEntries <= 64)
141 cEntries = 64;
142 else if (cEntries >= _64K)
143 cEntries = _64K;
144 else
145 cEntries = RT_ALIGN_32(cEntries, 16);
146
147 /*
148 * Do the initial calculation.
149 */
150 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
151 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
152 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
153 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
154
155 /*
156 * Align the total and try to use up extra space from that.
157 */
158 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
159 uint32_t cAvail = cbTotalAligned - cbTotal;
160 cAvail /= sizeof(PGMPHYSHANDLER);
161 if (cAvail >= 1)
162 for (;;)
163 {
164 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
165 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
166 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
167 cbTotal = cbTreeAndBitmap + cbTable;
168 if (cbTotal <= cbTotalAligned)
169 break;
170 cEntries--;
171 Assert(cEntries >= 16);
172 }
173
174 /*
175 * Return the result.
176 */
177 *pcbTreeAndBitmap = cbTreeAndBitmap;
178 *pcEntries = cEntries;
179 return cbTotalAligned;
180}
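/* Illustrative usage sketch (added for clarity, not part of the original file;
 * the variable names are made up):
 *
 *     uint32_t cEntries        = 100;   // rounded up to 112, a multiple of 16
 *     uint32_t cbTreeAndBitmap = 0;
 *     uint32_t cbTotal = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
 *     // cbTotal is the aligned allocation size; the slab of PGMPHYSHANDLER entries
 *     // starts cbTreeAndBitmap bytes into that allocation.
 */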
181
182
183/**
184 * Looks up a ROM range by its PGMROMRANGE::GCPhys value.
185 */
186DECLINLINE(PPGMROMRANGE) pgmPhysRomLookupByBase(PVMCC pVM, RTGCPHYS GCPhys)
187{
188 for (PPGMROMRANGE pRom = pVM->pgm.s.CTX_SUFF(pRomRanges); pRom; pRom = pRom->CTX_SUFF(pNext))
189 if (pRom->GCPhys == GCPhys)
190 return pRom;
191 return NULL;
192}
193
194#ifndef IN_RING3
195
196/**
197 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
198 * \#PF access handler callback for guest ROM range write access.}
199 *
200 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
201 */
202DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
203 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
204
205{
206 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
207 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
208 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
209 int rc;
210 RT_NOREF(uErrorCode, pvFault);
211
212 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
213
214 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
215 switch (pRom->aPages[iPage].enmProt)
216 {
217 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
218 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
219 {
220 /*
221 * If it's a simple instruction which doesn't change the cpu state
222 * we will simply skip it. Otherwise we'll have to defer it to REM.
223 */
224 uint32_t cbOp;
225 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
226 rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbOp);
227 if ( RT_SUCCESS(rc)
228 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
229 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
230 {
231 switch (pDis->bOpCode)
232 {
233 /** @todo Find other instructions we can safely skip, possibly
234 * adding this kind of detection to DIS or EM. */
235 case OP_MOV:
236 pCtx->rip += cbOp;
237 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
238 return VINF_SUCCESS;
239 }
240 }
241 break;
242 }
243
244 case PGMROMPROT_READ_RAM_WRITE_RAM:
245 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
246 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
247 AssertRC(rc);
248 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
249
250 case PGMROMPROT_READ_ROM_WRITE_RAM:
251 /* Handle it in ring-3 because it's *way* easier there. */
252 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
253 break;
254
255 default:
256 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
257 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
258 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
259 }
260
261 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
262 return VINF_EM_RAW_EMULATE_INSTR;
263}
264
265#endif /* !IN_RING3 */
266
267
268/**
269 * @callback_method_impl{FNPGMPHYSHANDLER,
270 * Access handler callback for ROM write accesses.}
271 *
272 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
273 */
274DECLCALLBACK(VBOXSTRICTRC)
275pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
276 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
277{
278 PPGMROMRANGE const pRom = pgmPhysRomLookupByBase(pVM, uUser);
279 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
280 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
281 Assert(iPage < (pRom->cb >> GUEST_PAGE_SHIFT));
282 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
283
284 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
285 RT_NOREF(pVCpu, pvPhys, enmOrigin);
286
287 if (enmAccessType == PGMACCESSTYPE_READ)
288 {
289 switch (pRomPage->enmProt)
290 {
291 /*
292 * Take the default action.
293 */
294 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
295 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
296 case PGMROMPROT_READ_ROM_WRITE_RAM:
297 case PGMROMPROT_READ_RAM_WRITE_RAM:
298 return VINF_PGM_HANDLER_DO_DEFAULT;
299
300 default:
301 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
302 pRom->aPages[iPage].enmProt, iPage, GCPhys),
303 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
304 }
305 }
306 else
307 {
308 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
309 switch (pRomPage->enmProt)
310 {
311 /*
312 * Ignore writes.
313 */
314 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
315 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
316 return VINF_SUCCESS;
317
318 /*
319 * Write to the RAM page.
320 */
321 case PGMROMPROT_READ_ROM_WRITE_RAM:
322 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
323 {
324 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
325 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
326
327 /*
328 * Take the lock, do lazy allocation, map the page and copy the data.
329 *
330 * Note that we have to bypass the mapping TLB since it works on
331 * guest physical addresses and entering the shadow page would
332 * kind of screw things up...
333 */
334 PGM_LOCK_VOID(pVM);
335
336 PPGMPAGE pShadowPage = &pRomPage->Shadow;
337 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
338 {
339 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
340 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
341 }
342
343 void *pvDstPage;
344 int rc;
345#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
346 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
347 {
348 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
349 rc = VINF_SUCCESS;
350 }
351 else
352#endif
353 {
354 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
355 if (RT_SUCCESS(rc))
356 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
357 }
358 if (RT_SUCCESS(rc))
359 {
360 memcpy((uint8_t *)pvDstPage, pvBuf, cbBuf); /* pvDstPage already includes the page offset (both branches above add it). */
361 pRomPage->LiveSave.fWrittenTo = true;
362
363 AssertMsg( rc == VINF_SUCCESS
364 || ( rc == VINF_PGM_SYNC_CR3
365 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
366 , ("%Rrc\n", rc));
367 rc = VINF_SUCCESS;
368 }
369
370 PGM_UNLOCK(pVM);
371 return rc;
372 }
373
374 default:
375 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
376 pRom->aPages[iPage].enmProt, iPage, GCPhys),
377 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
378 }
379 }
380}
381
382
383/**
384 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
385 */
386static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
387{
388 /*
389 * Get the MMIO2 range.
390 */
391 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), VERR_INTERNAL_ERROR_3);
392 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
393 PPGMREGMMIO2RANGE pMmio2 = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2 - 1];
394 Assert(pMmio2->idMmio2 == hMmio2);
395 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
396 VERR_INTERNAL_ERROR_4);
397
398 /*
399 * Get the page and make sure it's an MMIO2 page.
400 */
401 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
402 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
403 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
404
405 /*
406 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
407 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
408 * page is dirty, saving the need for additional storage (bitmap).)
409 */
410 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
411
412 /*
413 * Disable the handler for this page.
414 */
415 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->RamRange.GCPhys, GCPhys & X86_PTE_PG_MASK);
416 AssertRC(rc);
417#ifndef IN_RING3
418 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
419 {
420 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
421 AssertMsgReturn(rc == VINF_SUCCESS, ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
422 }
423#else
424 RT_NOREF(pVCpu, GCPtr);
425#endif
426 return VINF_SUCCESS;
427}
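/* Flow summary (added comment, not part of the original file): the first write to a
 * clean MMIO2 page marks the whole range dirty (PGMREGMMIO2RANGE_F_IS_DIRTY), then
 * disables the physical handler for just that page so later writes run at full speed;
 * in ring-0 page-fault context the shadow PTE is also made writable so the faulting
 * instruction can simply be restarted instead of emulated.
 */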
428
429
430#ifndef IN_RING3
431/**
432 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
433 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
434 *
435 * @remarks The @a uUser is the MMIO2 index.
436 */
437DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
438 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
439{
440 RT_NOREF(pVCpu, uErrorCode, pCtx);
441 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
442 if (RT_SUCCESS(rcStrict))
443 {
444 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
445 PGM_UNLOCK(pVM);
446 }
447 return rcStrict;
448}
449#endif /* !IN_RING3 */
450
451
452/**
453 * @callback_method_impl{FNPGMPHYSHANDLER,
454 * Access handler callback for MMIO2 dirty page tracing.}
455 *
456 * @remarks The @a uUser is the MMIO2 index.
457 */
458DECLCALLBACK(VBOXSTRICTRC)
459pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
460 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
461{
462 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
463 if (RT_SUCCESS(rcStrict))
464 {
465 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
466 PGM_UNLOCK(pVM);
467 if (rcStrict == VINF_SUCCESS)
468 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
469 }
470 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
471 return rcStrict;
472}
473
474
475/**
476 * Invalidates the RAM range TLBs.
477 *
478 * @param pVM The cross context VM structure.
479 */
480void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
481{
482 PGM_LOCK_VOID(pVM);
483 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
484 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
485 PGM_UNLOCK(pVM);
486}
487
488
489/**
490 * Tests whether a value of type RTGCPHYS would be negative if the type had
491 * been signed instead of unsigned.
492 *
493 * @returns @c true if negative, @c false if positive or zero.
494 * @param a_GCPhys The value to test.
495 * @todo Move me to iprt/types.h.
496 */
497#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
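/* Example (added comment, not part of the original file): with a 64-bit RTGCPHYS this
 * tests bit 63, so RTGCPHYS_IS_NEGATIVE((RTGCPHYS)0 - 1) is true and
 * RTGCPHYS_IS_NEGATIVE(0) is false. The tree walkers below use it on the difference
 * GCPhys - pRam->GCPhys to detect wrap-around, i.e. that GCPhys lies below the current
 * range and the search must continue in the left subtree.
 */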
498
499
500/**
501 * Slow worker for pgmPhysGetRange.
502 *
503 * @copydoc pgmPhysGetRange
504 */
505PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
506{
507 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
508
509 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
510 while (pRam)
511 {
512 RTGCPHYS off = GCPhys - pRam->GCPhys;
513 if (off < pRam->cb)
514 {
515 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
516 return pRam;
517 }
518 if (RTGCPHYS_IS_NEGATIVE(off))
519 pRam = pRam->CTX_SUFF(pLeft);
520 else
521 pRam = pRam->CTX_SUFF(pRight);
522 }
523 return NULL;
524}
525
526
527/**
528 * Slow worker for pgmPhysGetRangeAtOrAbove.
529 *
530 * @copydoc pgmPhysGetRangeAtOrAbove
531 */
532PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
533{
534 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
535
536 PPGMRAMRANGE pLastLeft = NULL;
537 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
538 while (pRam)
539 {
540 RTGCPHYS off = GCPhys - pRam->GCPhys;
541 if (off < pRam->cb)
542 {
543 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
544 return pRam;
545 }
546 if (RTGCPHYS_IS_NEGATIVE(off))
547 {
548 pLastLeft = pRam;
549 pRam = pRam->CTX_SUFF(pLeft);
550 }
551 else
552 pRam = pRam->CTX_SUFF(pRight);
553 }
554 return pLastLeft;
555}
556
557
558/**
559 * Slow worker for pgmPhysGetPage.
560 *
561 * @copydoc pgmPhysGetPage
562 */
563PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
564{
565 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
566
567 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
568 while (pRam)
569 {
570 RTGCPHYS off = GCPhys - pRam->GCPhys;
571 if (off < pRam->cb)
572 {
573 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
574 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
575 }
576
577 if (RTGCPHYS_IS_NEGATIVE(off))
578 pRam = pRam->CTX_SUFF(pLeft);
579 else
580 pRam = pRam->CTX_SUFF(pRight);
581 }
582 return NULL;
583}
584
585
586/**
587 * Slow worker for pgmPhysGetPageEx.
588 *
589 * @copydoc pgmPhysGetPageEx
590 */
591int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
592{
593 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
594
595 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
596 while (pRam)
597 {
598 RTGCPHYS off = GCPhys - pRam->GCPhys;
599 if (off < pRam->cb)
600 {
601 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
602 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
603 return VINF_SUCCESS;
604 }
605
606 if (RTGCPHYS_IS_NEGATIVE(off))
607 pRam = pRam->CTX_SUFF(pLeft);
608 else
609 pRam = pRam->CTX_SUFF(pRight);
610 }
611
612 *ppPage = NULL;
613 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
614}
615
616
617/**
618 * Slow worker for pgmPhysGetPageAndRangeEx.
619 *
620 * @copydoc pgmPhysGetPageAndRangeEx
621 */
622int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
623{
624 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
625
626 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
627 while (pRam)
628 {
629 RTGCPHYS off = GCPhys - pRam->GCPhys;
630 if (off < pRam->cb)
631 {
632 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
633 *ppRam = pRam;
634 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
635 return VINF_SUCCESS;
636 }
637
638 if (RTGCPHYS_IS_NEGATIVE(off))
639 pRam = pRam->CTX_SUFF(pLeft);
640 else
641 pRam = pRam->CTX_SUFF(pRight);
642 }
643
644 *ppRam = NULL;
645 *ppPage = NULL;
646 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
647}
648
649
650/**
651 * Checks if Address Gate 20 is enabled or not.
652 *
653 * @returns true if enabled.
654 * @returns false if disabled.
655 * @param pVCpu The cross context virtual CPU structure.
656 */
657VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
658{
659 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
660 return pVCpu->pgm.s.fA20Enabled;
661}
662
663
664/**
665 * Validates a GC physical address.
666 *
667 * @returns true if valid.
668 * @returns false if invalid.
669 * @param pVM The cross context VM structure.
670 * @param GCPhys The physical address to validate.
671 */
672VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
673{
674 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
675 return pPage != NULL;
676}
677
678
679/**
680 * Checks if a GC physical address is a normal page,
681 * i.e. not ROM, MMIO or reserved.
682 *
683 * @returns true if normal.
684 * @returns false if invalid, ROM, MMIO or reserved page.
685 * @param pVM The cross context VM structure.
686 * @param GCPhys The physical address to check.
687 */
688VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
689{
690 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
691 return pPage
692 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
693}
694
695
696/**
697 * Converts a GC physical address to a HC physical address.
698 *
699 * @returns VINF_SUCCESS on success.
700 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
701 * page but has no physical backing.
702 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
703 * GC physical address.
704 *
705 * @param pVM The cross context VM structure.
706 * @param GCPhys The GC physical address to convert.
707 * @param pHCPhys Where to store the HC physical address on success.
708 */
709VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
710{
711 PGM_LOCK_VOID(pVM);
712 PPGMPAGE pPage;
713 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
714 if (RT_SUCCESS(rc))
715 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
716 PGM_UNLOCK(pVM);
717 return rc;
718}
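/* Illustrative usage sketch (added for clarity, not part of the original file):
 *
 *     RTHCPHYS HCPhys;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *         LogFlow(("GCPhys %RGp -> host physical %RHp\n", GCPhys, HCPhys));
 */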
719
720
721/**
722 * Invalidates all page mapping TLBs.
723 *
724 * @param pVM The cross context VM structure.
725 */
726void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
727{
728 PGM_LOCK_VOID(pVM);
729 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
730
731 /* Clear the R3 & R0 TLBs completely. */
732 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
733 {
734 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
735 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
736 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
737 }
738
739 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
740 {
741 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
742 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
743 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
744 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
745 }
746
747 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
748 PGM_UNLOCK(pVM);
749}
750
751
752/**
753 * Invalidates a page mapping TLB entry
754 *
755 * @param pVM The cross context VM structure.
756 * @param GCPhys GCPhys entry to flush
757 *
758 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
759 * when needed.
760 */
761void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
762{
763 PGM_LOCK_ASSERT_OWNER(pVM);
764
765 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
766
767 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
768
769 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
770 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
771 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
772
773 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
774 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
775 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
776 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
777}
778
779
780/**
781 * Makes sure that there is at least one handy page ready for use.
782 *
783 * This will also take the appropriate actions when reaching water-marks.
784 *
785 * @returns VBox status code.
786 * @retval VINF_SUCCESS on success.
787 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
788 *
789 * @param pVM The cross context VM structure.
790 *
791 * @remarks Must be called from within the PGM critical section. It may
792 * nip back to ring-3/0 in some cases.
793 */
794static int pgmPhysEnsureHandyPage(PVMCC pVM)
795{
796 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
797
798 /*
799 * Do we need to do anything special?
800 */
801#ifdef IN_RING3
802 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
803#else
804 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
805#endif
806 {
807 /*
808 * Allocate pages only if we're out of them, or in ring-3, almost out.
809 */
810#ifdef IN_RING3
811 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
812#else
813 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
814#endif
815 {
816 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
817 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
818#ifdef IN_RING3
819 int rc = PGMR3PhysAllocateHandyPages(pVM);
820#else
821 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
822#endif
823 if (RT_UNLIKELY(rc != VINF_SUCCESS))
824 {
825 if (RT_FAILURE(rc))
826 return rc;
827 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
828 if (!pVM->pgm.s.cHandyPages)
829 {
830 LogRel(("PGM: no more handy pages!\n"));
831 return VERR_EM_NO_MEMORY;
832 }
833 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
834 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
835#ifndef IN_RING3
836 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
837#endif
838 }
839 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
840 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
841 ("%u\n", pVM->pgm.s.cHandyPages),
842 VERR_PGM_HANDY_PAGE_IPE);
843 }
844 else
845 {
846 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
847 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
848#ifndef IN_RING3
849 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
850 {
851 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
852 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
853 }
854#endif
855 }
856 }
857
858 return VINF_SUCCESS;
859}
860
861
862/**
863 * Replace a zero or shared page with a new page that we can write to.
864 *
865 * @returns The following VBox status codes.
866 * @retval VINF_SUCCESS on success, pPage is modified.
867 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
868 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
869 *
870 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
871 *
872 * @param pVM The cross context VM structure.
873 * @param pPage The physical page tracking structure. This will
874 * be modified on success.
875 * @param GCPhys The address of the page.
876 *
877 * @remarks Must be called from within the PGM critical section. It may
878 * nip back to ring-3/0 in some cases.
879 *
880 * @remarks This function shouldn't really fail, however if it does
881 * it probably means we've screwed up the size of handy pages and/or
882 * the low-water mark. Or, that some device I/O is causing a lot of
883 * pages to be allocated while the host is in a low-memory
884 * condition. This latter should be handled elsewhere and in a more
885 * controlled manner, it's on the @bugref{3170} todo list...
886 */
887int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
888{
889 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
890
891 /*
892 * Prereqs.
893 */
894 PGM_LOCK_ASSERT_OWNER(pVM);
895 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
896 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
897
898# ifdef PGM_WITH_LARGE_PAGES
899 /*
900 * Try allocate a large page if applicable.
901 */
902 if ( PGMIsUsingLargePages(pVM)
903 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
904 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
905 {
906 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
907 PPGMPAGE pBasePage;
908
909 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
910 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
911 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
912 {
913 rc = pgmPhysAllocLargePage(pVM, GCPhys);
914 if (rc == VINF_SUCCESS)
915 return rc;
916 }
917 /* Mark the base as type page table, so we don't check over and over again. */
918 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
919
920 /* fall back to 4KB pages. */
921 }
922# endif
923
924 /*
925 * Flush any shadow page table mappings of the page.
926 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
927 */
928 bool fFlushTLBs = false;
929 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
930 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
931
932 /*
933 * Ensure that we've got a page handy, take it and use it.
934 */
935 int rc2 = pgmPhysEnsureHandyPage(pVM);
936 if (RT_FAILURE(rc2))
937 {
938 if (fFlushTLBs)
939 PGM_INVL_ALL_VCPU_TLBS(pVM);
940 Assert(rc2 == VERR_EM_NO_MEMORY);
941 return rc2;
942 }
943 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
944 PGM_LOCK_ASSERT_OWNER(pVM);
945 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
946 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
947
948 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
949 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
950 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
951 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
952 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
953 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
954
955 /*
956 * There are one or two actions to be taken the next time we allocate handy pages:
957 * - Tell the GMM (global memory manager) what the page is being used for.
958 * (Speeds up replacement operations - sharing and defragmenting.)
959 * - If the current backing is shared, it must be freed.
960 */
961 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
962 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
963
964 void const *pvSharedPage = NULL;
965 if (PGM_PAGE_IS_SHARED(pPage))
966 {
967 /* Mark this shared page for freeing/dereferencing. */
968 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
969 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
970
971 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
972 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
973 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
974 pVM->pgm.s.cSharedPages--;
975
976 /* Grab the address of the page so we can make a copy later on. (safe) */
977 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
978 AssertRC(rc);
979 }
980 else
981 {
982 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
983 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
984 pVM->pgm.s.cZeroPages--;
985 }
986
987 /*
988 * Do the PGMPAGE modifications.
989 */
990 pVM->pgm.s.cPrivatePages++;
991 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
992 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
993 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
994 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
995 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
996 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
997
998 /* Copy the shared page contents to the replacement page. */
999 if (pvSharedPage)
1000 {
1001 /* Get the virtual address of the new page. */
1002 PGMPAGEMAPLOCK PgMpLck;
1003 void *pvNewPage;
1004 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
1005 if (RT_SUCCESS(rc))
1006 {
1007 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
1008 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
1009 }
1010 }
1011
1012 if ( fFlushTLBs
1013 && rc != VINF_PGM_GCPHYS_ALIASED)
1014 PGM_INVL_ALL_VCPU_TLBS(pVM);
1015
1016 /*
1017 * Notify NEM about the mapping change for this page.
1018 *
1019 * Note! Shadow ROM pages are complicated as they can definitely be
1020 * allocated while not visible, so play safe.
1021 */
1022 if (VM_IS_NEM_ENABLED(pVM))
1023 {
1024 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1025 if ( enmType != PGMPAGETYPE_ROM_SHADOW
1026 || pgmPhysGetPage(pVM, GCPhys) == pPage)
1027 {
1028 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1029 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
1030 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1031 if (RT_SUCCESS(rc))
1032 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1033 else
1034 rc = rc2;
1035 }
1036 }
1037
1038 return rc;
1039}
1040
1041#ifdef PGM_WITH_LARGE_PAGES
1042
1043/**
1044 * Replace a 2 MB range of zero pages with new pages that we can write to.
1045 *
1046 * @returns The following VBox status codes.
1047 * @retval VINF_SUCCESS on success, pPage is modified.
1048 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1049 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
1050 *
1051 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
1052 *
1053 * @param pVM The cross context VM structure.
1054 * @param GCPhys The address of the page.
1055 *
1056 * @remarks Must be called from within the PGM critical section. It may block
1057 * on GMM and host mutexes/locks, leaving HM context.
1058 */
1059int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
1060{
1061 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
1062 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
1063 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1064
1065 /*
1066 * Check Prereqs.
1067 */
1068 PGM_LOCK_ASSERT_OWNER(pVM);
1069 Assert(PGMIsUsingLargePages(pVM));
1070
1071 /*
1072 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
1073 */
1074 PPGMPAGE pFirstPage;
1075 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
1076 if ( RT_SUCCESS(rc)
1077 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
1078 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
1079 {
1080 /*
1081 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
1082 * since they are unallocated.
1083 */
1084 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
1085 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
1086 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
1087 {
1088 /*
1089 * Now, make sure all the other pages in the 2 MB range are in the same state.
1090 */
1091 GCPhys = GCPhysBase;
1092 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
1093 while (cLeft-- > 0)
1094 {
1095 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
1096 if ( pSubPage
1097 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
1098 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
1099 {
1100 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
1101 GCPhys += GUEST_PAGE_SIZE;
1102 }
1103 else
1104 {
1105 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
1106 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
1107
1108 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
1109 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
1110 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
1111 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1112 }
1113 }
1114
1115 /*
1116 * Do the allocation.
1117 */
1118# ifdef IN_RING3
1119 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
1120# elif defined(IN_RING0)
1121 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
1122# else
1123# error "Port me"
1124# endif
1125 if (RT_SUCCESS(rc))
1126 {
1127 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
1128 pVM->pgm.s.cLargePages++;
1129 return VINF_SUCCESS;
1130 }
1131
1132 /* If we fail once, it most likely means the host's memory is too
1133 fragmented; don't bother trying again. */
1134 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
1135 return rc;
1136 }
1137 }
1138 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1139}
1140
1141
1142/**
1143 * Recheck the entire 2 MB range to see if we can use it again as a large page.
1144 *
1145 * @returns The following VBox status codes.
1146 * @retval VINF_SUCCESS on success, the large page can be used again
1147 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
1148 *
1149 * @param pVM The cross context VM structure.
1150 * @param GCPhys The address of the page.
1151 * @param pLargePage Page structure of the base page
1152 */
1153int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
1154{
1155 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
1156
1157 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
1158
1159 AssertCompile(X86_PDE2M_PAE_PG_MASK == EPT_PDE2M_PG_MASK); /* Paranoia: Caller uses this for guest EPT tables as well. */
1160 GCPhys &= X86_PDE2M_PAE_PG_MASK;
1161
1162 /* Check the base page. */
1163 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
1164 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
1165 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1166 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1167 {
1168 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1169 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1170 }
1171
1172 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1173 /* Check all remaining pages in the 2 MB range. */
1174 unsigned i;
1175 GCPhys += GUEST_PAGE_SIZE;
1176 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
1177 {
1178 PPGMPAGE pPage;
1179 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1180 AssertRCBreak(rc);
1181
1182 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1183 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1184 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1185 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1186 {
1187 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1188 break;
1189 }
1190
1191 GCPhys += GUEST_PAGE_SIZE;
1192 }
1193 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
1194
1195 if (i == _2M / GUEST_PAGE_SIZE)
1196 {
1197 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1198 pVM->pgm.s.cLargePagesDisabled--;
1199 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1200 return VINF_SUCCESS;
1201 }
1202
1203 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1204}
1205
1206#endif /* PGM_WITH_LARGE_PAGES */
1207
1208
1209/**
1210 * Deal with a write monitored page.
1211 *
1212 * @returns VBox strict status code.
1213 *
1214 * @param pVM The cross context VM structure.
1215 * @param pPage The physical page tracking structure.
1216 * @param GCPhys The guest physical address of the page.
1217 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1218 * very unlikely situation where it is okay that we let NEM
1219 * fix the page access in a lazy fashion.
1220 *
1221 * @remarks Called from within the PGM critical section.
1222 */
1223void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1224{
1225 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1226 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1227 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1228 Assert(pVM->pgm.s.cMonitoredPages > 0);
1229 pVM->pgm.s.cMonitoredPages--;
1230 pVM->pgm.s.cWrittenToPages++;
1231
1232#ifdef VBOX_WITH_NATIVE_NEM
1233 /*
1234 * Notify NEM about the protection change so we won't spin forever.
1235 *
1236 * Note! NEM needs to be able to lazily correct page protection as we cannot
1237 * really get it 100% right here, it seems. The page pool does this too.
1238 */
1239 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1240 {
1241 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1242 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1243 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1244 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1245 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
1246 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1247 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1248 }
1249#else
1250 RT_NOREF(GCPhys);
1251#endif
1252}
1253
1254
1255/**
1256 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1257 *
1258 * @returns VBox strict status code.
1259 * @retval VINF_SUCCESS on success.
1260 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1261 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1262 *
1263 * @param pVM The cross context VM structure.
1264 * @param pPage The physical page tracking structure.
1265 * @param GCPhys The address of the page.
1266 *
1267 * @remarks Called from within the PGM critical section.
1268 */
1269int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1270{
1271 PGM_LOCK_ASSERT_OWNER(pVM);
1272 switch (PGM_PAGE_GET_STATE(pPage))
1273 {
1274 case PGM_PAGE_STATE_WRITE_MONITORED:
1275 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1276 RT_FALL_THRU();
1277 default: /* to shut up GCC */
1278 case PGM_PAGE_STATE_ALLOCATED:
1279 return VINF_SUCCESS;
1280
1281 /*
1282 * Zero pages can be dummy pages for MMIO or reserved memory,
1283 * so we need to check the flags before joining cause with
1284 * shared page replacement.
1285 */
1286 case PGM_PAGE_STATE_ZERO:
1287 if (PGM_PAGE_IS_MMIO(pPage))
1288 return VERR_PGM_PHYS_PAGE_RESERVED;
1289 RT_FALL_THRU();
1290 case PGM_PAGE_STATE_SHARED:
1291 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1292
1293 /* Not allowed to write to ballooned pages. */
1294 case PGM_PAGE_STATE_BALLOONED:
1295 return VERR_PGM_PHYS_PAGE_BALLOONED;
1296 }
1297}
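/* State summary for the switch above (added comment, not part of the original file):
 *   WRITE_MONITORED -> write monitoring is removed and the page is treated as ALLOCATED.
 *   ALLOCATED       -> already writable, nothing to do.
 *   ZERO            -> MMIO dummy pages are refused with VERR_PGM_PHYS_PAGE_RESERVED,
 *                      otherwise a real page is allocated via pgmPhysAllocPage.
 *   SHARED          -> replaced by a private copy via pgmPhysAllocPage.
 *   BALLOONED       -> writes are refused with VERR_PGM_PHYS_PAGE_BALLOONED.
 */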
1298
1299
1300/**
1301 * Internal usage: Map the page specified by its GMM ID.
1302 *
1303 * This is similar to pgmPhysPageMap
1304 *
1305 * @returns VBox status code.
1306 *
1307 * @param pVM The cross context VM structure.
1308 * @param idPage The Page ID.
1309 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1310 * @param ppv Where to store the mapping address.
1311 *
1312 * @remarks Called from within the PGM critical section. The mapping is only
1313 * valid while you are inside this section.
1314 */
1315int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1316{
1317 /*
1318 * Validation.
1319 */
1320 PGM_LOCK_ASSERT_OWNER(pVM);
1321 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1322 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1323 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1324
1325#ifdef IN_RING0
1326# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1327 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
1328# else
1329 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1330# endif
1331
1332#else
1333 /*
1334 * Find/make Chunk TLB entry for the mapping chunk.
1335 */
1336 PPGMCHUNKR3MAP pMap;
1337 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1338 if (pTlbe->idChunk == idChunk)
1339 {
1340 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1341 pMap = pTlbe->pChunk;
1342 }
1343 else
1344 {
1345 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1346
1347 /*
1348 * Find the chunk, map it if necessary.
1349 */
1350 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1351 if (pMap)
1352 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1353 else
1354 {
1355 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1356 if (RT_FAILURE(rc))
1357 return rc;
1358 }
1359
1360 /*
1361 * Enter it into the Chunk TLB.
1362 */
1363 pTlbe->idChunk = idChunk;
1364 pTlbe->pChunk = pMap;
1365 }
1366
1367 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
1368 return VINF_SUCCESS;
1369#endif
1370}
1371
1372
1373/**
1374 * Maps a page into the current virtual address space so it can be accessed.
1375 *
1376 * @returns VBox status code.
1377 * @retval VINF_SUCCESS on success.
1378 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1379 *
1380 * @param pVM The cross context VM structure.
1381 * @param pPage The physical page tracking structure.
1382 * @param GCPhys The address of the page.
1383 * @param ppMap Where to store the address of the mapping tracking structure.
1384 * @param ppv Where to store the mapping address of the page. The page
1385 * offset is masked off!
1386 *
1387 * @remarks Called from within the PGM critical section.
1388 */
1389static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1390{
1391 PGM_LOCK_ASSERT_OWNER(pVM);
1392 NOREF(GCPhys);
1393
1394 /*
1395 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1396 */
1397 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1398 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1399 {
1400 /* Decode the page id to a page in a MMIO2 ram range. */
1401 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1402 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1403 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1404 ("idMmio2=%u size=%u type=%u GCPhys=%#RGp Id=%u State=%u", idMmio2,
1405 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1406 pPage->s.idPage, pPage->s.uStateY),
1407 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1408 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1409 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1410 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1411 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1412 *ppMap = NULL;
1413# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1414 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1415# elif defined(IN_RING0)
1416 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1417 return VINF_SUCCESS;
1418# else
1419 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
1420 return VINF_SUCCESS;
1421# endif
1422 }
1423
1424# ifdef VBOX_WITH_PGM_NEM_MODE
1425 if (pVM->pgm.s.fNemMode)
1426 {
1427# ifdef IN_RING3
1428 /*
1429 * Find the corresponding RAM range and use that to locate the mapping address.
1430 */
1431 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
1432 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
1433 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
1434 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
1435 Assert(pPage == &pRam->aPages[idxPage]);
1436 *ppMap = NULL;
1437 *ppv = (uint8_t *)pRam->pvR3 + (idxPage << GUEST_PAGE_SHIFT);
1438 return VINF_SUCCESS;
1439# else
1440 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
1441# endif
1442 }
1443# endif
1444
1445 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1446 if (idChunk == NIL_GMM_CHUNKID)
1447 {
1448 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1449 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1450 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1451 {
1452 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1453 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1454 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1455 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1456 *ppv = pVM->pgm.s.abZeroPg;
1457 }
1458 else
1459 *ppv = pVM->pgm.s.abZeroPg;
1460 *ppMap = NULL;
1461 return VINF_SUCCESS;
1462 }
1463
1464# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1465 /*
1466 * Just use the physical address.
1467 */
1468 *ppMap = NULL;
1469 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1470
1471# elif defined(IN_RING0)
1472 /*
1473 * Go by page ID thru GMMR0.
1474 */
1475 *ppMap = NULL;
1476 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1477
1478# else
1479 /*
1480 * Find/make Chunk TLB entry for the mapping chunk.
1481 */
1482 PPGMCHUNKR3MAP pMap;
1483 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1484 if (pTlbe->idChunk == idChunk)
1485 {
1486 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1487 pMap = pTlbe->pChunk;
1488 AssertPtr(pMap->pv);
1489 }
1490 else
1491 {
1492 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1493
1494 /*
1495 * Find the chunk, map it if necessary.
1496 */
1497 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1498 if (pMap)
1499 {
1500 AssertPtr(pMap->pv);
1501 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1502 }
1503 else
1504 {
1505 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1506 if (RT_FAILURE(rc))
1507 return rc;
1508 AssertPtr(pMap->pv);
1509 }
1510
1511 /*
1512 * Enter it into the Chunk TLB.
1513 */
1514 pTlbe->idChunk = idChunk;
1515 pTlbe->pChunk = pMap;
1516 }
1517
1518 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
1519 *ppMap = pMap;
1520 return VINF_SUCCESS;
1521# endif /* !IN_RING0 */
1522}
1523
1524
1525/**
1526 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1527 *
1528 * This is typically used in paths where we cannot use the TLB methods (like ROM
1529 * pages) or where there is no point in using them since we won't get many hits.
1530 *
1531 * @returns VBox strict status code.
1532 * @retval VINF_SUCCESS on success.
1533 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1534 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1535 *
1536 * @param pVM The cross context VM structure.
1537 * @param pPage The physical page tracking structure.
1538 * @param GCPhys The address of the page.
1539 * @param ppv Where to store the mapping address of the page. The page
1540 * offset is masked off!
1541 *
1542 * @remarks Called from within the PGM critical section. The mapping is only
1543 * valid while you are inside the section.
1544 */
1545int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1546{
1547 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1548 if (RT_SUCCESS(rc))
1549 {
1550 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1551 PPGMPAGEMAP pMapIgnore;
1552 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1553 if (RT_FAILURE(rc2)) /* preserve rc */
1554 rc = rc2;
1555 }
1556 return rc;
1557}
1558
1559
1560/**
1561 * Maps a page into the current virtual address space so it can be accessed for
1562 * both writing and reading.
1563 *
1564 * This is typically used in paths where we cannot use the TLB methods (like ROM
1565 * pages) or where there is no point in using them since we won't get many hits.
1566 *
1567 * @returns VBox status code.
1568 * @retval VINF_SUCCESS on success.
1569 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1570 *
1571 * @param pVM The cross context VM structure.
1572 * @param pPage The physical page tracking structure. Must be in the
1573 * allocated state.
1574 * @param GCPhys The address of the page.
1575 * @param ppv Where to store the mapping address of the page. The page
1576 * offset is masked off!
1577 *
1578 * @remarks Called from within the PGM critical section. The mapping is only
1579 * valid while you are inside the section.
1580 */
1581int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1582{
1583 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1584 PPGMPAGEMAP pMapIgnore;
1585 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1586}
1587
1588
1589/**
1590 * Maps a page into the current virtual address space so it can be accessed for
1591 * reading.
1592 *
1593 * This is typically used in paths where we cannot use the TLB methods (like ROM
1594 * pages) or where there is no point in using them since we won't get many hits.
1595 *
1596 * @returns VBox status code.
1597 * @retval VINF_SUCCESS on success.
1598 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1599 *
1600 * @param pVM The cross context VM structure.
1601 * @param pPage The physical page tracking structure.
1602 * @param GCPhys The address of the page.
1603 * @param ppv Where to store the mapping address of the page. The page
1604 * offset is masked off!
1605 *
1606 * @remarks Called from within the PGM critical section. The mapping is only
1607 * valid while you are inside this section.
1608 */
1609int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1610{
1611 PPGMPAGEMAP pMapIgnore;
1612 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1613}
1614
1615
1616/**
1617 * Load a guest page into the ring-3 physical TLB.
1618 *
1619 * @returns VBox status code.
1620 * @retval VINF_SUCCESS on success
1621 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1622 * @param pVM The cross context VM structure.
1623 * @param GCPhys The guest physical address in question.
1624 */
1625int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1626{
1627 PGM_LOCK_ASSERT_OWNER(pVM);
1628
1629 /*
1630 * Find the ram range and page and hand it over to the with-page function.
1631 * 99.8% of requests are expected to be in the first range.
1632 */
1633 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1634 if (!pPage)
1635 {
1636 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1637 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1638 }
1639
1640 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1641}
1642
1643
1644/**
1645 * Load a guest page into the ring-3 physical TLB.
1646 *
1647 * @returns VBox status code.
1648 * @retval VINF_SUCCESS on success
1649 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1650 *
1651 * @param pVM The cross context VM structure.
1652 * @param pPage Pointer to the PGMPAGE structure corresponding to
1653 * GCPhys.
1654 * @param GCPhys The guest physical address in question.
1655 */
1656int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1657{
1658 PGM_LOCK_ASSERT_OWNER(pVM);
1659 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
1660
1661 /*
1662 * Map the page.
1663 * Make a special case for the zero page as it is kind of special.
1664 */
1665 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1666 if ( !PGM_PAGE_IS_ZERO(pPage)
1667 && !PGM_PAGE_IS_BALLOONED(pPage))
1668 {
1669 void *pv;
1670 PPGMPAGEMAP pMap;
1671 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1672 if (RT_FAILURE(rc))
1673 return rc;
1674# ifndef IN_RING0
1675 pTlbe->pMap = pMap;
1676# endif
1677 pTlbe->pv = pv;
1678 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
1679 }
1680 else
1681 {
1682 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1683# ifndef IN_RING0
1684 pTlbe->pMap = NULL;
1685# endif
1686 pTlbe->pv = pVM->pgm.s.abZeroPg;
1687 }
1688# ifdef PGM_WITH_PHYS_TLB
1689 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1690 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1691 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1692 else
1693 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1694# else
1695 pTlbe->GCPhys = NIL_RTGCPHYS;
1696# endif
1697 pTlbe->pPage = pPage;
1698 return VINF_SUCCESS;
1699}
1700
1701
1702/**
1703 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1704 * own the PGM lock and therefore not need to lock the mapped page.
1705 *
1706 * @returns VBox status code.
1707 * @retval VINF_SUCCESS on success.
1708 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1709 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1710 *
1711 * @param pVM The cross context VM structure.
1712 * @param GCPhys The guest physical address of the page that should be mapped.
1713 * @param pPage Pointer to the PGMPAGE structure for the page.
1714 * @param ppv Where to store the address corresponding to GCPhys.
1715 *
1716 * @internal
1717 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1718 */
1719int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1720{
1721 int rc;
1722 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1723 PGM_LOCK_ASSERT_OWNER(pVM);
1724 pVM->pgm.s.cDeprecatedPageLocks++;
1725
1726 /*
1727 * Make sure the page is writable.
1728 */
1729 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1730 {
1731 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1732 if (RT_FAILURE(rc))
1733 return rc;
1734 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1735 }
1736 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1737
1738 /*
1739 * Get the mapping address.
1740 */
1741 PPGMPAGEMAPTLBE pTlbe;
1742 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1743 if (RT_FAILURE(rc))
1744 return rc;
1745 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1746 return VINF_SUCCESS;
1747}
1748
1749
1750/**
1751 * Locks a page mapping for writing.
1752 *
1753 * @param pVM The cross context VM structure.
1754 * @param pPage The page.
1755 * @param pTlbe The mapping TLB entry for the page.
1756 * @param pLock The lock structure (output).
1757 */
1758DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1759{
1760# ifndef IN_RING0
1761 PPGMPAGEMAP pMap = pTlbe->pMap;
1762 if (pMap)
1763 pMap->cRefs++;
1764# else
1765 RT_NOREF(pTlbe);
1766# endif
1767
1768 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1769 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1770 {
1771 if (cLocks == 0)
1772 pVM->pgm.s.cWriteLockedPages++;
1773 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1774 }
1775 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1776 {
1777 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1778 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1779# ifndef IN_RING0
1780 if (pMap)
1781 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1782# endif
1783 }
1784
1785 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1786# ifndef IN_RING0
1787 pLock->pvMap = pMap;
1788# else
1789 pLock->pvMap = NULL;
1790# endif
1791}
1792
1793/**
1794 * Locks a page mapping for reading.
1795 *
1796 * @param pVM The cross context VM structure.
1797 * @param pPage The page.
1798 * @param pTlbe The mapping TLB entry for the page.
1799 * @param pLock The lock structure (output).
1800 */
1801DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1802{
1803# ifndef IN_RING0
1804 PPGMPAGEMAP pMap = pTlbe->pMap;
1805 if (pMap)
1806 pMap->cRefs++;
1807# else
1808 RT_NOREF(pTlbe);
1809# endif
1810
1811 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1812 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1813 {
1814 if (cLocks == 0)
1815 pVM->pgm.s.cReadLockedPages++;
1816 PGM_PAGE_INC_READ_LOCKS(pPage);
1817 }
1818 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1819 {
1820 PGM_PAGE_INC_READ_LOCKS(pPage);
1821 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1822# ifndef IN_RING0
1823 if (pMap)
1824 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1825# endif
1826 }
1827
1828 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1829# ifndef IN_RING0
1830 pLock->pvMap = pMap;
1831# else
1832 pLock->pvMap = NULL;
1833# endif
1834}
1835
1836
1837/**
1838 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1839 * own the PGM lock and have access to the page structure.
1840 *
1841 * @returns VBox status code.
1842 * @retval VINF_SUCCESS on success.
1843 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1844 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1845 *
1846 * @param pVM The cross context VM structure.
1847 * @param GCPhys The guest physical address of the page that should be mapped.
1848 * @param pPage Pointer to the PGMPAGE structure for the page.
1849 * @param ppv Where to store the address corresponding to GCPhys.
1850 * @param pLock Where to store the lock information that
1851 * pgmPhysReleaseInternalPageMappingLock needs.
1852 *
1853 * @internal
1854 */
1855int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1856{
1857 int rc;
1858 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1859 PGM_LOCK_ASSERT_OWNER(pVM);
1860
1861 /*
1862 * Make sure the page is writable.
1863 */
1864 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1865 {
1866 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1867 if (RT_FAILURE(rc))
1868 return rc;
1869 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1870 }
1871 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1872
1873 /*
1874 * Do the job.
1875 */
1876 PPGMPAGEMAPTLBE pTlbe;
1877 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1878 if (RT_FAILURE(rc))
1879 return rc;
1880 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1881 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1882 return VINF_SUCCESS;
1883}
1884
1885
1886/**
1887 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1888 * own the PGM lock and have access to the page structure.
1889 *
1890 * @returns VBox status code.
1891 * @retval VINF_SUCCESS on success.
1892 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1893 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1894 *
1895 * @param pVM The cross context VM structure.
1896 * @param GCPhys The guest physical address of the page that should be mapped.
1897 * @param pPage Pointer to the PGMPAGE structure for the page.
1898 * @param ppv Where to store the address corresponding to GCPhys.
1899 * @param pLock Where to store the lock information that
1900 * pgmPhysReleaseInternalPageMappingLock needs.
1901 *
1902 * @internal
1903 */
1904int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1905{
1906 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1907 PGM_LOCK_ASSERT_OWNER(pVM);
1908 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1909
1910 /*
1911 * Do the job.
1912 */
1913 PPGMPAGEMAPTLBE pTlbe;
1914 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1915 if (RT_FAILURE(rc))
1916 return rc;
1917 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1918 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1919 return VINF_SUCCESS;
1920}
1921
1922
1923/**
1924 * Requests the mapping of a guest page into the current context.
1925 *
1926 * This API should only be used for very short-term mappings, as it will consume scarce
1927 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1928 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1929 *
1930 * This API will assume your intention is to write to the page, and will
1931 * therefore replace shared and zero pages. If you do not intend to modify
1932 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1933 *
1934 * @returns VBox status code.
1935 * @retval VINF_SUCCESS on success.
1936 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1937 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1938 *
1939 * @param pVM The cross context VM structure.
1940 * @param GCPhys The guest physical address of the page that should be
1941 * mapped.
1942 * @param ppv Where to store the address corresponding to GCPhys.
1943 * @param pLock Where to store the lock information that
1944 * PGMPhysReleasePageMappingLock needs.
1945 *
1946 * @remarks The caller is responsible for dealing with access handlers.
1947 * @todo Add an informational return code for pages with access handlers?
1948 *
1949 * @remark Avoid calling this API from within critical sections (other than
1950 * the PGM one) because of the deadlock risk. External threads may
1951 * need to delegate jobs to the EMTs.
1952 * @remarks Only one page is mapped! Make no assumption about what's after or
1953 * before the returned page!
1954 * @thread Any thread.
1955 */
1956VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1957{
1958 int rc = PGM_LOCK(pVM);
1959 AssertRCReturn(rc, rc);
1960
1961 /*
1962 * Query the Physical TLB entry for the page (may fail).
1963 */
1964 PPGMPAGEMAPTLBE pTlbe;
1965 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1966 if (RT_SUCCESS(rc))
1967 {
1968 /*
1969 * If the page is shared, the zero page, or being write monitored
1970 * it must be converted to a page that's writable if possible.
1971 */
1972 PPGMPAGE pPage = pTlbe->pPage;
1973 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1974 {
1975 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1976 if (RT_SUCCESS(rc))
1977 {
1978 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1979 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1980 }
1981 }
1982 if (RT_SUCCESS(rc))
1983 {
1984 /*
1985 * Now, just perform the locking and calculate the return address.
1986 */
1987 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1988 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
1989 }
1990 }
1991
1992 PGM_UNLOCK(pVM);
1993 return rc;
1994}
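
/*
 * Illustrative usage sketch for the API above; not built, and the helper
 * name and byte-poke scenario are invented for illustration only.  It shows
 * the intended pairing of PGMPhysGCPhys2CCPtr with
 * PGMPhysReleasePageMappingLock and the single-page scope of the mapping.
 */
#if 0
static int examplePokeGuestByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock); /* replaces shared/zero pages as we intend to write */
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                   /* only the page containing GCPhys is mapped */
        PGMPhysReleasePageMappingLock(pVM, &Lock); /* release ASAP, the mapping cache is a scarce resource */
    }
    return rc;
}
#endif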
1995
1996
1997/**
1998 * Requests the mapping of a guest page into the current context.
1999 *
2000 * This API should only be used for very short-term mappings, as it will consume scarce
2001 * resources (R0 and GC) in the mapping cache. When you're done with the page,
2002 * call PGMPhysReleasePageMappingLock() ASAP to release it.
2003 *
2004 * @returns VBox status code.
2005 * @retval VINF_SUCCESS on success.
2006 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2007 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2008 *
2009 * @param pVM The cross context VM structure.
2010 * @param GCPhys The guest physical address of the page that should be
2011 * mapped.
2012 * @param ppv Where to store the address corresponding to GCPhys.
2013 * @param pLock Where to store the lock information that
2014 * PGMPhysReleasePageMappingLock needs.
2015 *
2016 * @remarks The caller is responsible for dealing with access handlers.
2017 * @todo Add an informational return code for pages with access handlers?
2018 *
2019 * @remarks Avoid calling this API from within critical sections (other than
2020 * the PGM one) because of the deadlock risk.
2021 * @remarks Only one page is mapped! Make no assumption about what's after or
2022 * before the returned page!
2023 * @thread Any thread.
2024 */
2025VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
2026{
2027 int rc = PGM_LOCK(pVM);
2028 AssertRCReturn(rc, rc);
2029
2030 /*
2031 * Query the Physical TLB entry for the page (may fail).
2032 */
2033 PPGMPAGEMAPTLBE pTlbe;
2034 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
2035 if (RT_SUCCESS(rc))
2036 {
2037        /* MMIO pages don't have any readable backing. */
2038 PPGMPAGE pPage = pTlbe->pPage;
2039 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
2040 rc = VERR_PGM_PHYS_PAGE_RESERVED;
2041 else
2042 {
2043 /*
2044 * Now, just perform the locking and calculate the return address.
2045 */
2046 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
2047 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
2048 }
2049 }
2050
2051 PGM_UNLOCK(pVM);
2052 return rc;
2053}
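
/*
 * Illustrative sketch for the read-only variant above; not built, helper
 * name invented.  Unlike the writable mapping, this leaves shared and zero
 * pages untouched, and MMIO/special alias pages are rejected with
 * VERR_PGM_PHYS_PAGE_RESERVED.
 */
#if 0
static int examplePeekGuestByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif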
2054
2055
2056/**
2057 * Requests the mapping of a guest page given by virtual address into the current context.
2058 *
2059 * This API should only be used for very short-term mappings, as it will consume
2060 * scarce resources (R0 and GC) in the mapping cache. When you're done
2061 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2062 *
2063 * This API will assume your intention is to write to the page, and will
2064 * therefore replace shared and zero pages. If you do not intend to modify
2065 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2066 *
2067 * @returns VBox status code.
2068 * @retval VINF_SUCCESS on success.
2069 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2070 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2071 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2072 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2073 *
2074 * @param pVCpu The cross context virtual CPU structure.
2075 * @param GCPtr The guest virtual address of the page that should be
2076 * mapped.
2077 * @param ppv Where to store the address corresponding to GCPtr.
2078 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2079 *
2080 * @remark Avoid calling this API from within critical sections (other than
2081 * the PGM one) because of the deadlock risk.
2082 * @thread EMT
2083 */
2084VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2085{
2086 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2087 RTGCPHYS GCPhys;
2088 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2089 if (RT_SUCCESS(rc))
2090 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2091 return rc;
2092}
2093
2094
2095/**
2096 * Requests the mapping of a guest page given by virtual address into the current context.
2097 *
2098 * This API should only be used for very short-term mappings, as it will consume
2099 * scarce resources (R0 and GC) in the mapping cache. When you're done
2100 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2101 *
2102 * @returns VBox status code.
2103 * @retval VINF_SUCCESS on success.
2104 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2105 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2106 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2107 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2108 *
2109 * @param pVCpu The cross context virtual CPU structure.
2110 * @param GCPtr The guest virtual address of the page that should be
2111 * mapped.
2112 * @param ppv Where to store the address corresponding to GCPtr.
2113 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2114 *
2115 * @remark Avoid calling this API from within critical sections (other than
2116 * the PGM one) because of the deadlock risk.
2117 * @thread EMT
2118 */
2119VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2120{
2121 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2122 RTGCPHYS GCPhys;
2123 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2124 if (RT_SUCCESS(rc))
2125 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2126 return rc;
2127}
2128
2129
2130/**
2131 * Release the mapping of a guest page.
2132 *
2133 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2134 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2135 *
2136 * @param pVM The cross context VM structure.
2137 * @param pLock The lock structure initialized by the mapping function.
2138 */
2139VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2140{
2141# ifndef IN_RING0
2142 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2143# endif
2144 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2145 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2146
2147 pLock->uPageAndType = 0;
2148 pLock->pvMap = NULL;
2149
2150 PGM_LOCK_VOID(pVM);
2151 if (fWriteLock)
2152 {
2153 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2154 Assert(cLocks > 0);
2155 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2156 {
2157 if (cLocks == 1)
2158 {
2159 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2160 pVM->pgm.s.cWriteLockedPages--;
2161 }
2162 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2163 }
2164
2165 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2166 { /* probably extremely likely */ }
2167 else
2168 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2169 }
2170 else
2171 {
2172 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2173 Assert(cLocks > 0);
2174 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2175 {
2176 if (cLocks == 1)
2177 {
2178 Assert(pVM->pgm.s.cReadLockedPages > 0);
2179 pVM->pgm.s.cReadLockedPages--;
2180 }
2181 PGM_PAGE_DEC_READ_LOCKS(pPage);
2182 }
2183 }
2184
2185# ifndef IN_RING0
2186 if (pMap)
2187 {
2188 Assert(pMap->cRefs >= 1);
2189 pMap->cRefs--;
2190 }
2191# endif
2192 PGM_UNLOCK(pVM);
2193}
2194
2195
2196#ifdef IN_RING3
2197/**
2198 * Release the mapping of multiple guest pages.
2199 *
2200 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2201 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2202 *
2203 * @param pVM The cross context VM structure.
2204 * @param cPages Number of pages to unlock.
2205 * @param paLocks Array of lock structures initialized by the mapping
2206 * function.
2207 */
2208VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2209{
2210 Assert(cPages > 0);
2211 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2212#ifdef VBOX_STRICT
2213 for (uint32_t i = 1; i < cPages; i++)
2214 {
2215 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2216 AssertPtr(paLocks[i].uPageAndType);
2217 }
2218#endif
2219
2220 PGM_LOCK_VOID(pVM);
2221 if (fWriteLock)
2222 {
2223 /*
2224 * Write locks:
2225 */
2226 for (uint32_t i = 0; i < cPages; i++)
2227 {
2228 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2229 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2230 Assert(cLocks > 0);
2231 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2232 {
2233 if (cLocks == 1)
2234 {
2235 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2236 pVM->pgm.s.cWriteLockedPages--;
2237 }
2238 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2239 }
2240
2241 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2242 { /* probably extremely likely */ }
2243 else
2244 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2245
2246 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2247 if (pMap)
2248 {
2249 Assert(pMap->cRefs >= 1);
2250 pMap->cRefs--;
2251 }
2252
2253 /* Yield the lock: */
2254 if ((i & 1023) == 1023 && i + 1 < cPages)
2255 {
2256 PGM_UNLOCK(pVM);
2257 PGM_LOCK_VOID(pVM);
2258 }
2259 }
2260 }
2261 else
2262 {
2263 /*
2264 * Read locks:
2265 */
2266 for (uint32_t i = 0; i < cPages; i++)
2267 {
2268 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2269 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2270 Assert(cLocks > 0);
2271 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2272 {
2273 if (cLocks == 1)
2274 {
2275 Assert(pVM->pgm.s.cReadLockedPages > 0);
2276 pVM->pgm.s.cReadLockedPages--;
2277 }
2278 PGM_PAGE_DEC_READ_LOCKS(pPage);
2279 }
2280
2281 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2282 if (pMap)
2283 {
2284 Assert(pMap->cRefs >= 1);
2285 pMap->cRefs--;
2286 }
2287
2288 /* Yield the lock: */
2289 if ((i & 1023) == 1023 && i + 1 < cPages)
2290 {
2291 PGM_UNLOCK(pVM);
2292 PGM_LOCK_VOID(pVM);
2293 }
2294 }
2295 }
2296 PGM_UNLOCK(pVM);
2297
2298 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2299}
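
/*
 * Illustrative sketch; not built, helper name invented.  The lock array is
 * assumed to have been filled by PGMR3PhysBulkGCPhys2CCPtrExternal or
 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal (declared elsewhere).
 */
#if 0
static void exampleReleaseBulkLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
{
    /* All locks in one call must be of the same kind (all read or all write). */
    PGMPhysBulkReleasePageMappingLocks(pVM, cPages, paLocks);
    /* The array is zeroed on return, so a stale lock cannot be released twice. */
}
#endif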
2300#endif /* IN_RING3 */
2301
2302
2303/**
2304 * Release the internal mapping of a guest page.
2305 *
2306 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2307 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2308 *
2309 * @param pVM The cross context VM structure.
2310 * @param pLock The lock structure initialized by the mapping function.
2311 *
2312 * @remarks Caller must hold the PGM lock.
2313 */
2314void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2315{
2316 PGM_LOCK_ASSERT_OWNER(pVM);
2317 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2318}
2319
2320
2321/**
2322 * Converts a GC physical address to a HC ring-3 pointer.
2323 *
2324 * @returns VINF_SUCCESS on success.
2325 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2326 * page but has no physical backing.
2327 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2328 * GC physical address.
2329 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2330 * a dynamic ram chunk boundary
2331 *
2332 * @param pVM The cross context VM structure.
2333 * @param GCPhys The GC physical address to convert.
2334 * @param pR3Ptr Where to store the R3 pointer on success.
2335 *
2336 * @deprecated Avoid when possible!
2337 */
2338int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2339{
2340/** @todo this is kind of hacky and needs some more work. */
2341#ifndef DEBUG_sandervl
2342 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2343#endif
2344
2345 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
2346 PGM_LOCK_VOID(pVM);
2347
2348 PPGMRAMRANGE pRam;
2349 PPGMPAGE pPage;
2350 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2351 if (RT_SUCCESS(rc))
2352 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2353
2354 PGM_UNLOCK(pVM);
2355 Assert(rc <= VINF_SUCCESS);
2356 return rc;
2357}
2358
2359
2360/**
2361 * Converts a guest pointer to a GC physical address.
2362 *
2363 * This uses the current CR3/CR0/CR4 of the guest.
2364 *
2365 * @returns VBox status code.
2366 * @param pVCpu The cross context virtual CPU structure.
2367 * @param GCPtr The guest pointer to convert.
2368 * @param pGCPhys Where to store the GC physical address.
2369 */
2370VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2371{
2372 PGMPTWALK Walk;
2373 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2374 if (pGCPhys && RT_SUCCESS(rc))
2375 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
2376 return rc;
2377}
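
/*
 * Illustrative sketch; not built, helper name invented.  Shows that the
 * translation uses the guest's current paging state and that the returned
 * physical address already carries the byte offset within the page.
 */
#if 0
static int exampleTranslateGuestPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGv -> %RGp\n", GCPtr, GCPhys)); /* page offset is already included */
    return rc;
}
#endif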
2378
2379
2380/**
2381 * Converts a guest pointer to a HC physical address.
2382 *
2383 * This uses the current CR3/CR0/CR4 of the guest.
2384 *
2385 * @returns VBox status code.
2386 * @param pVCpu The cross context virtual CPU structure.
2387 * @param GCPtr The guest pointer to convert.
2388 * @param pHCPhys Where to store the HC physical address.
2389 */
2390VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2391{
2392 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2393 PGMPTWALK Walk;
2394 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
2395 if (RT_SUCCESS(rc))
2396 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
2397 return rc;
2398}
2399
2400
2401
2402#undef LOG_GROUP
2403#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2404
2405
2406#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2407/**
2408 * Cache PGMPhys memory access
2409 *
2410 * @param pVM The cross context VM structure.
2411 * @param pCache Cache structure pointer
2412 * @param GCPhys GC physical address
2413 * @param pbR3 HC pointer corresponding to physical page
2414 *
2415 * @thread EMT.
2416 */
2417static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2418{
2419 uint32_t iCacheIndex;
2420
2421 Assert(VM_IS_EMT(pVM));
2422
2423 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2424 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
2425
2426 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2427
2428 ASMBitSet(&pCache->aEntries, iCacheIndex);
2429
2430 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2431 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2432}
2433#endif /* IN_RING3 */
2434
2435
2436/**
2437 * Deals with reading from a page with one or more ALL access handlers.
2438 *
2439 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2440 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2441 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2442 *
2443 * @param pVM The cross context VM structure.
2444 * @param pPage The page descriptor.
2445 * @param GCPhys The physical address to start reading at.
2446 * @param pvBuf Where to put the bits we read.
2447 * @param cb How much to read - less or equal to a page.
2448 * @param enmOrigin The origin of this call.
2449 */
2450static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2451 PGMACCESSORIGIN enmOrigin)
2452{
2453 /*
2454 * The most frequent access here is MMIO and shadowed ROM.
2455 * The current code ASSUMES all these access handlers cover full pages!
2456 */
2457
2458 /*
2459 * Whatever we do we need the source page, map it first.
2460 */
2461 PGMPAGEMAPLOCK PgMpLck;
2462 const void *pvSrc = NULL;
2463 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2464/** @todo Check how this can work for MMIO pages? */
2465 if (RT_FAILURE(rc))
2466 {
2467 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2468 GCPhys, pPage, rc));
2469 memset(pvBuf, 0xff, cb);
2470 return VINF_SUCCESS;
2471 }
2472
2473 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2474
2475 /*
2476 * Deal with any physical handlers.
2477 */
2478 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2479 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2480 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2481 {
2482 PPGMPHYSHANDLER pCur;
2483 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2484 if (RT_SUCCESS(rc))
2485 {
2486 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2487 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
2488 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
2489#ifndef IN_RING3
2490 if (enmOrigin != PGMACCESSORIGIN_IEM)
2491 {
2492 /* Cannot reliably handle informational status codes in this context */
2493 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2494 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2495 }
2496#endif
2497 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2498 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
2499 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2500 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2501
2502 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
2503 STAM_PROFILE_START(&pCur->Stat, h);
2504 PGM_LOCK_ASSERT_OWNER(pVM);
2505
2506 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2507 PGM_UNLOCK(pVM);
2508 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
2509 PGM_LOCK_VOID(pVM);
2510
2511 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2512 pCur = NULL; /* might not be valid anymore. */
2513 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2514 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2515 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2516 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2517 {
2518 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2519 return rcStrict;
2520 }
2521 }
2522 else if (rc == VERR_NOT_FOUND)
2523 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
2524 else
2525 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
2526 }
2527
2528 /*
2529 * Take the default action.
2530 */
2531 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2532 {
2533 memcpy(pvBuf, pvSrc, cb);
2534 rcStrict = VINF_SUCCESS;
2535 }
2536 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2537 return rcStrict;
2538}
2539
2540
2541/**
2542 * Read physical memory.
2543 *
2544 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2545 * want to ignore those.
2546 *
2547 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2548 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2549 * @retval VINF_SUCCESS in all context - read completed.
2550 *
2551 * @retval VINF_EM_OFF in RC and R0 - read completed.
2552 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2553 * @retval VINF_EM_RESET in RC and R0 - read completed.
2554 * @retval VINF_EM_HALT in RC and R0 - read completed.
2555 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2556 *
2557 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2558 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2559 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2560 *
2561 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2562 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2563 *
2564 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2565 *
2566 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2567 * haven't been cleared for strict status codes yet.
2568 *
2569 * @param pVM The cross context VM structure.
2570 * @param GCPhys Physical address start reading from.
2571 * @param pvBuf Where to put the read bits.
2572 * @param cbRead How many bytes to read.
2573 * @param enmOrigin The origin of this call.
2574 */
2575VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2576{
2577 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2578 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2579
2580 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
2581 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2582
2583 PGM_LOCK_VOID(pVM);
2584
2585 /*
2586 * Copy loop on ram ranges.
2587 */
2588 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2589 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2590 for (;;)
2591 {
2592 /* Inside range or not? */
2593 if (pRam && GCPhys >= pRam->GCPhys)
2594 {
2595 /*
2596 * Must work our way thru this page by page.
2597 */
2598 RTGCPHYS off = GCPhys - pRam->GCPhys;
2599 while (off < pRam->cb)
2600 {
2601 unsigned iPage = off >> GUEST_PAGE_SHIFT;
2602 PPGMPAGE pPage = &pRam->aPages[iPage];
2603 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
2604 if (cb > cbRead)
2605 cb = cbRead;
2606
2607 /*
2608 * Normal page? Get the pointer to it.
2609 */
2610 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2611 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2612 {
2613 /*
2614 * Get the pointer to the page.
2615 */
2616 PGMPAGEMAPLOCK PgMpLck;
2617 const void *pvSrc;
2618 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2619 if (RT_SUCCESS(rc))
2620 {
2621 memcpy(pvBuf, pvSrc, cb);
2622 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2623 }
2624 else
2625 {
2626 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2627 pRam->GCPhys + off, pPage, rc));
2628 memset(pvBuf, 0xff, cb);
2629 }
2630 }
2631 /*
2632 * Have ALL/MMIO access handlers.
2633 */
2634 else
2635 {
2636 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2637 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2638 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2639 else
2640 {
2641 memset(pvBuf, 0xff, cb);
2642 PGM_UNLOCK(pVM);
2643 return rcStrict2;
2644 }
2645 }
2646
2647 /* next page */
2648 if (cb >= cbRead)
2649 {
2650 PGM_UNLOCK(pVM);
2651 return rcStrict;
2652 }
2653 cbRead -= cb;
2654 off += cb;
2655 pvBuf = (char *)pvBuf + cb;
2656 } /* walk pages in ram range. */
2657
2658 GCPhys = pRam->GCPhysLast + 1;
2659 }
2660 else
2661 {
2662 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2663
2664 /*
2665 * Unassigned address space.
2666 */
2667 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2668 if (cb >= cbRead)
2669 {
2670 memset(pvBuf, 0xff, cbRead);
2671 break;
2672 }
2673 memset(pvBuf, 0xff, cb);
2674
2675 cbRead -= cb;
2676 pvBuf = (char *)pvBuf + cb;
2677 GCPhys += cb;
2678 }
2679
2680 /* Advance range if necessary. */
2681 while (pRam && GCPhys > pRam->GCPhysLast)
2682 pRam = pRam->CTX_SUFF(pNext);
2683 } /* Ram range walk */
2684
2685 PGM_UNLOCK(pVM);
2686 return rcStrict;
2687}
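
/*
 * Illustrative sketch of handler-aware reading; not built, helper name
 * invented and the access origin is supplied by the caller rather than
 * hardcoding a PGMACCESSORIGIN value here.  The point is the status
 * handling: check with PGM_PHYS_RW_IS_SUCCESS rather than RT_SUCCESS, since
 * ring-0 may return informational strict statuses that must be passed up.
 */
#if 0
static VBOXSTRICTRC exampleReadGuestU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t *pu32, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pu32, sizeof(*pu32), enmOrigin);
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    { /* *pu32 is valid; rcStrict may still be an informational status. */ }
    else
    { /* Treat *pu32 as not read (unassigned space reads back as 0xff). */ }
    return rcStrict;
}
#endif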
2688
2689
2690/**
2691 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2692 *
2693 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2694 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2695 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2696 *
2697 * @param pVM The cross context VM structure.
2698 * @param pPage The page descriptor.
2699 * @param GCPhys The physical address to start writing at.
2700 * @param pvBuf What to write.
2701 * @param cbWrite How much to write - less or equal to a page.
2702 * @param enmOrigin The origin of this call.
2703 */
2704static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2705 PGMACCESSORIGIN enmOrigin)
2706{
2707 PGMPAGEMAPLOCK PgMpLck;
2708 void *pvDst = NULL;
2709 VBOXSTRICTRC rcStrict;
2710
2711 /*
2712 * Give priority to physical handlers (like #PF does).
2713 *
2714 * Hope for a lonely physical handler first that covers the whole write
2715 * area. This should be a pretty frequent case with MMIO and the heavy
2716 * usage of full page handlers in the page pool.
2717 */
2718 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2719 PPGMPHYSHANDLER pCur;
2720 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2721 if (RT_SUCCESS(rcStrict))
2722 {
2723 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
2724#ifndef IN_RING3
2725 if (enmOrigin != PGMACCESSORIGIN_IEM)
2726 /* Cannot reliably handle informational status codes in this context */
2727 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2728#endif
2729 size_t cbRange = pCur->KeyLast - GCPhys + 1;
2730 if (cbRange > cbWrite)
2731 cbRange = cbWrite;
2732
2733 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
2734 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2735 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2736 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2737 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2738 else
2739 rcStrict = VINF_SUCCESS;
2740 if (RT_SUCCESS(rcStrict))
2741 {
2742 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2743 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2744 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
2745 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
2746 STAM_PROFILE_START(&pCur->Stat, h);
2747
2748 /* Most handlers will want to release the PGM lock for deadlock prevention
2749 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2750 dirty page trackers will want to keep it for performance reasons. */
2751 PGM_LOCK_ASSERT_OWNER(pVM);
2752 if (pCurType->fKeepPgmLock)
2753 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2754 else
2755 {
2756 PGM_UNLOCK(pVM);
2757 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2758 PGM_LOCK_VOID(pVM);
2759 }
2760
2761 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2762 pCur = NULL; /* might not be valid anymore. */
2763 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2764 {
2765 if (pvDst)
2766 memcpy(pvDst, pvBuf, cbRange);
2767 rcStrict = VINF_SUCCESS;
2768 }
2769 else
2770 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2771 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2772 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2773 }
2774 else
2775 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2776 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2777 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2778 {
2779 if (pvDst)
2780 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2781 return rcStrict;
2782 }
2783
2784 /* more fun to be had below */
2785 cbWrite -= cbRange;
2786 GCPhys += cbRange;
2787 pvBuf = (uint8_t *)pvBuf + cbRange;
2788 pvDst = (uint8_t *)pvDst + cbRange;
2789 }
2790 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
2791 rcStrict = VINF_SUCCESS;
2792 else
2793 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2794 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
2795
2796 /*
2797 * Deal with all the odd ends (used to be deal with virt+phys).
2798 */
2799 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2800
2801 /* We need a writable destination page. */
2802 if (!pvDst)
2803 {
2804 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2805 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2806 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2807 rc2);
2808 }
2809
2810 /** @todo clean up this code some more now there are no virtual handlers any
2811 * more. */
2812 /* The loop state (big + ugly). */
2813 PPGMPHYSHANDLER pPhys = NULL;
2814 uint32_t offPhys = GUEST_PAGE_SIZE;
2815 uint32_t offPhysLast = GUEST_PAGE_SIZE;
2816 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2817
2818 /* The loop. */
2819 for (;;)
2820 {
2821 if (fMorePhys && !pPhys)
2822 {
2823 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
2824 if (RT_SUCCESS_NP(rcStrict))
2825 {
2826 offPhys = 0;
2827 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2828 }
2829 else
2830 {
2831 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2832
2833 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2834 GCPhys, &pPhys);
2835 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
2836 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
2837
2838 if ( RT_SUCCESS(rcStrict)
2839 && pPhys->Key <= GCPhys + (cbWrite - 1))
2840 {
2841 offPhys = pPhys->Key - GCPhys;
2842 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2843 Assert(pPhys->KeyLast - pPhys->Key < _4G);
2844 }
2845 else
2846 {
2847 pPhys = NULL;
2848 fMorePhys = false;
2849 offPhys = offPhysLast = GUEST_PAGE_SIZE;
2850 }
2851 }
2852 }
2853
2854 /*
2855 * Handle access to space without handlers (that's easy).
2856 */
2857 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2858 uint32_t cbRange = (uint32_t)cbWrite;
2859 Assert(cbRange == cbWrite);
2860
2861 /*
2862 * Physical handler.
2863 */
2864 if (!offPhys)
2865 {
2866#ifndef IN_RING3
2867 if (enmOrigin != PGMACCESSORIGIN_IEM)
2868 /* Cannot reliably handle informational status codes in this context */
2869 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2870#endif
2871 if (cbRange > offPhysLast + 1)
2872 cbRange = offPhysLast + 1;
2873
2874 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
2875 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
2876 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
2877 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
2878
2879 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2880 STAM_PROFILE_START(&pPhys->Stat, h);
2881
2882 /* Most handlers will want to release the PGM lock for deadlock prevention
2883 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
2884 dirty page trackers will want to keep it for performance reasons. */
2885 PGM_LOCK_ASSERT_OWNER(pVM);
2886 if (pCurType->fKeepPgmLock)
2887 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2888 else
2889 {
2890 PGM_UNLOCK(pVM);
2891 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
2892 PGM_LOCK_VOID(pVM);
2893 }
2894
2895 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
2896 pPhys = NULL; /* might not be valid anymore. */
2897 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2898 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2899 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2900 }
2901
2902 /*
2903 * Execute the default action and merge the status codes.
2904 */
2905 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2906 {
2907 memcpy(pvDst, pvBuf, cbRange);
2908 rcStrict2 = VINF_SUCCESS;
2909 }
2910 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2911 {
2912 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2913 return rcStrict2;
2914 }
2915 else
2916 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2917
2918 /*
2919 * Advance if we've got more stuff to do.
2920 */
2921 if (cbRange >= cbWrite)
2922 {
2923 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2924 return rcStrict;
2925 }
2926
2927
2928 cbWrite -= cbRange;
2929 GCPhys += cbRange;
2930 pvBuf = (uint8_t *)pvBuf + cbRange;
2931 pvDst = (uint8_t *)pvDst + cbRange;
2932
2933 offPhys -= cbRange;
2934 offPhysLast -= cbRange;
2935 }
2936}
2937
2938
2939/**
2940 * Write to physical memory.
2941 *
2942 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2943 * want to ignore those.
2944 *
2945 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2946 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2947 * @retval VINF_SUCCESS in all context - write completed.
2948 *
2949 * @retval VINF_EM_OFF in RC and R0 - write completed.
2950 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2951 * @retval VINF_EM_RESET in RC and R0 - write completed.
2952 * @retval VINF_EM_HALT in RC and R0 - write completed.
2953 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2954 *
2955 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2956 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2957 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2958 *
2959 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2960 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2961 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2962 *
2963 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2964 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2965 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2966 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2967 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2968 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2969 *
2970 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2971 * haven't been cleared for strict status codes yet.
2972 *
2973 *
2974 * @param pVM The cross context VM structure.
2975 * @param GCPhys Physical address to write to.
2976 * @param pvBuf What to write.
2977 * @param cbWrite How many bytes to write.
2978 * @param enmOrigin Who is calling.
2979 */
2980VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2981{
2982 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2983 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2984 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2985
2986 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
2987 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2988
2989 PGM_LOCK_VOID(pVM);
2990
2991 /*
2992 * Copy loop on ram ranges.
2993 */
2994 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2995 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2996 for (;;)
2997 {
2998 /* Inside range or not? */
2999 if (pRam && GCPhys >= pRam->GCPhys)
3000 {
3001 /*
3002 * Must work our way thru this page by page.
3003 */
3004 RTGCPTR off = GCPhys - pRam->GCPhys;
3005 while (off < pRam->cb)
3006 {
3007 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
3008 PPGMPAGE pPage = &pRam->aPages[iPage];
3009 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
3010 if (cb > cbWrite)
3011 cb = cbWrite;
3012
3013 /*
3014 * Normal page? Get the pointer to it.
3015 */
3016 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3017 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3018 {
3019 PGMPAGEMAPLOCK PgMpLck;
3020 void *pvDst;
3021 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3022 if (RT_SUCCESS(rc))
3023 {
3024 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3025 memcpy(pvDst, pvBuf, cb);
3026 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3027 }
3028 /* Ignore writes to ballooned pages. */
3029 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3030 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3031 pRam->GCPhys + off, pPage, rc));
3032 }
3033 /*
3034 * Active WRITE or ALL access handlers.
3035 */
3036 else
3037 {
3038 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3039 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3040 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3041 else
3042 {
3043 PGM_UNLOCK(pVM);
3044 return rcStrict2;
3045 }
3046 }
3047
3048 /* next page */
3049 if (cb >= cbWrite)
3050 {
3051 PGM_UNLOCK(pVM);
3052 return rcStrict;
3053 }
3054
3055 cbWrite -= cb;
3056 off += cb;
3057 pvBuf = (const char *)pvBuf + cb;
3058 } /* walk pages in ram range */
3059
3060 GCPhys = pRam->GCPhysLast + 1;
3061 }
3062 else
3063 {
3064 /*
3065 * Unassigned address space, skip it.
3066 */
3067 if (!pRam)
3068 break;
3069 size_t cb = pRam->GCPhys - GCPhys;
3070 if (cb >= cbWrite)
3071 break;
3072 cbWrite -= cb;
3073 pvBuf = (const char *)pvBuf + cb;
3074 GCPhys += cb;
3075 }
3076
3077 /* Advance range if necessary. */
3078 while (pRam && GCPhys > pRam->GCPhysLast)
3079 pRam = pRam->CTX_SUFF(pNext);
3080 } /* Ram range walk */
3081
3082 PGM_UNLOCK(pVM);
3083 return rcStrict;
3084}
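
/*
 * Illustrative write counterpart; not built, helper name invented, origin
 * supplied by the caller.  Like PGMPhysRead, the request may be split over
 * several pages and handlers, and the merged strict status must be checked
 * with PGM_PHYS_RW_IS_SUCCESS.
 */
#if 0
static VBOXSTRICTRC exampleWriteGuestU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t u32, PGMACCESSORIGIN enmOrigin)
{
    VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
    if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        Log(("example: write at %RGp failed: %Rrc\n", GCPhys, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif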
3085
3086
3087/**
3088 * Read from guest physical memory by GC physical address, bypassing
3089 * MMIO and access handlers.
3090 *
3091 * @returns VBox status code.
3092 * @param pVM The cross context VM structure.
3093 * @param pvDst The destination address.
3094 * @param GCPhysSrc The source address (GC physical address).
3095 * @param cb The number of bytes to read.
3096 */
3097VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3098{
3099 /*
3100 * Treat the first page as a special case.
3101 */
3102 if (!cb)
3103 return VINF_SUCCESS;
3104
3105 /* map the 1st page */
3106 void const *pvSrc;
3107 PGMPAGEMAPLOCK Lock;
3108 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3109 if (RT_FAILURE(rc))
3110 return rc;
3111
3112 /* optimize for the case where access is completely within the first page. */
3113 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
3114 if (RT_LIKELY(cb <= cbPage))
3115 {
3116 memcpy(pvDst, pvSrc, cb);
3117 PGMPhysReleasePageMappingLock(pVM, &Lock);
3118 return VINF_SUCCESS;
3119 }
3120
3121 /* copy to the end of the page. */
3122 memcpy(pvDst, pvSrc, cbPage);
3123 PGMPhysReleasePageMappingLock(pVM, &Lock);
3124 GCPhysSrc += cbPage;
3125 pvDst = (uint8_t *)pvDst + cbPage;
3126 cb -= cbPage;
3127
3128 /*
3129 * Page by page.
3130 */
3131 for (;;)
3132 {
3133 /* map the page */
3134 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3135 if (RT_FAILURE(rc))
3136 return rc;
3137
3138 /* last page? */
3139 if (cb <= GUEST_PAGE_SIZE)
3140 {
3141 memcpy(pvDst, pvSrc, cb);
3142 PGMPhysReleasePageMappingLock(pVM, &Lock);
3143 return VINF_SUCCESS;
3144 }
3145
3146 /* copy the entire page and advance */
3147 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3148 PGMPhysReleasePageMappingLock(pVM, &Lock);
3149 GCPhysSrc += GUEST_PAGE_SIZE;
3150 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3151 cb -= GUEST_PAGE_SIZE;
3152 }
3153 /* won't ever get here. */
3154}
3155
3156
3157/**
3158 * Write to guest physical memory by GC physical address.
3160 *
3161 * This will bypass MMIO and access handlers.
3162 *
3163 * @returns VBox status code.
3164 * @param pVM The cross context VM structure.
3165 * @param GCPhysDst The GC physical address of the destination.
3166 * @param pvSrc The source buffer.
3167 * @param cb The number of bytes to write.
3168 */
3169VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3170{
3171 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3172
3173 /*
3174 * Treat the first page as a special case.
3175 */
3176 if (!cb)
3177 return VINF_SUCCESS;
3178
3179 /* map the 1st page */
3180 void *pvDst;
3181 PGMPAGEMAPLOCK Lock;
3182 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3183 if (RT_FAILURE(rc))
3184 return rc;
3185
3186 /* optimize for the case where access is completely within the first page. */
3187 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
3188 if (RT_LIKELY(cb <= cbPage))
3189 {
3190 memcpy(pvDst, pvSrc, cb);
3191 PGMPhysReleasePageMappingLock(pVM, &Lock);
3192 return VINF_SUCCESS;
3193 }
3194
3195 /* copy to the end of the page. */
3196 memcpy(pvDst, pvSrc, cbPage);
3197 PGMPhysReleasePageMappingLock(pVM, &Lock);
3198 GCPhysDst += cbPage;
3199 pvSrc = (const uint8_t *)pvSrc + cbPage;
3200 cb -= cbPage;
3201
3202 /*
3203 * Page by page.
3204 */
3205 for (;;)
3206 {
3207 /* map the page */
3208 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3209 if (RT_FAILURE(rc))
3210 return rc;
3211
3212 /* last page? */
3213 if (cb <= GUEST_PAGE_SIZE)
3214 {
3215 memcpy(pvDst, pvSrc, cb);
3216 PGMPhysReleasePageMappingLock(pVM, &Lock);
3217 return VINF_SUCCESS;
3218 }
3219
3220 /* copy the entire page and advance */
3221 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3222 PGMPhysReleasePageMappingLock(pVM, &Lock);
3223 GCPhysDst += GUEST_PAGE_SIZE;
3224 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3225 cb -= GUEST_PAGE_SIZE;
3226 }
3227 /* won't ever get here. */
3228}
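
/*
 * Illustrative sketch; not built, helper name invented.  Copies a small
 * block between two guest physical addresses using the handler-bypassing
 * Simple APIs above; both calls handle page-crossing ranges internally.
 */
#if 0
static int exampleCopyGuestPhysBlock(PVMCC pVM, RTGCPHYS GCPhysDst, RTGCPHYS GCPhysSrc, size_t cb)
{
    uint8_t abBuf[256];
    AssertReturn(cb <= sizeof(abBuf), VERR_BUFFER_OVERFLOW);
    int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, GCPhysSrc, cb);
    if (RT_SUCCESS(rc))
        rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, abBuf, cb);
    return rc;
}
#endif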
3229
3230
3231/**
3232 * Read from guest physical memory referenced by GC pointer.
3233 *
3234 * This function uses the current CR3/CR0/CR4 of the guest and will
3235 * bypass access handlers and not set any accessed bits.
3236 *
3237 * @returns VBox status code.
3238 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3239 * @param pvDst The destination address.
3240 * @param GCPtrSrc The source address (GC pointer).
3241 * @param cb The number of bytes to read.
3242 */
3243VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3244{
3245 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3246/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3247
3248 /*
3249 * Treat the first page as a special case.
3250 */
3251 if (!cb)
3252 return VINF_SUCCESS;
3253
3254 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
3255 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3256
3257 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3258 * when many VCPUs are fighting for the lock.
3259 */
3260 PGM_LOCK_VOID(pVM);
3261
3262 /* map the 1st page */
3263 void const *pvSrc;
3264 PGMPAGEMAPLOCK Lock;
3265 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3266 if (RT_FAILURE(rc))
3267 {
3268 PGM_UNLOCK(pVM);
3269 return rc;
3270 }
3271
3272 /* optimize for the case where access is completely within the first page. */
3273 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3274 if (RT_LIKELY(cb <= cbPage))
3275 {
3276 memcpy(pvDst, pvSrc, cb);
3277 PGMPhysReleasePageMappingLock(pVM, &Lock);
3278 PGM_UNLOCK(pVM);
3279 return VINF_SUCCESS;
3280 }
3281
3282 /* copy to the end of the page. */
3283 memcpy(pvDst, pvSrc, cbPage);
3284 PGMPhysReleasePageMappingLock(pVM, &Lock);
3285 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3286 pvDst = (uint8_t *)pvDst + cbPage;
3287 cb -= cbPage;
3288
3289 /*
3290 * Page by page.
3291 */
3292 for (;;)
3293 {
3294 /* map the page */
3295 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3296 if (RT_FAILURE(rc))
3297 {
3298 PGM_UNLOCK(pVM);
3299 return rc;
3300 }
3301
3302 /* last page? */
3303 if (cb <= GUEST_PAGE_SIZE)
3304 {
3305 memcpy(pvDst, pvSrc, cb);
3306 PGMPhysReleasePageMappingLock(pVM, &Lock);
3307 PGM_UNLOCK(pVM);
3308 return VINF_SUCCESS;
3309 }
3310
3311 /* copy the entire page and advance */
3312 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3313 PGMPhysReleasePageMappingLock(pVM, &Lock);
3314 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
3315 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
3316 cb -= GUEST_PAGE_SIZE;
3317 }
3318 /* won't ever get here. */
3319}
3320
3321
3322/**
3323 * Write to guest physical memory referenced by GC pointer.
3324 *
3325 * This function uses the current CR3/CR0/CR4 of the guest and will
3326 * bypass access handlers and not set dirty or accessed bits.
3327 *
3328 * @returns VBox status code.
3329 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3330 * @param GCPtrDst The destination address (GC pointer).
3331 * @param pvSrc The source address.
3332 * @param cb The number of bytes to write.
3333 */
3334VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3335{
3336 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3337 VMCPU_ASSERT_EMT(pVCpu);
3338
3339 /*
3340 * Treat the first page as a special case.
3341 */
3342 if (!cb)
3343 return VINF_SUCCESS;
3344
3345 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
3346 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3347
3348 /* map the 1st page */
3349 void *pvDst;
3350 PGMPAGEMAPLOCK Lock;
3351 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3352 if (RT_FAILURE(rc))
3353 return rc;
3354
3355 /* optimize for the case where access is completely within the first page. */
3356 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3357 if (RT_LIKELY(cb <= cbPage))
3358 {
3359 memcpy(pvDst, pvSrc, cb);
3360 PGMPhysReleasePageMappingLock(pVM, &Lock);
3361 return VINF_SUCCESS;
3362 }
3363
3364 /* copy to the end of the page. */
3365 memcpy(pvDst, pvSrc, cbPage);
3366 PGMPhysReleasePageMappingLock(pVM, &Lock);
3367 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3368 pvSrc = (const uint8_t *)pvSrc + cbPage;
3369 cb -= cbPage;
3370
3371 /*
3372 * Page by page.
3373 */
3374 for (;;)
3375 {
3376 /* map the page */
3377 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3378 if (RT_FAILURE(rc))
3379 return rc;
3380
3381 /* last page? */
3382 if (cb <= GUEST_PAGE_SIZE)
3383 {
3384 memcpy(pvDst, pvSrc, cb);
3385 PGMPhysReleasePageMappingLock(pVM, &Lock);
3386 return VINF_SUCCESS;
3387 }
3388
3389 /* copy the entire page and advance */
3390 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3391 PGMPhysReleasePageMappingLock(pVM, &Lock);
3392 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3393 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3394 cb -= GUEST_PAGE_SIZE;
3395 }
3396 /* won't ever get here. */
3397}
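/*
 * Usage sketch (illustrative only): patching a couple of guest bytes with
 * PGMPhysSimpleWriteGCPtr, i.e. without invoking access handlers and without
 * touching the dirty/accessed bits.  GCPtrPatch and abPatch are hypothetical.
 *
 * @code
 *     static const uint8_t abPatch[2] = { 0x0f, 0x0b };   // example bytes only
 *     int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrPatch, abPatch, sizeof(abPatch));
 *     AssertRCReturn(rc, rc);
 * @endcode
 */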
3398
3399
3400/**
3401 * Write to guest physical memory referenced by GC pointer and update the PTE.
3402 *
3403 * This function uses the current CR3/CR0/CR4 of the guest and will
3404 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3405 *
3406 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3407 *
3408 * @returns VBox status code.
3409 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3410 * @param GCPtrDst The destination address (GC pointer).
3411 * @param pvSrc The source address.
3412 * @param cb The number of bytes to write.
3413 */
3414VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3415{
3416 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3417 VMCPU_ASSERT_EMT(pVCpu);
3418
3419 /*
3420 * Treat the first page as a special case.
3421 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3422 */
3423 if (!cb)
3424 return VINF_SUCCESS;
3425
3426 /* map the 1st page */
3427 void *pvDst;
3428 PGMPAGEMAPLOCK Lock;
3429 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3430 if (RT_FAILURE(rc))
3431 return rc;
3432
3433 /* optimize for the case where access is completely within the first page. */
3434 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3435 if (RT_LIKELY(cb <= cbPage))
3436 {
3437 memcpy(pvDst, pvSrc, cb);
3438 PGMPhysReleasePageMappingLock(pVM, &Lock);
3439 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3440 return VINF_SUCCESS;
3441 }
3442
3443 /* copy to the end of the page. */
3444 memcpy(pvDst, pvSrc, cbPage);
3445 PGMPhysReleasePageMappingLock(pVM, &Lock);
3446 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3447 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3448 pvSrc = (const uint8_t *)pvSrc + cbPage;
3449 cb -= cbPage;
3450
3451 /*
3452 * Page by page.
3453 */
3454 for (;;)
3455 {
3456 /* map the page */
3457 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3458 if (RT_FAILURE(rc))
3459 return rc;
3460
3461 /* last page? */
3462 if (cb <= GUEST_PAGE_SIZE)
3463 {
3464 memcpy(pvDst, pvSrc, cb);
3465 PGMPhysReleasePageMappingLock(pVM, &Lock);
3466 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3467 return VINF_SUCCESS;
3468 }
3469
3470 /* copy the entire page and advance */
3471 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
3472 PGMPhysReleasePageMappingLock(pVM, &Lock);
3473 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3474 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
3475 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
3476 cb -= GUEST_PAGE_SIZE;
3477 }
3478 /* won't ever get here. */
3479}
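/*
 * Usage sketch (illustrative only): when emulating a guest store where the
 * guest architecturally expects the accessed and dirty bits to end up set in
 * its PTEs, the dirty variant above is the one to use.  GCPtrStore is a
 * hypothetical address variable.
 *
 * @code
 *     uint32_t const u32Value = 0;
 *     int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStore, &u32Value, sizeof(u32Value));
 *     // On success the data is written and X86_PTE_A | X86_PTE_D are set for
 *     // every guest page that was touched; access handlers are still bypassed.
 * @endcode
 */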
3480
3481
3482/**
3483 * Read from guest physical memory referenced by GC pointer.
3484 *
3485 * This function uses the current CR3/CR0/CR4 of the guest and will
3486 * respect access handlers and set accessed bits.
3487 *
3488 * @returns Strict VBox status, see PGMPhysRead for details.
3489 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3490 * specified virtual address.
3491 *
3492 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3493 * @param pvDst The destination address.
3494 * @param GCPtrSrc The source address (GC pointer).
3495 * @param cb The number of bytes to read.
3496 * @param enmOrigin Who is calling.
3497 * @thread EMT(pVCpu)
3498 */
3499VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3500{
3501 int rc;
3502 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3503 VMCPU_ASSERT_EMT(pVCpu);
3504
3505 /*
3506 * Anything to do?
3507 */
3508 if (!cb)
3509 return VINF_SUCCESS;
3510
3511 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3512
3513 /*
3514 * Optimize reads within a single page.
3515 */
3516 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3517 {
3518 /* Convert virtual to physical address + flags */
3519 PGMPTWALK Walk;
3520 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3521 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3522 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3523
3524 /* mark the guest page as accessed. */
3525 if (!(Walk.fEffective & X86_PTE_A))
3526 {
3527 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3528 AssertRC(rc);
3529 }
3530
3531 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3532 }
3533
3534 /*
3535 * Page by page.
3536 */
3537 for (;;)
3538 {
3539 /* Convert virtual to physical address + flags */
3540 PGMPTWALK Walk;
3541 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
3542 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3543 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3544
3545 /* mark the guest page as accessed. */
3546 if (!(Walk.fEffective & X86_PTE_A))
3547 {
3548 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3549 AssertRC(rc);
3550 }
3551
3552 /* copy */
3553 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
3554 if (cbRead < cb)
3555 {
3556 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3558 { /* likely */ }
3559 else
3560 return rcStrict;
3561 }
3562 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
3563 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3564
3565 /* next */
3566 Assert(cb > cbRead);
3567 cb -= cbRead;
3568 pvDst = (uint8_t *)pvDst + cbRead;
3569 GCPtrSrc += cbRead;
3570 }
3571}
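/*
 * Usage sketch (illustrative only): the handler-respecting read returns a
 * strict status, so informational handler statuses must be propagated rather
 * than treated as errors.  Buf, GCPtrSrc and enmOrigin are assumed caller
 * variables; enmOrigin is one of the PGMACCESSORIGIN_XXX values from the
 * headers.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, &Buf, GCPtrSrc, sizeof(Buf), enmOrigin);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // may be an error or an informational handler status
 * @endcode
 */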
3572
3573
3574/**
3575 * Write to guest physical memory referenced by GC pointer.
3576 *
3577 * This function uses the current CR3/CR0/CR4 of the guest and will
3578 * respect access handlers and set dirty and accessed bits.
3579 *
3580 * @returns Strict VBox status, see PGMPhysWrite for details.
3581 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3582 * specified virtual address.
3583 *
3584 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3585 * @param GCPtrDst The destination address (GC pointer).
3586 * @param pvSrc The source address.
3587 * @param cb The number of bytes to write.
3588 * @param enmOrigin Who is calling.
3589 */
3590VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3591{
3592 int rc;
3593 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3594 VMCPU_ASSERT_EMT(pVCpu);
3595
3596 /*
3597 * Anything to do?
3598 */
3599 if (!cb)
3600 return VINF_SUCCESS;
3601
3602 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3603
3604 /*
3605 * Optimize writes within a single page.
3606 */
3607 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
3608 {
3609 /* Convert virtual to physical address + flags */
3610 PGMPTWALK Walk;
3611 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3612 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3613 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3614
3615 /* Mention when we ignore X86_PTE_RW... */
3616 if (!(Walk.fEffective & X86_PTE_RW))
3617 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3618
3619 /* Mark the guest page as accessed and dirty if necessary. */
3620 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3621 {
3622 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3623 AssertRC(rc);
3624 }
3625
3626 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3627 }
3628
3629 /*
3630 * Page by page.
3631 */
3632 for (;;)
3633 {
3634 /* Convert virtual to physical address + flags */
3635 PGMPTWALK Walk;
3636 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
3637 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3638 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3639
3640 /* Mention when we ignore X86_PTE_RW... */
3641 if (!(Walk.fEffective & X86_PTE_RW))
3642 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3643
3644 /* Mark the guest page as accessed and dirty if necessary. */
3645 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3646 {
3647 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3648 AssertRC(rc);
3649 }
3650
3651 /* copy */
3652 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
3653 if (cbWrite < cb)
3654 {
3655 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3656 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3657 { /* likely */ }
3658 else
3659 return rcStrict;
3660 }
3661 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
3662 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3663
3664 /* next */
3665 Assert(cb > cbWrite);
3666 cb -= cbWrite;
3667 pvSrc = (uint8_t *)pvSrc + cbWrite;
3668 GCPtrDst += cbWrite;
3669 }
3670}
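/*
 * Usage sketch (illustrative only): the write counterpart.  Note from the code
 * above that a write to a read-only guest page is only logged, not rejected,
 * so a caller that must honour X86_PTE_RW has to check the page-table walk
 * itself first.  Buf, GCPtrDst and enmOrigin are assumed caller variables.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &Buf, sizeof(Buf), enmOrigin);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 * @endcode
 */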
3671
3672
3673/**
3674 * Return the page type of the specified physical address.
3675 *
3676 * @returns The page type.
3677 * @param pVM The cross context VM structure.
3678 * @param GCPhys Guest physical address
3679 */
3680VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
3681{
3682 PGM_LOCK_VOID(pVM);
3683 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3684 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3685 PGM_UNLOCK(pVM);
3686
3687 return enmPgType;
3688}
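/*
 * Usage sketch (illustrative only): a quick validity check using the page type
 * query; PGMPAGETYPE_INVALID is returned for addresses outside any registered
 * RAM range.  The status code used below is an assumption for illustration.
 *
 * @code
 *     if (PGMPhysGetPageType(pVM, GCPhys) == PGMPAGETYPE_INVALID)
 *         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;   // assumed status for this case
 * @endcode
 */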
3689
3690
3691/**
3692 * Converts a GC physical address to a HC ring-3 pointer, with some
3693 * additional checks.
3694 *
3695 * @returns VBox status code (no informational statuses).
3696 *
3697 * @param pVM The cross context VM structure.
3698 * @param pVCpu The cross context virtual CPU structure of the
3699 * calling EMT.
3700 * @param GCPhys The GC physical address to convert. This API masks
3701 * the A20 line when necessary.
3702 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
3703 * be done while holding the PGM lock.
3704 * @param ppb Where to store the pointer corresponding to GCPhys
3705 * on success.
3706 * @param pfTlb The TLB flags and revision. We only add stuff.
3707 *
3708 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
3709 * PGMPhysIemGCPhys2Ptr.
3710 *
3711 * @thread EMT(pVCpu).
3712 */
3713VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
3714 R3R0PTRTYPE(uint8_t *) *ppb,
3715 uint64_t *pfTlb)
3716{
3717 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3718 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
3719
3720 PGM_LOCK_VOID(pVM);
3721
3722 PPGMRAMRANGE pRam;
3723 PPGMPAGE pPage;
3724 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3725 if (RT_SUCCESS(rc))
3726 {
3727 if (!PGM_PAGE_IS_BALLOONED(pPage))
3728 {
3729 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3730 {
3731 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
3732 {
3733 /*
3734 * No access handler.
3735 */
3736 switch (PGM_PAGE_GET_STATE(pPage))
3737 {
3738 case PGM_PAGE_STATE_ALLOCATED:
3739 *pfTlb |= *puTlbPhysRev;
3740 break;
3741 case PGM_PAGE_STATE_BALLOONED:
3742 AssertFailed();
3743 RT_FALL_THRU();
3744 case PGM_PAGE_STATE_ZERO:
3745 case PGM_PAGE_STATE_SHARED:
3746 case PGM_PAGE_STATE_WRITE_MONITORED:
3747 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3748 break;
3749 }
3750
3751 PPGMPAGEMAPTLBE pTlbe;
3752 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3753 AssertLogRelRCReturn(rc, rc);
3754 *ppb = (uint8_t *)pTlbe->pv;
3755 }
3756 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
3757 {
3758 /*
3759 * MMIO or similar all access handler: Catch all access.
3760 */
3761 *pfTlb |= *puTlbPhysRev
3762 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3763 *ppb = NULL;
3764 }
3765 else
3766 {
3767 /*
3768 * Write access handler: Catch write accesses if active.
3769 */
3770 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
3771 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3772 else
3773 switch (PGM_PAGE_GET_STATE(pPage))
3774 {
3775 case PGM_PAGE_STATE_ALLOCATED:
3776 *pfTlb |= *puTlbPhysRev;
3777 break;
3778 case PGM_PAGE_STATE_BALLOONED:
3779 AssertFailed();
3780 RT_FALL_THRU();
3781 case PGM_PAGE_STATE_ZERO:
3782 case PGM_PAGE_STATE_SHARED:
3783 case PGM_PAGE_STATE_WRITE_MONITORED:
3784 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
3785 break;
3786 }
3787
3788 PPGMPAGEMAPTLBE pTlbe;
3789 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3790 AssertLogRelRCReturn(rc, rc);
3791 *ppb = (uint8_t *)pTlbe->pv;
3792 }
3793 }
3794 else
3795 {
3796 /* Alias MMIO: For now, we catch all access. */
3797 *pfTlb |= *puTlbPhysRev
3798 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3799 *ppb = NULL;
3800 }
3801 }
3802 else
3803 {
3804 /* Ballooned: Shouldn't get here, but reads return the zero page via PGMPhysRead and writes go to /dev/null. */
3805 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
3806 *ppb = NULL;
3807 }
3808 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
3809 }
3810 else
3811 {
3812 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
3813 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
3814 *ppb = NULL;
3815 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
3816 }
3817
3818 PGM_UNLOCK(pVM);
3819 return VINF_SUCCESS;
3820}
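/*
 * Usage sketch (illustrative only): how a TLB-filling caller might consume the
 * flags returned above.  uTlbPhysRev (a uint64_t revision value owned by the
 * caller), pbMem and fTlb are hypothetical caller variables.
 *
 * @code
 *     uint8_t *pbMem = NULL;
 *     uint64_t fTlb  = 0;
 *     int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK,
 *                                         &uTlbPhysRev, &pbMem, &fTlb);
 *     AssertRCReturn(rc, rc);
 *     if (!(fTlb & (PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_UNASSIGNED)) && pbMem)
 *     {
 *         // Reads may go directly through pbMem; writes additionally require
 *         // that PGMIEMGCPHYS2PTR_F_NO_WRITE is clear.
 *     }
 * @endcode
 */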
3821
3822
3823/**
3824 * Converts a GC physical address to a HC ring-3 pointer, with some
3825 * additional checks.
3826 *
3827 * @returns VBox status code (no informational statuses).
3828 * @retval VINF_SUCCESS on success.
3829 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3830 * access handler of some kind.
3831 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3832 * accesses or is odd in any way.
3833 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3834 *
3835 * @param pVM The cross context VM structure.
3836 * @param pVCpu The cross context virtual CPU structure of the
3837 * calling EMT.
3838 * @param GCPhys The GC physical address to convert. This API masks
3839 * the A20 line when necessary.
3840 * @param fWritable Whether write access is required.
3841 * @param fByPassHandlers Whether to bypass access handlers.
3842 * @param ppv Where to store the pointer corresponding to GCPhys
3843 * on success.
3844 * @param pLock Where to store the page mapping lock, for use with PGMPhysReleasePageMappingLock.
3845 *
3846 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
3847 * @thread EMT(pVCpu).
3848 */
3849VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
3850 void **ppv, PPGMPAGEMAPLOCK pLock)
3851{
3852 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
3853
3854 PGM_LOCK_VOID(pVM);
3855
3856 PPGMRAMRANGE pRam;
3857 PPGMPAGE pPage;
3858 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3859 if (RT_SUCCESS(rc))
3860 {
3861 if (PGM_PAGE_IS_BALLOONED(pPage))
3862 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3863 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3864 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3865 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3866 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3867 rc = VINF_SUCCESS;
3868 else
3869 {
3870 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3871 {
3872 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3873 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3874 }
3875 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3876 {
3877 Assert(!fByPassHandlers);
3878 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3879 }
3880 }
3881 if (RT_SUCCESS(rc))
3882 {
3883 int rc2;
3884
3885 /* Make sure what we return is writable. */
3886 if (fWritable)
3887 switch (PGM_PAGE_GET_STATE(pPage))
3888 {
3889 case PGM_PAGE_STATE_ALLOCATED:
3890 break;
3891 case PGM_PAGE_STATE_BALLOONED:
3892 AssertFailed();
3893 break;
3894 case PGM_PAGE_STATE_ZERO:
3895 case PGM_PAGE_STATE_SHARED:
3896 case PGM_PAGE_STATE_WRITE_MONITORED:
3897 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
3898 AssertLogRelRCReturn(rc2, rc2);
3899 break;
3900 }
3901
3902 /* Get a ring-3 mapping of the address. */
3903 PPGMPAGEMAPTLBE pTlbe;
3904 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3905 AssertLogRelRCReturn(rc2, rc2);
3906
3907 /* Lock it and calculate the address. */
3908 if (fWritable)
3909 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3910 else
3911 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3912 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3913
3914 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
3915 }
3916 else
3917 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
3918
3919 /* else: handler catching all access, no pointer returned. */
3920 }
3921 else
3922 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
3923
3924 PGM_UNLOCK(pVM);
3925 return rc;
3926}
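/*
 * Usage sketch (illustrative only): take a direct mapping when possible and
 * fall back to the handler-aware PGMPhysWrite path otherwise.  pvBuf, cbBuf
 * and enmOrigin are hypothetical, and cbBuf is assumed to stay within one
 * guest page since only a single page is mapped.
 *
 * @code
 *     VBOXSTRICTRC   rcStrict = VINF_SUCCESS;
 *     void          *pv       = NULL;
 *     PGMPAGEMAPLOCK Lock;
 *     int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true, false, &pv, &Lock);  // writable, don't bypass handlers
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, pvBuf, cbBuf);                       // direct access
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 *     else if (   rc == VERR_PGM_PHYS_TLB_CATCH_WRITE
 *              || rc == VERR_PGM_PHYS_TLB_CATCH_ALL)
 *         rcStrict = PGMPhysWrite(pVM, GCPhys, pvBuf, cbBuf, enmOrigin);   // let the handlers see it
 * @endcode
 */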
3927
3928
3929/**
3930 * Checks if the given GCPhys page requires special handling for the given access
3931 * because it's MMIO or otherwise monitored.
3932 *
3933 * @returns VBox status code (no informational statuses).
3934 * @retval VINF_SUCCESS on success.
3935 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
3936 * access handler of some kind.
3937 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
3938 * accesses or is odd in any way.
3939 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
3940 *
3941 * @param pVM The cross context VM structure.
3942 * @param GCPhys The GC physical address to convert. Since this is
3943 * only used for filling the REM TLB, the A20 mask must
3944 * be applied before calling this API.
3945 * @param fWritable Whether write access is required.
3946 * @param fByPassHandlers Whether to bypass access handlers.
3947 *
3948 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
3949 * a stop gap thing that should be removed once there is a better TLB
3950 * for virtual address accesses.
3951 */
3952VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
3953{
3954 PGM_LOCK_VOID(pVM);
3955 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
3956
3957 PPGMRAMRANGE pRam;
3958 PPGMPAGE pPage;
3959 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3960 if (RT_SUCCESS(rc))
3961 {
3962 if (PGM_PAGE_IS_BALLOONED(pPage))
3963 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3964 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3965 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3966 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
3967 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
3968 rc = VINF_SUCCESS;
3969 else
3970 {
3971 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
3972 {
3973 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
3974 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
3975 }
3976 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
3977 {
3978 Assert(!fByPassHandlers);
3979 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
3980 }
3981 }
3982 }
3983
3984 PGM_UNLOCK(pVM);
3985 return rc;
3986}
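/*
 * Usage sketch (illustrative only): the query-only variant is handy when the
 * caller just needs to know whether a direct mapping would be safe, without
 * actually creating one.  fWritable is an assumed caller flag.
 *
 * @code
 *     int rc = PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false);   // false = don't bypass handlers
 *     bool const fDirectOk = (rc == VINF_SUCCESS);
 *     // VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL / _UNASSIGNED mean the
 *     // access must take the monitored (handler) path instead.
 * @endcode
 */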
3987
3988#ifdef VBOX_WITH_NATIVE_NEM
3989
3990/**
3991 * Interface used by NEM to check what to do on a memory access exit.
3992 *
3993 * @returns VBox status code.
3994 * @param pVM The cross context VM structure.
3995 * @param pVCpu The cross context per virtual CPU structure.
3996 * Optional.
3997 * @param GCPhys The guest physical address.
3998 * @param fMakeWritable Whether to try to make the page writable or not. If it
3999 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4000 * be returned and the return code will be unaffected.
4001 * @param pInfo Where to return the page information. This is
4002 * initialized even on failure.
4003 * @param pfnChecker Page in-sync checker callback. Optional.
4004 * @param pvUser User argument to pass to pfnChecker.
4005 */
4006VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4007 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4008{
4009 PGM_LOCK_VOID(pVM);
4010
4011 PPGMPAGE pPage;
4012 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4013 if (RT_SUCCESS(rc))
4014 {
4015 /* Try make it writable if requested. */
4016 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4017 if (fMakeWritable)
4018 switch (PGM_PAGE_GET_STATE(pPage))
4019 {
4020 case PGM_PAGE_STATE_SHARED:
4021 case PGM_PAGE_STATE_WRITE_MONITORED:
4022 case PGM_PAGE_STATE_ZERO:
4023 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4024 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4025 rc = VINF_SUCCESS;
4026 break;
4027 }
4028
4029 /* Fill in the info. */
4030 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4031 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4032 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4033 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4034 pInfo->enmType = enmType;
4035 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4036 switch (PGM_PAGE_GET_STATE(pPage))
4037 {
4038 case PGM_PAGE_STATE_ALLOCATED:
4039 pInfo->fZeroPage = 0;
4040 break;
4041
4042 case PGM_PAGE_STATE_ZERO:
4043 pInfo->fZeroPage = 1;
4044 break;
4045
4046 case PGM_PAGE_STATE_WRITE_MONITORED:
4047 pInfo->fZeroPage = 0;
4048 break;
4049
4050 case PGM_PAGE_STATE_SHARED:
4051 pInfo->fZeroPage = 0;
4052 break;
4053
4054 case PGM_PAGE_STATE_BALLOONED:
4055 pInfo->fZeroPage = 1;
4056 break;
4057
4058 default:
4059 pInfo->fZeroPage = 1;
4060 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4061 }
4062
4063 /* Call the checker and update NEM state. */
4064 if (pfnChecker)
4065 {
4066 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4067 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4068 }
4069
4070 /* Done. */
4071 PGM_UNLOCK(pVM);
4072 }
4073 else
4074 {
4075 PGM_UNLOCK(pVM);
4076
4077 pInfo->HCPhys = NIL_RTHCPHYS;
4078 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4079 pInfo->u2NemState = 0;
4080 pInfo->fHasHandlers = 0;
4081 pInfo->fZeroPage = 0;
4082 pInfo->enmType = PGMPAGETYPE_INVALID;
4083 }
4084
4085 return rc;
4086}
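/*
 * Usage sketch (illustrative only): how a NEM exit handler might use the
 * page-info query on a write fault; no checker callback is passed in this
 * minimal form, and the hypervisor remapping step is only hinted at.
 *
 * @code
 *     PGMPHYSNEMPAGEINFO Info;
 *     int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, true, &Info, NULL, NULL);  // true = try make writable
 *     if (RT_SUCCESS(rc) && (Info.fNemProt & NEM_PAGE_PROT_WRITE))
 *     {
 *         // Remap the page writable in the hypervisor using Info.HCPhys ...
 *     }
 * @endcode
 */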
4087
4088
4089/**
4090 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4091 * or higher.
4092 *
4093 * @returns VBox status code from callback.
4094 * @param pVM The cross context VM structure.
4095 * @param pVCpu The cross context per CPU structure. This is
4096 * optional as it's only for passing to the callback.
4097 * @param uMinState The minimum NEM state value to call on.
4098 * @param pfnCallback The callback function.
4099 * @param pvUser User argument for the callback.
4100 */
4101VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4102 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4103{
4104 /*
4105 * Just brute force this problem.
4106 */
4107 PGM_LOCK_VOID(pVM);
4108 int rc = VINF_SUCCESS;
4109 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4110 {
4111 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4112 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4113 {
4114 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4115 if (u2State < uMinState)
4116 { /* likely */ }
4117 else
4118 {
4119 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4120 if (RT_SUCCESS(rc))
4121 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4122 else
4123 break;
4124 }
4125 }
4126 }
4127 PGM_UNLOCK(pVM);
4128
4129 return rc;
4130}
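/*
 * Usage sketch (illustrative only): a hypothetical enumeration callback.  The
 * parameter list mirrors the call made in the loop above; the exact
 * PFNPGMPHYSNEMENUMCALLBACK typedef lives in the headers and may differ
 * slightly.
 *
 * @code
 *     static DECLCALLBACK(int) nemExampleUnmapCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
 *                                                      uint8_t *pu2State, void *pvUser)
 *     {
 *         RT_NOREF(pVM, pVCpu, pvUser);
 *         // e.g. tell the hypervisor to unmap GCPhys, then record the new state:
 *         *pu2State = 0;
 *         return VINF_SUCCESS;
 *     }
 *
 *     // ... later:  PGMPhysNemEnumPagesByState(pVM, pVCpu, 1, nemExampleUnmapCallback, NULL);
 * @endcode
 */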
4131
4132
4133/**
4134 * Helper for setting the NEM state for a range of pages.
4135 *
4136 * @param paPages Array of pages to modify.
4137 * @param cPages How many pages to modify.
4138 * @param u2State The new state value.
4139 */
4140void pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
4141{
4142 PPGMPAGE pPage = paPages;
4143 while (cPages-- > 0)
4144 {
4145 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
4146 pPage++;
4147 }
4148}
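/*
 * Usage sketch (illustrative only): resetting the NEM state of every page in a
 * RAM range, e.g. after (re)registering it with the hypervisor.  pRam and
 * u2State are assumed to be in scope.
 *
 * @code
 *     pgmPhysSetNemStateForPages(&pRam->aPages[0], pRam->cb >> X86_PAGE_SHIFT, u2State);
 * @endcode
 */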
4149
4150#endif /* VBOX_WITH_NATIVE_NEM */
4151