VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 105724

Last change on this file since 105724 was 104935, checked in by vboxsync, 5 months ago

VMM/PGM: Lockless pgmPhysGCPhys2R3Ptr variant for use in PGMGstQueryPage. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 210.4 KB
1/* $Id: PGMAllPhys.cpp 104935 2024-06-15 01:40:08Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/trpm.h>
36#include <VBox/vmm/vmm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include "PGMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include "PGMInline.h"
44#include <VBox/param.h>
45#include <VBox/err.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <VBox/log.h>
49#ifdef IN_RING3
50# include <iprt/thread.h>
51#elif defined(IN_RING0)
52# include <iprt/mem.h>
53# include <iprt/memobj.h>
54#endif
55
56
57/*********************************************************************************************************************************
58* Defined Constants And Macros *
59*********************************************************************************************************************************/
60/** Enable the physical TLB. */
61#define PGM_WITH_PHYS_TLB
62
63/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
64 * Checks if valid physical access handler return code (normal handler, not PF).
65 *
66 * Checks if the given strict status code is one of the expected ones for a
67 * physical access handler in the current context.
68 *
69 * @returns true or false.
70 * @param a_rcStrict The status code.
71 * @param a_fWrite Whether it is a write or read being serviced.
72 *
73 * @remarks We wish to keep the list of statuses here as short as possible.
74 * When changing, please make sure to update the PGMPhysRead,
75 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
76 */
77#ifdef IN_RING3
78# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
79 ( (a_rcStrict) == VINF_SUCCESS \
80 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
81#elif defined(IN_RING0)
82# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
83 ( (a_rcStrict) == VINF_SUCCESS \
84 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
85 \
86 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
87 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
88 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
89 \
90 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
91 || (a_rcStrict) == VINF_EM_DBG_STOP \
92 || (a_rcStrict) == VINF_EM_DBG_EVENT \
93 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
94 || (a_rcStrict) == VINF_EM_OFF \
95 || (a_rcStrict) == VINF_EM_SUSPEND \
96 || (a_rcStrict) == VINF_EM_RESET \
97 )
98#else
99# error "Context?"
100#endif
101
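/** @remarks Usage sketch for the macro above; the handler invocation and the
 *          variable names are illustrative assumptions, not taken from this file.
 * @code
 *      VBOXSTRICTRC rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf,
 *                                         PGMACCESSTYPE_WRITE, enmOrigin, uUser);
 *      AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true), // fWrite
 *                ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 *      if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
 *          memcpy(pvDst, pvBuf, cbBuf); // take the default action ourselves
 * @endcode
 */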
102/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
103 * Checks if valid virtual access handler return code (normal handler, not PF).
104 *
105 * Checks if the given strict status code is one of the expected ones for a
106 * virtual access handler in the current context.
107 *
108 * @returns true or false.
109 * @param a_rcStrict The status code.
110 * @param a_fWrite Whether it is a write or read being serviced.
111 *
112 * @remarks We wish to keep the list of statuses here as short as possible.
113 * When changing, please make sure to update the PGMPhysRead,
114 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
115 */
116#ifdef IN_RING3
117# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
118 ( (a_rcStrict) == VINF_SUCCESS \
119 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
120#elif defined(IN_RING0)
121# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
122 (false /* no virtual handlers in ring-0! */ )
123#else
124# error "Context?"
125#endif
126
127
128
129/**
130 * Calculate the actual table size.
131 *
132 * The memory is laid out like this:
133 * - PGMPHYSHANDLERTREE (8 bytes)
134 * - Allocation bitmap (8-byte size align)
135 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
136 */
137uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
138{
139 /*
140 * A minimum of 64 entries and a maximum of ~64K.
141 */
142 uint32_t cEntries = *pcEntries;
143 if (cEntries <= 64)
144 cEntries = 64;
145 else if (cEntries >= _64K)
146 cEntries = _64K;
147 else
148 cEntries = RT_ALIGN_32(cEntries, 16);
149
150 /*
151 * Do the initial calculation.
152 */
153 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
154 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
155 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
156 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
157
158 /*
159 * Align the total and try to use up the extra space from that.
160 */
161 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
162 uint32_t cAvail = (cbTotalAligned - cbTotal) / sizeof(PGMPHYSHANDLER);
163 cEntries += cAvail; /* Use the slack for additional entries... */
164 if (cAvail >= 1) /* ...then let the loop below shrink it until it fits again. */
165 for (;;)
166 {
167 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
168 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
169 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
170 cbTotal = cbTreeAndBitmap + cbTable;
171 if (cbTotal <= cbTotalAligned)
172 break;
173 cEntries--;
174 Assert(cEntries >= 16);
175 }
176
177 /*
178 * Return the result.
179 */
180 *pcbTreeAndBitmap = cbTreeAndBitmap;
181 *pcEntries = cEntries;
182 return cbTotalAligned;
183}
184
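/** @remarks A minimal usage sketch for pgmHandlerPhysicalCalcTableSizes(); the
 *          requested entry count is adjusted in place and the aligned total
 *          allocation size is returned (the allocation itself is not shown).
 * @code
 *      uint32_t cEntries        = 1024;    // requested; rounded/clamped on return
 *      uint32_t cbTreeAndBitmap = 0;
 *      uint32_t cbTotal         = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
 *      // Layout of the cbTotal sized block:
 *      //   [0 .. cbTreeAndBitmap)  - PGMPHYSHANDLERTREE followed by the allocation bitmap
 *      //   [cbTreeAndBitmap .. )   - slab of cEntries PGMPHYSHANDLER entries (64 byte aligned start)
 * @endcode
 */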
185
186
187/*********************************************************************************************************************************
188* Access Handlers for ROM and MMIO2 *
189*********************************************************************************************************************************/
190
191#ifndef IN_RING3
192
193/**
194 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
195 * \#PF access handler callback for guest ROM range write access.}
196 *
197 * @remarks The @a uUser argument is the ROM range index (into the apRomRanges table).
198 */
199DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
200 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
201
202{
203 AssertReturn(uUser < RT_ELEMENTS(pVM->pgmr0.s.apRomRanges), VINF_EM_RAW_EMULATE_INSTR);
204 PPGMROMRANGE const pRom = pVM->pgmr0.s.apRomRanges[uUser];
205 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
206
207 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
208 AssertReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), VERR_INTERNAL_ERROR_3);
209#ifdef IN_RING0
210 AssertReturn(iPage < pVM->pgmr0.s.acRomRangePages[uUser], VERR_INTERNAL_ERROR_2);
211#endif
212
213 RT_NOREF(uErrorCode, pvFault);
214 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
215
216 int rc;
217 switch (pRom->aPages[iPage].enmProt)
218 {
219 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
220 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
221 {
222 /*
223 * If it's a simple instruction which doesn't change the cpu state
224 * we will simply skip it. Otherwise we'll have to defer it to REM.
225 */
226 uint32_t cbOp;
227 PDISSTATE pDis = &pVCpu->pgm.s.Dis;
228 rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbOp);
229 if ( RT_SUCCESS(rc)
230 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
231 && !(pDis->x86.fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
232 {
233 switch (pDis->x86.bOpCode)
234 {
235 /** @todo Find other instructions we can safely skip, possibly
236 * adding this kind of detection to DIS or EM. */
237 case OP_MOV:
238 pCtx->rip += cbOp;
239 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
240 return VINF_SUCCESS;
241 }
242 }
243 break;
244 }
245
246 case PGMROMPROT_READ_RAM_WRITE_RAM:
247 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
248 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
249 AssertRC(rc);
250 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
251
252 case PGMROMPROT_READ_ROM_WRITE_RAM:
253 /* Handle it in ring-3 because it's *way* easier there. */
254 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
255 break;
256
257 default:
258 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
259 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
260 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
261 }
262
263 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
264 return VINF_EM_RAW_EMULATE_INSTR;
265}
266
267#endif /* !IN_RING3 */
268
269
270/**
271 * @callback_method_impl{FNPGMPHYSHANDLER,
272 * Access handler callback for ROM write accesses.}
273 *
274 * @remarks The @a uUser argument is the ROM range index (into the apRomRanges table).
275 */
276DECLCALLBACK(VBOXSTRICTRC)
277pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
278 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
279{
280 AssertReturn(uUser < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges), VERR_INTERNAL_ERROR_3);
281 PPGMROMRANGE const pRom = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges[uUser];
282 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
283
284 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
285 AssertReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), VERR_INTERNAL_ERROR_2);
286#ifdef IN_RING0
287 AssertReturn(iPage < pVM->pgmr0.s.acRomRangePages[uUser], VERR_INTERNAL_ERROR_2);
288#endif
289 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
290
291 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
292 RT_NOREF(pVCpu, pvPhys, enmOrigin);
293
294 if (enmAccessType == PGMACCESSTYPE_READ)
295 {
296 switch (pRomPage->enmProt)
297 {
298 /*
299 * Take the default action.
300 */
301 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
302 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
303 case PGMROMPROT_READ_ROM_WRITE_RAM:
304 case PGMROMPROT_READ_RAM_WRITE_RAM:
305 return VINF_PGM_HANDLER_DO_DEFAULT;
306
307 default:
308 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
309 pRom->aPages[iPage].enmProt, iPage, GCPhys),
310 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
311 }
312 }
313 else
314 {
315 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
316 switch (pRomPage->enmProt)
317 {
318 /*
319 * Ignore writes.
320 */
321 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
322 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
323 return VINF_SUCCESS;
324
325 /*
326 * Write to the RAM page.
327 */
328 case PGMROMPROT_READ_ROM_WRITE_RAM:
329 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
330 {
331 /* This should be impossible now, pvPhys doesn't work across pages any longer. */
332 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
333
334 /*
335 * Take the lock, do lazy allocation, map the page and copy the data.
336 *
337 * Note that we have to bypass the mapping TLB since it works on
338 * guest physical addresses and entering the shadow page would
339 * kind of screw things up...
340 */
341 PGM_LOCK_VOID(pVM);
342
343 PPGMPAGE pShadowPage = &pRomPage->Shadow;
344 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
345 {
346 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
347 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
348 }
349
350 void *pvDstPage;
351 int rc;
352#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
353 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
354 {
355 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
356 rc = VINF_SUCCESS;
357 }
358 else
359#endif
360 {
361 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK, &pvDstPage);
362 if (RT_SUCCESS(rc))
363 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
364 }
365 if (RT_SUCCESS(rc))
366 {
367 memcpy(pvDstPage, pvBuf, cbBuf); /* Note: pvDstPage already points at the exact byte to write (see both branches above). */
368 pRomPage->LiveSave.fWrittenTo = true;
369
370 AssertMsg( rc == VINF_SUCCESS
371 || ( rc == VINF_PGM_SYNC_CR3
372 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
373 , ("%Rrc\n", rc));
374 rc = VINF_SUCCESS;
375 }
376
377 PGM_UNLOCK(pVM);
378 return rc;
379 }
380
381 default:
382 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
383 pRom->aPages[iPage].enmProt, iPage, GCPhys),
384 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
385 }
386 }
387}
388
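/** @remarks The PGMROMPROT values handled above form a simple 2x2 matrix: what
 *          reads return (ROM vs shadow RAM) and where writes end up (ignored vs
 *          shadow RAM). A hypothetical helper, purely for illustration:
 * @code
 *  DECLINLINE(bool) romProtWritesGoToShadowRam(PGMROMPROT enmProt)
 *  {
 *      return enmProt == PGMROMPROT_READ_ROM_WRITE_RAM    // reads see ROM, writes land in shadow RAM
 *          || enmProt == PGMROMPROT_READ_RAM_WRITE_RAM;   // reads and writes both use shadow RAM
 *      // The two *_WRITE_IGNORE values silently drop writes (VINF_SUCCESS above).
 *  }
 * @endcode
 */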
389
390/**
391 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
392 */
393static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
394{
395 /*
396 * Get the MMIO2 range.
397 */
398 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), VERR_INTERNAL_ERROR_3);
399 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
400 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[hMmio2 - 1];
401 Assert(pMmio2->idMmio2 == hMmio2);
402 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
403 VERR_INTERNAL_ERROR_4);
404
405 /*
406 * Get the page and make sure it's an MMIO2 page.
407 */
408 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
409 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
410 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
411
412 /*
413 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
414 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
415 * page is dirty, saving the need for additional storage (bitmap).)
416 */
417 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
418
419 /*
420 * Disable the handler for this page.
421 */
422 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->GCPhys, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
423 AssertRC(rc);
424#ifndef IN_RING3
425 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
426 {
427 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
428 AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT,
429 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
430 }
431#else
432 RT_NOREF(pVCpu, GCPtr);
433#endif
434 return VINF_SUCCESS;
435}
436
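/** @remarks Sketch of the dirty-page tracking cycle driven by the worker above;
 *          the ring-3 harvesting call is named from memory and should be treated
 *          as an assumption, only the flag/handler names come from this file.
 * @code
 *  // 1. The MMIO2 page is write-protected; the first write traps into the handlers below.
 *  // 2. pgmPhysMmio2WriteHandlerCommon() sets PGMREGMMIO2RANGE_F_IS_DIRTY and calls
 *  //    PGMHandlerPhysicalPageTempOff() so further writes to that page run untraced.
 *  // 3. Ring-3 later harvests the dirty pages and re-arms the write protection, e.g.:
 *  uint64_t bmDirty[512]; // enough for a 128 MiB region with 4 KiB guest pages
 *  int rc = PGMR3PhysMmio2QueryAndResetDirtyBitmap(pVM, pDevIns, hMmio2, bmDirty, sizeof(bmDirty));
 * @endcode
 */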
437
438#ifndef IN_RING3
439/**
440 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
441 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
442 *
443 * @remarks The @a uUser is the MMIO2 index.
444 */
445DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
446 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
447{
448 RT_NOREF(pVCpu, uErrorCode, pCtx);
449 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
450 if (RT_SUCCESS(rcStrict))
451 {
452 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
453 PGM_UNLOCK(pVM);
454 }
455 return rcStrict;
456}
457#endif /* !IN_RING3 */
458
459
460/**
461 * @callback_method_impl{FNPGMPHYSHANDLER,
462 * Access handler callback for MMIO2 dirty page tracing.}
463 *
464 * @remarks The @a uUser is the MMIO2 index.
465 */
466DECLCALLBACK(VBOXSTRICTRC)
467pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
468 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
469{
470 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
471 if (RT_SUCCESS(rcStrict))
472 {
473 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
474 PGM_UNLOCK(pVM);
475 if (rcStrict == VINF_SUCCESS)
476 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
477 }
478 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
479 return rcStrict;
480}
481
482
483
484/*********************************************************************************************************************************
485* RAM Ranges *
486*********************************************************************************************************************************/
487
488#ifdef VBOX_STRICT
489/**
490 * Asserts that the RAM range structures are sane.
491 */
492DECLHIDDEN(bool) pgmPhysAssertRamRangesLocked(PVMCC pVM, bool fInUpdate, bool fRamRelaxed)
493{
494 bool fRet = true;
495
496 /*
497 * Check the generation ID. This is stable since we own the PGM lock.
498 */
499 AssertStmt((pVM->pgm.s.RamRangeUnion.idGeneration & 1U) == (unsigned)fInUpdate, fRet = false);
500
501 /*
502 * Check the entry count and max ID.
503 */
504 uint32_t const idRamRangeMax = pVM->pgm.s.idRamRangeMax;
505 /* Since this is set to the highest ID, it cannot be the same as the table size. */
506 AssertStmt(idRamRangeMax < RT_ELEMENTS(pVM->pgm.s.apRamRanges), fRet = false);
507
508 /* Because ID=0 is reserved, it's one less than the table size and at most the
509 same as the max ID. */
510 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
511 AssertStmt(cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), fRet = false);
512 AssertStmt(cLookupEntries <= idRamRangeMax, fRet = false);
513
514 /*
515 * Check the pointer table(s).
516 */
517 /* The first entry shall be empty. */
518 AssertStmt(pVM->pgm.s.apRamRanges[0] == NULL, fRet = false);
519# ifdef IN_RING0
520 AssertStmt(pVM->pgmr0.s.apRamRanges[0] == NULL, fRet = false);
521 AssertStmt(pVM->pgmr0.s.acRamRangePages[0] == 0, fRet = false);
522# endif
523
524 uint32_t cMappedRanges = 0;
525 for (uint32_t idRamRange = 1; idRamRange <= idRamRangeMax; idRamRange++)
526 {
527# ifdef IN_RING0
528 PPGMRAMRANGE const pRamRange = pVM->pgmr0.s.apRamRanges[idRamRange];
529 AssertContinueStmt(pRamRange, fRet = false);
530 AssertStmt(pVM->pgm.s.apRamRanges[idRamRange] != NIL_RTR3PTR, fRet = false);
531 AssertStmt( (pRamRange->cb >> GUEST_PAGE_SHIFT) == pVM->pgmr0.s.acRamRangePages[idRamRange]
532 || ( (pRamRange->cb >> GUEST_PAGE_SHIFT) < pVM->pgmr0.s.acRamRangePages[idRamRange]
533 && !(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX)),
534 fRet = false);
535# else
536 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apRamRanges[idRamRange];
537 AssertContinueStmt(pRamRange, fRet = false);
538# endif
539 AssertStmt(pRamRange->idRange == idRamRange, fRet = false);
540 if (pRamRange->GCPhys != NIL_RTGCPHYS)
541 {
542 cMappedRanges++;
543 AssertStmt((pRamRange->GCPhys & GUEST_PAGE_OFFSET_MASK) == 0, fRet = false);
544 AssertStmt((pRamRange->GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, fRet = false);
545 AssertStmt(pRamRange->GCPhysLast > pRamRange->GCPhys, fRet = false);
546 AssertStmt(pRamRange->GCPhysLast - pRamRange->GCPhys + 1U == pRamRange->cb, fRet = false);
547 }
548 else
549 {
550 AssertStmt(pRamRange->GCPhysLast == NIL_RTGCPHYS, fRet = false);
551 AssertStmt(PGM_RAM_RANGE_IS_AD_HOC(pRamRange) || fRamRelaxed, fRet = false);
552 }
553 }
554
555 /*
556 * Check that the lookup table is sorted and contains the right information.
557 */
558 AssertMsgStmt(cMappedRanges == cLookupEntries,
559 ("cMappedRanges=%#x cLookupEntries=%#x\n", cMappedRanges, cLookupEntries),
560 fRet = false);
561 RTGCPHYS GCPhysPrev = ~(RTGCPHYS)0;
562 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
563 {
564 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
565 AssertContinueStmt(idRamRange > 0 && idRamRange <= idRamRangeMax, fRet = false);
566 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm,pgmr0,pgmrc).s.apRamRanges[idRamRange];
567 AssertContinueStmt(pRamRange, fRet = false);
568
569 AssertStmt(pRamRange->idRange == idRamRange, fRet = false);
570 AssertStmt(pRamRange->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]),
571 fRet = false);
572 AssertStmt(pRamRange->GCPhysLast == pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast, fRet = false);
573
574 AssertStmt(pRamRange->GCPhys >= GCPhysPrev + 1U, fRet = false);
575 GCPhysPrev = pRamRange->GCPhysLast;
576 }
577
578 return fRet;
579}
580#endif /* VBOX_STRICT */
581
582
583/**
584 * Invalidates the RAM range TLBs.
585 *
586 * @param pVM The cross context VM structure.
587 */
588void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
589{
590 PGM_LOCK_VOID(pVM);
591
592 /* This is technically only required when freeing the PCNet MMIO2 range
593 during ancient saved state loading. The code freeing the RAM range
594 will make sure this function is called in both rings. */
595 RT_ZERO(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb);
596 VMCC_FOR_EACH_VMCPU_STMT(pVM, RT_ZERO(pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb));
597
598 PGM_UNLOCK(pVM);
599}
600
601
602/**
603 * Tests if a value of type RTGCPHYS is negative if the type had been signed
604 * instead of unsigned.
605 *
606 * @returns @c true if negative, @c false if positive or zero.
607 * @param a_GCPhys The value to test.
608 * @todo Move me to iprt/types.h.
609 */
610#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
611
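/** @remarks This macro backs the unsigned range-check idiom used by the lookup
 *          loops below: a single subtraction classifies an address as inside,
 *          below or above an entry (illustrative sketch, local names made up):
 * @code
 *      RTGCPHYS const First    = PGMRAMRANGELOOKUPENTRY_GET_FIRST(Entry);
 *      RTGCPHYS const cbMinus1 = Entry.GCPhysLast - First;     // entry size minus one
 *      RTGCPHYS const off      = GCPhys - First;               // wraps if GCPhys < First
 *      if (off <= cbMinus1)
 *          ;                                   // hit: GCPhys lies inside the entry
 *      else if (RTGCPHYS_IS_NEGATIVE(off))
 *          ;                                   // GCPhys is below the entry: binary search goes left
 *      else
 *          ;                                   // GCPhys is above the entry: binary search goes right
 * @endcode
 */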
612
613/**
614 * Slow worker for pgmPhysGetRange.
615 *
616 * @copydoc pgmPhysGetRange
617 * @note Caller owns the PGM lock.
618 */
619DECLHIDDEN(PPGMRAMRANGE) pgmPhysGetRangeSlow(PVMCC pVM, RTGCPHYS GCPhys)
620{
621 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
622
623 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
624 uint32_t idxStart = 0;
625 for (;;)
626 {
627 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
628 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
629 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
630 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
631 if (off <= cbEntryMinus1)
632 {
633 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
634 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
635 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
636 Assert(pRamRange);
637 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
638 return pRamRange;
639 }
640 if (RTGCPHYS_IS_NEGATIVE(off))
641 {
642 if (idxStart < idxLookup)
643 idxEnd = idxLookup;
644 else
645 break;
646 }
647 else
648 {
649 idxLookup += 1;
650 if (idxLookup < idxEnd)
651 idxStart = idxLookup;
652 else
653 break;
654 }
655 }
656 return NULL;
657}
658
659
660/**
661 * Slow worker for pgmPhysGetRangeAtOrAbove.
662 *
663 * @copydoc pgmPhysGetRangeAtOrAbove
664 */
665DECLHIDDEN(PPGMRAMRANGE) pgmPhysGetRangeAtOrAboveSlow(PVMCC pVM, RTGCPHYS GCPhys)
666{
667 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
668
669 uint32_t idRamRangeLastLeft = UINT32_MAX;
670 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
671 uint32_t idxStart = 0;
672 for (;;)
673 {
674 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
675 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
676 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
677 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
678 if (off <= cbEntryMinus1)
679 {
680 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
681 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
682 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
683 Assert(pRamRange);
684 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
685 return pRamRange;
686 }
687 if (RTGCPHYS_IS_NEGATIVE(off))
688 {
689 idRamRangeLastLeft = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
690 if (idxStart < idxLookup)
691 idxEnd = idxLookup;
692 else
693 break;
694 }
695 else
696 {
697 idxLookup += 1;
698 if (idxLookup < idxEnd)
699 idxStart = idxLookup;
700 else
701 break;
702 }
703 }
704 if (idRamRangeLastLeft != UINT32_MAX)
705 {
706 AssertReturn(idRamRangeLastLeft < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
707 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRangeLastLeft];
708 Assert(pRamRange);
709 return pRamRange;
710 }
711 return NULL;
712}
713
714
715/**
716 * Slow worker for pgmPhysGetPage.
717 *
718 * @copydoc pgmPhysGetPage
719 */
720DECLHIDDEN(PPGMPAGE) pgmPhysGetPageSlow(PVMCC pVM, RTGCPHYS GCPhys)
721{
722 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
723
724 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
725 uint32_t idxStart = 0;
726 for (;;)
727 {
728 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
729 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
730 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
731 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
732 if (off <= cbEntryMinus1)
733 {
734 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
735 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
736 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
737 AssertReturn(pRamRange, NULL);
738 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
739
740 /* Get the page. */
741 Assert(off < pRamRange->cb);
742 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
743#ifdef IN_RING0
744 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], NULL);
745#endif
746 return &pRamRange->aPages[idxPage];
747 }
748 if (RTGCPHYS_IS_NEGATIVE(off))
749 {
750 if (idxStart < idxLookup)
751 idxEnd = idxLookup;
752 else
753 break;
754 }
755 else
756 {
757 idxLookup += 1;
758 if (idxLookup < idxEnd)
759 idxStart = idxLookup;
760 else
761 break;
762 }
763 }
764 return NULL;
765}
766
767
768/**
769 * Slow worker for pgmPhysGetPageEx.
770 *
771 * @copydoc pgmPhysGetPageEx
772 */
773DECLHIDDEN(int) pgmPhysGetPageExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
774{
775 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
776
777 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
778 uint32_t idxStart = 0;
779 for (;;)
780 {
781 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
782 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
783 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
784 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
785 if (off <= cbEntryMinus1)
786 {
787 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
788 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_PGM_PHYS_RAM_LOOKUP_IPE);
789 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
790 AssertReturn(pRamRange, VERR_PGM_PHYS_RAM_LOOKUP_IPE);
791 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
792
793 /* Get the page. */
794 Assert(off < pRamRange->cb);
795 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
796#ifdef IN_RING0
797 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], VERR_PGM_PHYS_RAM_LOOKUP_IPE);
798#endif
799 *ppPage = &pRamRange->aPages[idxPage];
800 return VINF_SUCCESS;
801 }
802 if (RTGCPHYS_IS_NEGATIVE(off))
803 {
804 if (idxStart < idxLookup)
805 idxEnd = idxLookup;
806 else
807 break;
808 }
809 else
810 {
811 idxLookup += 1;
812 if (idxLookup < idxEnd)
813 idxStart = idxLookup;
814 else
815 break;
816 }
817 }
818
819 *ppPage = NULL;
820 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
821}
822
823
824/**
825 * Slow worker for pgmPhysGetPageAndRangeEx.
826 *
827 * @copydoc pgmPhysGetPageAndRangeEx
828 */
829DECLHIDDEN(int) pgmPhysGetPageAndRangeExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
830{
831 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
832
833 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
834 uint32_t idxStart = 0;
835 for (;;)
836 {
837 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
838 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
839 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
840 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
841 if (off <= cbEntryMinus1)
842 {
843 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
844 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_PGM_PHYS_RAM_LOOKUP_IPE);
845 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
846 AssertReturn(pRamRange, VERR_PGM_PHYS_RAM_LOOKUP_IPE);
847 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
848
849 /* Get the page. */
850 Assert(off < pRamRange->cb);
851 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
852#ifdef IN_RING0
853 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], VERR_PGM_PHYS_RAM_LOOKUP_IPE);
854#endif
855 *ppRam = pRamRange;
856 *ppPage = &pRamRange->aPages[idxPage];
857 return VINF_SUCCESS;
858 }
859 if (RTGCPHYS_IS_NEGATIVE(off))
860 {
861 if (idxStart < idxLookup)
862 idxEnd = idxLookup;
863 else
864 break;
865 }
866 else
867 {
868 idxLookup += 1;
869 if (idxLookup < idxEnd)
870 idxStart = idxLookup;
871 else
872 break;
873 }
874 }
875
876 *ppRam = NULL;
877 *ppPage = NULL;
878 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
879}
880
881
882/**
883 * Slow worker for pgmPhysGetPageAndRangeExLockless.
884 *
885 * @copydoc pgmPhysGetPageAndRangeExLockless
886 */
887DECLHIDDEN(int) pgmPhysGetPageAndRangeExSlowLockless(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
888 PGMPAGE volatile **ppPage, PGMRAMRANGE volatile **ppRam)
889{
890 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,RamRangeTlbMisses));
891
892 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT RamRangeUnion;
893 RamRangeUnion.u64Combined = ASMAtomicUoReadU64(&pVM->pgm.s.RamRangeUnion.u64Combined);
894
895 uint32_t idxEnd = RT_MIN(RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
896 uint32_t idxStart = 0;
897 for (;;)
898 {
899 /* Read the entry as atomically as possible: */
900 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
901 PGMRAMRANGELOOKUPENTRY Entry;
902#if (RTASM_HAVE_READ_U128+0) & 1
903 Entry.u128Normal = ASMAtomicUoReadU128U(&pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile);
904#else
905 Entry.u128Normal.s.Lo = pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile.s.Lo;
906 Entry.u128Normal.s.Hi = pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile.s.Hi;
907 ASMCompilerBarrier(); /*paranoia^2*/
908 if (RT_LIKELY(Entry.u128Normal.s.Lo == pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile.s.Lo))
909 { /* likely */ }
910 else
911 break;
912#endif
913
914 /* Check how GCPhys relates to the entry: */
915 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(Entry);
916 RTGCPHYS const cbEntryMinus1 = Entry.GCPhysLast - GCPhysEntryFirst;
917 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
918 if (off <= cbEntryMinus1)
919 {
920 /* We seem to have a match. If, however, anything doesn't match up
921 bail and redo owning the lock. No asserting here as we may be
922 racing removal/insertion. */
923 if (!RTGCPHYS_IS_NEGATIVE(off))
924 {
925 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(Entry);
926 if (idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges))
927 {
928 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
929 if (pRamRange)
930 {
931 if ( pRamRange->GCPhys == GCPhysEntryFirst
932 && pRamRange->cb == cbEntryMinus1 + 1U)
933 {
934 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
935#ifdef IN_RING0
936 if (idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange])
937#endif
938 {
939 pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
940 *ppRam = pRamRange;
941 *ppPage = &pRamRange->aPages[idxPage];
942 return VINF_SUCCESS;
943 }
944 }
945 }
946 }
947 }
948 break;
949 }
950 if (RTGCPHYS_IS_NEGATIVE(off))
951 {
952 if (idxStart < idxLookup)
953 idxEnd = idxLookup;
954 else
955 break;
956 }
957 else
958 {
959 idxLookup += 1;
960 if (idxLookup < idxEnd)
961 idxStart = idxLookup;
962 else
963 break;
964 }
965 }
966
967 /*
968 * If we get down here, we do the lookup again but while owning the PGM lock.
969 */
970 *ppRam = NULL;
971 *ppPage = NULL;
972 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,RamRangeTlbLocking));
973
974 PGM_LOCK_VOID(pVM);
975 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, (PPGMPAGE *)ppPage, (PPGMRAMRANGE *)ppRam);
976 PGM_UNLOCK(pVM);
977
978 PGMRAMRANGE volatile * const pRam = *ppRam;
979 if (pRam)
980 pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = (PPGMRAMRANGE)pRam;
981 return rc;
982}
983
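/** @remarks The lockless worker above follows an optimistic read pattern; in
 *          reduced form (simplified, details omitted):
 * @code
 *  PGMPAGE volatile     *pPage = NULL;
 *  PGMRAMRANGE volatile *pRam  = NULL;
 *  // 1. Read the lookup entry without the PGM lock (128-bit atomic read when
 *  //    available, otherwise read both halves and re-check the low half).
 *  // 2. If everything is self-consistent (valid range ID, non-NULL pointer,
 *  //    GCPhys/size match the entry), publish to the per-VCpu TLB and return.
 *  // 3. On any mismatch - we may be racing insertion/removal - fall back:
 *  PGM_LOCK_VOID(pVM);
 *  int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, (PPGMPAGE *)&pPage, (PPGMRAMRANGE *)&pRam);
 *  PGM_UNLOCK(pVM);
 * @endcode
 */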
984
985/**
986 * Common worker for pgmR3PhysAllocateRamRange, PGMR0PhysAllocateRamRangeReq,
987 * and pgmPhysMmio2RegisterWorker2.
988 */
989DECLHIDDEN(int) pgmPhysRamRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint32_t fFlags, uint32_t *pidNewRange)
990{
991
992 /*
993 * Allocate the RAM range structure and map it into ring-3.
994 */
995 size_t const cbRamRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), HOST_PAGE_SIZE);
996#ifdef IN_RING0
997 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
998 int rc = RTR0MemObjAllocPage(&hMemObj, cbRamRange, false /*fExecutable*/);
999#else
1000 PPGMRAMRANGE pRamRange;
1001 int rc = SUPR3PageAlloc(cbRamRange >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pRamRange);
1002#endif
1003 if (RT_SUCCESS(rc))
1004 {
1005 /* Zero the memory and do basic range init before mapping it into userland. */
1006#ifdef IN_RING0
1007 PPGMRAMRANGE const pRamRange = (PPGMRAMRANGE)RTR0MemObjAddress(hMemObj);
1008 if (!RTR0MemObjWasZeroInitialized(hMemObj))
1009#endif
1010 RT_BZERO(pRamRange, cbRamRange);
1011
1012 pRamRange->GCPhys = NIL_RTGCPHYS;
1013 pRamRange->cb = (RTGCPHYS)cPages << GUEST_PAGE_SHIFT;
1014 pRamRange->GCPhysLast = NIL_RTGCPHYS;
1015 pRamRange->fFlags = fFlags;
1016 pRamRange->idRange = UINT32_MAX / 2;
1017
1018#ifdef IN_RING0
1019 /* Map it into userland. */
1020 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
1021 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/,
1022 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
1023 if (RT_SUCCESS(rc))
1024#endif
1025 {
1026 /*
1027 * Grab the lock (unlikely to fail or block as caller typically owns it already).
1028 */
1029 rc = PGM_LOCK(pVM);
1030 if (RT_SUCCESS(rc))
1031 {
1032 /*
1033 * Allocate a range ID.
1034 */
1035 uint32_t idRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.idRamRangeMax + 1;
1036 if (idRamRange != 0 && idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges))
1037 {
1038#ifdef IN_RING0
1039 if (pVM->pgmr0.s.apRamRanges[idRamRange] == NULL)
1040#endif
1041 {
1042 if (pVM->pgm.s.apRamRanges[idRamRange] == NIL_RTR3PTR)
1043 {
1044 /*
1045 * Commit it.
1046 */
1047#ifdef IN_RING0
1048 pVM->pgmr0.s.apRamRanges[idRamRange] = pRamRange;
1049 pVM->pgmr0.s.acRamRangePages[idRamRange] = cPages;
1050 pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange] = hMemObj;
1051 pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] = hMapObj;
1052 pVM->pgmr0.s.idRamRangeMax = idRamRange;
1053#endif
1054
1055 pVM->pgm.s.idRamRangeMax = idRamRange;
1056#ifdef IN_RING0
1057 pVM->pgm.s.apRamRanges[idRamRange] = RTR0MemObjAddressR3(hMapObj);
1058#else
1059 pVM->pgm.s.apRamRanges[idRamRange] = pRamRange;
1060#endif
1061
1062 pRamRange->idRange = idRamRange;
1063 *pidNewRange = idRamRange;
1064
1065 PGM_UNLOCK(pVM);
1066 return VINF_SUCCESS;
1067 }
1068 }
1069
1070 /*
1071 * Bail out.
1072 */
1073 rc = VERR_INTERNAL_ERROR_5;
1074 }
1075 else
1076 rc = VERR_PGM_TOO_MANY_RAM_RANGES;
1077 PGM_UNLOCK(pVM);
1078 }
1079#ifdef IN_RING0
1080 RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
1081#endif
1082 }
1083#ifdef IN_RING0
1084 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
1085#else
1086 SUPR3PageFree(pRamRange, cbRamRange >> HOST_PAGE_SHIFT);
1087#endif
1088 }
1089 *pidNewRange = UINT32_MAX;
1090 return rc;
1091}
1092
1093
1094#ifdef IN_RING0
1095/**
1096 * This is called during VM initialization to allocate a RAM range.
1097 *
1098 * The range is not entered into the lookup table; that is something the caller
1099 * has to do. The PGMPAGE entries are zero'ed, but otherwise uninitialized.
1100 *
1101 * @returns VBox status code.
1102 * @param pGVM Pointer to the global VM structure.
1103 * @param pReq Where to get the parameters and return the range ID.
1104 * @thread EMT(0)
1105 */
1106VMMR0_INT_DECL(int) PGMR0PhysAllocateRamRangeReq(PGVM pGVM, PPGMPHYSALLOCATERAMRANGEREQ pReq)
1107{
1108 /*
1109 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1110 * while we're here).
1111 */
1112 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1113 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1114
1115 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1116
1117 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE);
1118 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_RAM_RANGE, VERR_OUT_OF_RANGE);
1119
1120 AssertMsgReturn(!(pReq->fFlags & ~(uint32_t)PGM_RAM_RANGE_FLAGS_VALID_MASK), ("fFlags=%#RX32\n", pReq->fFlags),
1121 VERR_INVALID_FLAGS);
1122
1123 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1124 VMSTATE const enmState = pGVM->enmVMState;
1125 AssertMsgReturn(enmState == VMSTATE_CREATING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1126 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1127
1128 /*
1129 * Call common worker.
1130 */
1131 return pgmPhysRamRangeAllocCommon(pGVM, pReq->cGuestPages, pReq->fFlags, &pReq->idNewRange);
1132}
1133#endif /* IN_RING0 */
1134
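/** @remarks A sketch of how ring-3 might populate the request consumed by
 *          PGMR0PhysAllocateRamRangeReq() above; the VMMR0 operation constant
 *          and the call used to pass it down are assumptions, only the request
 *          fields come from the validation code above.
 * @code
 *      PGMPHYSALLOCATERAMRANGEREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.cbGuestPage  = GUEST_PAGE_SIZE;
 *      Req.cGuestPages  = (uint32_t)(cb >> GUEST_PAGE_SHIFT);  // cb: desired range size (placeholder)
 *      Req.fFlags       = 0;                                   // or PGM_RAM_RANGE_FLAGS_* as needed
 *      Req.idNewRange   = UINT32_MAX;
 *      int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_PHYS_ALLOCATE_RAM_RANGE, 0, &Req.Hdr);
 * @endcode
 */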
1135
1136/**
1137 * Frees a RAM range.
1138 *
1139 * This is not a typical occurrence. Currently only used for a special MMIO2
1140 * saved state compatibility scenario involving PCNet and state saved before
1141 * VBox v4.3.6.
1142 */
1143static int pgmPhysRamRangeFree(PVMCC pVM, PPGMRAMRANGE pRamRange)
1144{
1145 /*
1146 * Some basic input validation.
1147 */
1148 AssertPtrReturn(pRamRange, VERR_INVALID_PARAMETER);
1149 uint32_t const idRamRange = ASMAtomicReadU32(&pRamRange->idRange);
1150 ASMCompilerBarrier();
1151 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_INVALID_PARAMETER);
1152 AssertReturn(pRamRange == pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange], VERR_INVALID_PARAMETER);
1153 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_RESOURCE_BUSY);
1154
1155 /*
1156 * Kill the range pointers and associated data.
1157 */
1158 pVM->pgm.s.apRamRanges[idRamRange] = NIL_RTR3PTR;
1159#ifdef IN_RING0
1160 pVM->pgmr0.s.apRamRanges[idRamRange] = NULL;
1161#endif
1162
1163 /*
1164 * Zap the pages and other RAM ranges properties to ensure there aren't any
1165 * stale references to anything hanging around should the freeing go awry.
1166 */
1167#ifdef IN_RING0
1168 uint32_t const cPages = pVM->pgmr0.s.acRamRangePages[idRamRange];
1169 pVM->pgmr0.s.acRamRangePages[idRamRange] = 0;
1170#else
1171 uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT;
1172#endif
1173 RT_BZERO(pRamRange->aPages, cPages * sizeof(pRamRange->aPages[0]));
1174
1175 pRamRange->fFlags = UINT32_MAX;
1176 pRamRange->cb = NIL_RTGCPHYS;
1177 pRamRange->pbR3 = NIL_RTR3PTR;
1178 pRamRange->pszDesc = NIL_RTR3PTR;
1179 pRamRange->paLSPages = NIL_RTR3PTR;
1180 pRamRange->idRange = UINT32_MAX / 8;
1181
1182 /*
1183 * Free the RAM range itself.
1184 */
1185#ifdef IN_RING0
1186 Assert(pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] != NIL_RTR0MEMOBJ);
1187 int rc = RTR0MemObjFree(pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange], true /*fFreeMappings*/);
1188 if (RT_SUCCESS(rc))
1189 {
1190 pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] = NIL_RTR0MEMOBJ;
1191 rc = RTR0MemObjFree(pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange], true /*fFreeMappings*/);
1192 if (RT_SUCCESS(rc))
1193 pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange] = NIL_RTR0MEMOBJ;
1194 }
1195#else
1196 size_t const cbRamRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), HOST_PAGE_SIZE);
1197 int rc = SUPR3PageFree(pRamRange, cbRamRange >> HOST_PAGE_SHIFT);
1198#endif
1199
1200 /*
1201 * Decrease the max ID if removal was successful and this was the final
1202 * RAM range entry.
1203 */
1204 if ( RT_SUCCESS(rc)
1205 && idRamRange == pVM->CTX_EXPR(pgm, pgmr0, pgm).s.idRamRangeMax)
1206 {
1207 pVM->pgm.s.idRamRangeMax = idRamRange - 1;
1208#ifdef IN_RING0
1209 pVM->pgmr0.s.idRamRangeMax = idRamRange - 1;
1210#endif
1211 }
1212
1213 /*
1214 * Make sure the RAM range TLB does not contain any stale pointers to this range.
1215 */
1216 pgmPhysInvalidRamRangeTlbs(pVM);
1217 return rc;
1218}
1219
1220
1221
1222/*********************************************************************************************************************************
1223* MMIO2 *
1224*********************************************************************************************************************************/
1225
1226/**
1227 * Calculates the number of chunks.
1228 *
1229 * @returns Number of registration chunks needed.
1230 * @param cb The size of the MMIO/MMIO2 range.
1231 * @param pcPagesPerChunk Where to return the number of guest pages tracked by
1232 * each chunk. Optional.
1233 */
1234DECLHIDDEN(uint16_t) pgmPhysMmio2CalcChunkCount(RTGCPHYS cb, uint32_t *pcPagesPerChunk)
1235{
1236 /*
1237 * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be
1238 * needing a few extra bytes for the PGMREGMMIO2RANGE structure.
1239 *
1240 * Note! In addition, we've got a 24 bit sub-page range for MMIO2 ranges, leaving
1241 * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
1242 */
1243 AssertCompile(PGM_MAX_PAGES_PER_RAM_RANGE < _16M);
1244 uint32_t const cPagesPerChunk = PGM_MAX_PAGES_PER_RAM_RANGE;
1245
1246 if (pcPagesPerChunk)
1247 *pcPagesPerChunk = cPagesPerChunk;
1248
1249 /* Calc the number of chunks we need. */
1250 RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT;
1251 uint16_t cChunks = (uint16_t)((cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk);
1252#ifdef IN_RING3
1253 AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages);
1254#else
1255 AssertReturn((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages, 0);
1256#endif
1257 return cChunks;
1258}
1259
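/** @remarks Usage sketch: callers size the registration first and then hand both
 *          values to the registration worker; cbMmio2 is a placeholder for the
 *          region size here.
 * @code
 *      uint32_t cPagesPerChunk = 0;
 *      uint16_t const cChunks  = pgmPhysMmio2CalcChunkCount(cbMmio2, &cPagesPerChunk);
 *      AssertReturn(cChunks > 0, VERR_PGM_PHYS_MMIO_EX_IPE);
 *      // All chunks except the last track cPagesPerChunk pages; the last chunk
 *      // tracks whatever remains (see pgmPhysMmio2RegisterWorker below).
 * @endcode
 */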
1260
1261/**
1262 * Worker for PGMR3PhysMmio2Register and PGMR0PhysMmio2RegisterReq.
1263 *
1264 * (The caller already knows which MMIO2 region ID will be assigned and how many
1265 * chunks will be used, so no output parameters are required.)
1266 */
1267DECLHIDDEN(int) pgmPhysMmio2RegisterWorker(PVMCC pVM, uint32_t const cGuestPages, uint8_t const idMmio2,
1268 uint8_t const cChunks, PPDMDEVINSR3 const pDevIns,
1269 uint8_t const iSubDev, uint8_t const iRegion, uint32_t const fFlags)
1270{
1271 /*
1272 * Get the number of pages per chunk.
1273 */
1274 uint32_t cGuestPagesPerChunk;
1275 AssertReturn(pgmPhysMmio2CalcChunkCount((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT, &cGuestPagesPerChunk) == cChunks,
1276 VERR_PGM_PHYS_MMIO_EX_IPE);
1277 Assert(idMmio2 != 0);
1278
1279 /*
1280 * The first thing we need to do is the allocate the memory that will be
1281 * backing the whole range.
1282 */
1283 RTGCPHYS const cbMmio2Backing = (RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT;
1284 uint32_t const cHostPages = (cbMmio2Backing + HOST_PAGE_SIZE - 1U) >> HOST_PAGE_SHIFT;
1285 size_t const cbMmio2Aligned = cHostPages << HOST_PAGE_SHIFT;
1286 R3PTRTYPE(uint8_t *) pbMmio2BackingR3 = NIL_RTR3PTR;
1287#ifdef IN_RING0
1288 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
1289# ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1290 int rc = RTR0MemObjAllocPage(&hMemObj, cbMmio2Aligned, false /*fExecutable*/);
1291# else
1292 int rc = RTR0MemObjAllocPhysNC(&hMemObj, cbMmio2Aligned, NIL_RTHCPHYS);
1293# endif
1294#else /* !IN_RING0 */
1295 AssertReturn(PGM_IS_IN_NEM_MODE(pVM), VERR_INTERNAL_ERROR_4);
1296 int rc = SUPR3PageAlloc(cHostPages, pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, (void **)&pbMmio2BackingR3);
1297#endif /* !IN_RING0 */
1298 if (RT_SUCCESS(rc))
1299 {
1300 /*
1301 * Make sure it is initialized to zeros before it's mapped to userland.
1302 */
1303#ifdef IN_RING0
1304# ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1305 uint8_t *pbMmio2BackingR0 = (uint8_t *)RTR0MemObjAddress(hMemObj);
1306 AssertPtr(pbMmio2BackingR0);
1307# endif
1308 rc = RTR0MemObjZeroInitialize(hMemObj, false /*fForce*/);
1309 AssertRCReturnStmt(rc, RTR0MemObjFree(hMemObj, true /*fFreeMappings*/), rc);
1310#else
1311 RT_BZERO(pbMmio2BackingR3, cbMmio2Aligned);
1312#endif
1313
1314#ifdef IN_RING0
1315 /*
1316 * Map it into ring-3.
1317 */
1318 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
1319 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
1320 if (RT_SUCCESS(rc))
1321 {
1322 pbMmio2BackingR3 = RTR0MemObjAddressR3(hMapObj);
1323#endif
1324
1325 /*
1326 * Create the MMIO2 registration records and associated RAM ranges.
1327 * The RAM range allocation may fail here.
1328 */
1329 RTGCPHYS offMmio2Backing = 0;
1330 uint32_t cGuestPagesLeft = cGuestPages;
1331 for (uint32_t iChunk = 0, idx = idMmio2 - 1; iChunk < cChunks; iChunk++, idx++)
1332 {
1333 uint32_t const cPagesTrackedByChunk = RT_MIN(cGuestPagesLeft, cGuestPagesPerChunk);
1334
1335 /*
1336 * Allocate the RAM range for this chunk.
1337 */
1338 uint32_t idRamRange = UINT32_MAX;
1339 rc = pgmPhysRamRangeAllocCommon(pVM, cPagesTrackedByChunk, PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX, &idRamRange);
1340 if (RT_FAILURE(rc))
1341 {
1342 /* We only zap the pointers to the backing storage.
1343 PGMR3Term and friends will clean up the RAM ranges and stuff. */
1344 while (iChunk-- > 0)
1345 {
1346 idx--;
1347#ifdef IN_RING0
1348 pVM->pgmr0.s.acMmio2RangePages[idx] = 0;
1349# ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1350 pVM->pgmr0.s.apbMmio2Backing[idx] = NULL;
1351# endif
1352#endif
1353
1354 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
1355 pMmio2->pbR3 = NIL_RTR3PTR;
1356
1357 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1358 pRamRange->pbR3 = NIL_RTR3PTR;
1359 RT_BZERO(&pRamRange->aPages[0], sizeof(pRamRange->aPages[0]) * cGuestPagesPerChunk);
1360 }
1361 break;
1362 }
1363
1364 pVM->pgm.s.apMmio2RamRanges[idx] = pVM->pgm.s.apRamRanges[idRamRange];
1365#ifdef IN_RING0
1366 pVM->pgmr0.s.apMmio2RamRanges[idx] = pVM->pgmr0.s.apRamRanges[idRamRange];
1367 pVM->pgmr0.s.acMmio2RangePages[idx] = cPagesTrackedByChunk;
1368#endif
1369
1370 /* Initialize the RAM range. */
1371 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
1372 pRamRange->pbR3 = pbMmio2BackingR3 + offMmio2Backing;
1373 uint32_t iDstPage = cPagesTrackedByChunk;
1374#ifdef IN_RING0
1375 AssertRelease(HOST_PAGE_SHIFT == GUEST_PAGE_SHIFT);
1376 while (iDstPage-- > 0)
1377 {
1378 RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iDstPage + (offMmio2Backing >> HOST_PAGE_SHIFT));
1379 Assert(HCPhys != NIL_RTHCPHYS);
1380 PGM_PAGE_INIT(&pRamRange->aPages[iDstPage], HCPhys, PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
1381 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1382 }
1383#else
1384 Assert(PGM_IS_IN_NEM_MODE(pVM));
1385 while (iDstPage-- > 0)
1386 PGM_PAGE_INIT(&pRamRange->aPages[iDstPage], UINT64_C(0x0000ffffffff0000),
1387 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
1388 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1389#endif
1390
1391 /*
1392 * Initialize the MMIO2 registration structure.
1393 */
1394 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
1395 pMmio2->pDevInsR3 = pDevIns;
1396 pMmio2->pbR3 = pbMmio2BackingR3 + offMmio2Backing;
1397 pMmio2->fFlags = 0;
1398 if (iChunk == 0)
1399 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
1400 if (iChunk + 1 == cChunks)
1401 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
1402 if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
1403 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
1404
1405 pMmio2->iSubDev = iSubDev;
1406 pMmio2->iRegion = iRegion;
1407 pMmio2->idSavedState = UINT8_MAX;
1408 pMmio2->idMmio2 = idMmio2 + iChunk;
1409 pMmio2->idRamRange = idRamRange;
1410 Assert(pMmio2->idRamRange == idRamRange);
1411 pMmio2->GCPhys = NIL_RTGCPHYS;
1412 pMmio2->cbReal = (RTGCPHYS)cPagesTrackedByChunk << GUEST_PAGE_SHIFT;
1413 pMmio2->pPhysHandlerR3 = NIL_RTR3PTR; /* Pre-alloc is done by ring-3 caller. */
1414 pMmio2->paLSPages = NIL_RTR3PTR;
1415
1416#if defined(IN_RING0) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1417 pVM->pgmr0.s.apbMmio2Backing[idx] = &pbMmio2BackingR0[offMmio2Backing];
1418#endif
1419
1420 /* Advance */
1421 cGuestPagesLeft -= cPagesTrackedByChunk;
1422 offMmio2Backing += (RTGCPHYS)cPagesTrackedByChunk << GUEST_PAGE_SHIFT;
1423 } /* chunk alloc loop */
1424 Assert(cGuestPagesLeft == 0 || RT_FAILURE_NP(rc));
1425 if (RT_SUCCESS(rc))
1426 {
1427 /*
1428 * Account for pages and ring-0 memory objects.
1429 */
1430 pVM->pgm.s.cAllPages += cGuestPages;
1431 pVM->pgm.s.cPrivatePages += cGuestPages;
1432#ifdef IN_RING0
1433 pVM->pgmr0.s.ahMmio2MemObjs[idMmio2 - 1] = hMemObj;
1434 pVM->pgmr0.s.ahMmio2MapObjs[idMmio2 - 1] = hMapObj;
1435#endif
1436 pVM->pgm.s.cMmio2Ranges = idMmio2 + cChunks - 1U;
1437
1438 /*
1439 * Done!
1440 */
1441 return VINF_SUCCESS;
1442 }
1443
1444 /*
1445 * Bail.
1446 */
1447#ifdef IN_RING0
1448 RTR0MemObjFree(hMapObj, true /*fFreeMappings*/);
1449 }
1450 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
1451#else
1452 SUPR3PageFree(pbMmio2BackingR3, cHostPages);
1453#endif
1454 }
1455 else
1456 LogRel(("pgmPhysMmio2RegisterWorker: Failed to allocate %RGp bytes of MMIO2 backing memory: %Rrc\n", cbMmio2Aligned, rc));
1457 return rc;
1458}
1459
1460
1461#ifdef IN_RING0
1462/**
1463 * This is called during VM initialization to create an MMIO2 range.
1464 *
1465 * This does everything except setting the PGMRAMRANGE::pszDesc to a non-zero
1466 * value and preallocating the access handler for dirty bitmap tracking.
1467 *
1468 * The caller already knows which MMIO2 ID will be assigned to the registration
1469 * and how many chunks it requires, so there are no output fields in the request
1470 * structure.
1471 *
1472 * @returns VBox status code.
1473 * @param pGVM Pointer to the global VM structure.
1474 * @param pReq Where to get the parameters.
1475 * @thread EMT(0)
1476 */
1477VMMR0_INT_DECL(int) PGMR0PhysMmio2RegisterReq(PGVM pGVM, PPGMPHYSMMIO2REGISTERREQ pReq)
1478{
1479 /*
1480 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1481 * while we're here).
1482 */
1483 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1484 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1485
1486 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1487 VMSTATE const enmState = pGVM->enmVMState;
1488 AssertMsgReturn( enmState == VMSTATE_CREATING
1489 || enmState == VMSTATE_LOADING /* pre 4.3.6 state loading needs to ignore a MMIO2 region in PCNet. */
1490 , ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1491 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1492
1493 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1494 AssertReturn(GUEST_PAGE_SIZE == HOST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1495
1496 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE);
1497 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_MMIO2_REGION, VERR_OUT_OF_RANGE);
1498 AssertReturn(pReq->cGuestPages <= (MM_MMIO_64_MAX >> GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE);
1499
1500 AssertMsgReturn(!(pReq->fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), ("fFlags=%#x\n", pReq->fFlags), VERR_INVALID_FLAGS);
1501
1502 AssertMsgReturn( pReq->cChunks > 0
1503 && pReq->cChunks < PGM_MAX_MMIO2_RANGES
1504 && pReq->cChunks == pgmPhysMmio2CalcChunkCount((RTGCPHYS)pReq->cGuestPages << GUEST_PAGE_SHIFT, NULL),
1505 ("cChunks=%#x cGuestPages=%#x\n", pReq->cChunks, pReq->cGuestPages),
1506 VERR_INVALID_PARAMETER);
1507
1508 AssertMsgReturn( pReq->idMmio2 != 0
1509 && pReq->idMmio2 <= PGM_MAX_MMIO2_RANGES
1510 && (unsigned)pReq->idMmio2 + pReq->cChunks - 1U <= PGM_MAX_MMIO2_RANGES,
1511 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks),
1512 VERR_INVALID_PARAMETER);
1513
1514 for (uint32_t iChunk = 0, idx = pReq->idMmio2 - 1; iChunk < pReq->cChunks; iChunk++, idx++)
1515 {
1516 AssertReturn(pGVM->pgmr0.s.ahMmio2MapObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1517 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1518 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] == NULL, VERR_INVALID_STATE);
1519 }
1520
1521 /*
1522 * Make sure we're owning the PGM lock (caller should be), recheck idMmio2
1523 * and call the worker function we share with ring-3.
1524 */
1525 int rc = PGM_LOCK(pGVM);
1526 AssertRCReturn(rc, rc);
1527
1528 AssertReturnStmt(pGVM->pgm.s.cMmio2Ranges + 1U == pReq->idMmio2,
1529 PGM_UNLOCK(pGVM), VERR_INVALID_PARAMETER);
1530 AssertReturnStmt(pGVM->pgmr0.s.idRamRangeMax + 1U + pReq->cChunks <= RT_ELEMENTS(pGVM->pgmr0.s.apRamRanges),
1531 PGM_UNLOCK(pGVM), VERR_PGM_TOO_MANY_RAM_RANGES);
1532
1533 rc = pgmPhysMmio2RegisterWorker(pGVM, pReq->cGuestPages, pReq->idMmio2, pReq->cChunks,
1534 pReq->pDevIns, pReq->iSubDev, pReq->iRegion, pReq->fFlags);
1535
1536 PGM_UNLOCK(pGVM);
1537 return rc;
1538}
1539#endif /* IN_RING0 */
1540
1541
1542
1543/**
1544 * Worker for PGMR3PhysMmio2Deregister & PGMR0PhysMmio2DeregisterReq.
1545 */
1546DECLHIDDEN(int) pgmPhysMmio2DeregisterWorker(PVMCC pVM, uint8_t idMmio2, uint8_t cChunks, PPDMDEVINSR3 pDevIns)
1547{
1548 /*
1549 * The caller shall have made sure all this is true, but we check again
1550 * since we're paranoid.
1551 */
1552 AssertReturn(idMmio2 > 0 && idMmio2 <= RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), VERR_INTERNAL_ERROR_2);
1553 AssertReturn(cChunks >= 1, VERR_INTERNAL_ERROR_2);
1554 uint8_t const idxFirst = idMmio2 - 1U;
1555 AssertReturn(idxFirst + cChunks <= pVM->pgm.s.cMmio2Ranges, VERR_INTERNAL_ERROR_2);
1556 uint32_t cGuestPages = 0; /* (For accounting and calculating the backing memory size.) */
1557 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
1558 {
1559 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns, VERR_NOT_OWNER);
1560 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_RESOURCE_BUSY);
1561 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].GCPhys == NIL_RTGCPHYS, VERR_INVALID_STATE);
1562 if (iChunk == 0)
1563 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_PARAMETER);
1564 else
1565 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_INVALID_PARAMETER);
1566 if (iChunk + 1 == cChunks)
1567 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, VERR_INVALID_PARAMETER);
1568 else
1569 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), VERR_INVALID_PARAMETER);
1570 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pPhysHandlerR3 == NIL_RTR3PTR, VERR_INVALID_STATE); /* caller shall free this */
1571
1572#ifdef IN_RING0
1573 cGuestPages += pVM->pgmr0.s.acMmio2RangePages[idx];
1574#else
1575 cGuestPages += pVM->pgm.s.aMmio2Ranges[idx].cbReal >> GUEST_PAGE_SHIFT;
1576#endif
1577
1578 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1579 AssertPtrReturn(pRamRange, VERR_INVALID_STATE);
1580 AssertReturn(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX, VERR_INVALID_STATE);
1581 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_INVALID_STATE);
1582 AssertReturn(pRamRange->GCPhysLast == NIL_RTGCPHYS, VERR_INVALID_STATE);
1583 }
1584
1585 /*
1586 * Remove everything except the backing memory first. We work the ranges
1587 * in reverse so that we can reduce the max RAM range ID when possible.
1588 */
1589#ifdef IN_RING3
1590 uint8_t * const pbMmio2Backing = pVM->pgm.s.aMmio2Ranges[idxFirst].pbR3;
1591 RTGCPHYS const cbMmio2Backing = RT_ALIGN_T((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT, HOST_PAGE_SIZE, RTGCPHYS);
1592#endif
1593
1594 int rc = VINF_SUCCESS;
1595 uint32_t iChunk = cChunks;
1596 while (iChunk-- > 0)
1597 {
1598 uint32_t const idx = idxFirst + iChunk;
1599 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1600
1601 /* Zap the MMIO2 region data. */
1602 pVM->pgm.s.apMmio2RamRanges[idx] = NIL_RTR3PTR;
1603#ifdef IN_RING0
1604 pVM->pgmr0.s.apMmio2RamRanges[idx] = NULL;
1605 pVM->pgmr0.s.acMmio2RangePages[idx] = 0;
1606#endif
1607 pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 = NIL_RTR3PTR;
1608 pVM->pgm.s.aMmio2Ranges[idx].pbR3 = NIL_RTR3PTR;
1609 pVM->pgm.s.aMmio2Ranges[idx].fFlags = 0;
1610 pVM->pgm.s.aMmio2Ranges[idx].iSubDev = UINT8_MAX;
1611 pVM->pgm.s.aMmio2Ranges[idx].iRegion = UINT8_MAX;
1612 pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX;
1613 pVM->pgm.s.aMmio2Ranges[idx].idMmio2 = UINT8_MAX;
1614 pVM->pgm.s.aMmio2Ranges[idx].idRamRange = UINT16_MAX;
1615 pVM->pgm.s.aMmio2Ranges[idx].GCPhys = NIL_RTGCPHYS;
1616 pVM->pgm.s.aMmio2Ranges[idx].cbReal = 0;
1617 pVM->pgm.s.aMmio2Ranges[idx].pPhysHandlerR3 = NIL_RTR3PTR;
1618 pVM->pgm.s.aMmio2Ranges[idx].paLSPages = NIL_RTR3PTR;
1619
1620 /* Free the RAM range. */
1621 int rc2 = pgmPhysRamRangeFree(pVM, pRamRange);
1622 AssertLogRelMsgStmt(RT_SUCCESS(rc2), ("rc2=%Rrc idx=%u chunk=%u/%u\n", rc2, idx, iChunk + 1, cChunks),
1623 rc = RT_SUCCESS(rc) ? rc2 : rc);
1624 }
1625
1626 /*
1627 * Final removal frees up the backing memory.
1628 */
1629#ifdef IN_RING3
1630 int const rcBacking = SUPR3PageFree(pbMmio2Backing, cbMmio2Backing >> HOST_PAGE_SHIFT);
1631 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking), ("rc=%Rrc %p LB %#zx\n", rcBacking, pbMmio2Backing, cbMmio2Backing),
1632 rc = RT_SUCCESS(rc) ? rcBacking : rc);
1633#else
1634 int rcBacking = RTR0MemObjFree(pVM->pgmr0.s.ahMmio2MapObjs[idxFirst], true /*fFreeMappings*/);
1635 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking),
1636 ("rc=%Rrc ahMmio2MapObjs[%u]=%p\n", rcBacking, pVM->pgmr0.s.ahMmio2MapObjs[idxFirst], idxFirst),
1637 rc = RT_SUCCESS(rc) ? rcBacking : rc);
1638 if (RT_SUCCESS(rcBacking))
1639 {
1640 pVM->pgmr0.s.ahMmio2MapObjs[idxFirst] = NIL_RTR0MEMOBJ;
1641
1642 rcBacking = RTR0MemObjFree(pVM->pgmr0.s.ahMmio2MemObjs[idxFirst], true /*fFreeMappings*/);
1643 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking),
1644 ("rc=%Rrc ahMmio2MemObjs[%u]=%p\n", rcBacking, pVM->pgmr0.s.ahMmio2MemObjs[idxFirst], idxFirst),
1645 rc = RT_SUCCESS(rc) ? rcBacking : rc);
1646 if (RT_SUCCESS(rcBacking))
1647 pVM->pgmr0.s.ahMmio2MemObjs[idxFirst] = NIL_RTR0MEMOBJ;
1648 }
1649#endif
1650
1651 /*
1652 * Decrease the MMIO2 count if these were the last ones.
1653 */
1654 if (idxFirst + cChunks == pVM->pgm.s.cMmio2Ranges)
1655 pVM->pgm.s.cMmio2Ranges = idxFirst;
1656
1657 /*
1658 * Update page count stats.
1659 */
1660 pVM->pgm.s.cAllPages -= cGuestPages;
1661 pVM->pgm.s.cPrivatePages -= cGuestPages;
1662
1663 return rc;
1664}
1665
1666
1667#ifdef IN_RING0
1668/**
1669 * This is called during VM state loading to deregister an obsolete MMIO2 range.
1670 *
1671 * This does everything except TLB flushing and releasing the access handler.
1672 * The ranges must be unmapped and without preallocated access handlers.
1673 *
1674 * @returns VBox status code.
1675 * @param pGVM Pointer to the global VM structure.
1676 * @param pReq Where to get the parameters.
1677 * @thread EMT(0)
1678 */
1679VMMR0_INT_DECL(int) PGMR0PhysMmio2DeregisterReq(PGVM pGVM, PPGMPHYSMMIO2DEREGISTERREQ pReq)
1680{
1681 /*
1682 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1683 * while we're here).
1684 */
1685 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1686 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1687
1688 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1689 /* Only LOADING, as this is special purpose for removing an unwanted PCNet MMIO2 region. */
1690 VMSTATE const enmState = pGVM->enmVMState;
1691 AssertMsgReturn(enmState == VMSTATE_LOADING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1692 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1693
1694 AssertMsgReturn( pReq->cChunks > 0
1695 && pReq->cChunks < PGM_MAX_MMIO2_RANGES,
1696 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks),
1697 VERR_INVALID_PARAMETER);
1698
1699 AssertMsgReturn( pReq->idMmio2 != 0
1700 && pReq->idMmio2 <= PGM_MAX_MMIO2_RANGES
1701 && (unsigned)pReq->idMmio2 + pReq->cChunks - 1U <= PGM_MAX_MMIO2_RANGES,
1702 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks),
1703 VERR_INVALID_PARAMETER);
1704
1705 /*
1706 * Validate that the requested range is for exactly one MMIO2 registration.
1707 *
1708 * This is safe to do w/o the lock because registration and deregistration
1709 * is restricted to EMT0, and we're on EMT0 so can't race ourselves.
1710 */
1711
1712 /* Check that the first entry is valid and has a memory object for the backing memory. */
1713 uint32_t idx = pReq->idMmio2 - 1;
1714 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] != NULL, VERR_INVALID_STATE);
1715 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] != NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1716
1717 /* Any additional regions must also have RAM ranges, but shall not have any backing memory. */
1718 idx++;
1719 for (uint32_t iChunk = 1; iChunk < pReq->cChunks; iChunk++, idx++)
1720 {
1721 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] != NULL, VERR_INVALID_STATE);
1722 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1723 }
1724
1725 /* Check that the next entry is for a different region. */
1726 AssertReturn( idx >= RT_ELEMENTS(pGVM->pgmr0.s.apMmio2RamRanges)
1727 || pGVM->pgmr0.s.apMmio2RamRanges[idx] == NULL
1728 || pGVM->pgmr0.s.ahMmio2MemObjs[idx] != NIL_RTR0MEMOBJ,
1729 VERR_INVALID_PARAMETER);
1730
1731 /*
1732 * Make sure we're owning the PGM lock (caller should be) and call the
1733 * common worker code.
1734 */
1735 int rc = PGM_LOCK(pGVM);
1736 AssertRCReturn(rc, rc);
1737
1738 rc = pgmPhysMmio2DeregisterWorker(pGVM, pReq->idMmio2, pReq->cChunks, pReq->pDevIns);
1739
1740 PGM_UNLOCK(pGVM);
1741 return rc;
1742}
1743#endif /* IN_RING0 */
1744
1745
1746
1747
1748/*********************************************************************************************************************************
1749* ROM *
1750*********************************************************************************************************************************/
1751
1752
1753/**
1754 * Common worker for pgmR3PhysRomRegisterLocked and
1755 * PGMR0PhysRomAllocateRangeReq.
1756 */
1757DECLHIDDEN(int) pgmPhysRomRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint8_t idRomRange, uint32_t fFlags)
1758{
1759 /*
1760 * Allocate the ROM range structure and map it into ring-3.
1761 */
1762 size_t const cbRomRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cPages]), HOST_PAGE_SIZE);
1763#ifdef IN_RING0
1764 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
1765 int rc = RTR0MemObjAllocPage(&hMemObj, cbRomRange, false /*fExecutable*/);
1766#else
1767 PPGMROMRANGE pRomRange;
1768 int rc = SUPR3PageAlloc(cbRomRange >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pRomRange);
1769#endif
1770 if (RT_SUCCESS(rc))
1771 {
1772 /* Zero the memory and do basic range init before mapping it into userland. */
1773#ifdef IN_RING0
1774 PPGMROMRANGE const pRomRange = (PPGMROMRANGE)RTR0MemObjAddress(hMemObj);
1775 if (!RTR0MemObjWasZeroInitialized(hMemObj))
1776#endif
1777 RT_BZERO(pRomRange, cbRomRange);
1778
1779 pRomRange->GCPhys = NIL_RTGCPHYS;
1780 pRomRange->GCPhysLast = NIL_RTGCPHYS;
1781 pRomRange->cb = (RTGCPHYS)cPages << GUEST_PAGE_SHIFT;
1782 pRomRange->fFlags = fFlags;
1783 pRomRange->idSavedState = UINT8_MAX;
1784 pRomRange->idRamRange = UINT16_MAX;
1785 pRomRange->cbOriginal = 0;
1786 pRomRange->pvOriginal = NIL_RTR3PTR;
1787 pRomRange->pszDesc = NIL_RTR3PTR;
1788
1789#ifdef IN_RING0
1790 /* Map it into userland. */
1791 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
1792 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/,
1793 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
1794 if (RT_SUCCESS(rc))
1795#endif
1796 {
1797 /*
1798 * Grab the lock (unlikely to fail or block as caller typically owns it already).
1799 */
1800 rc = PGM_LOCK(pVM);
1801 if (RT_SUCCESS(rc))
1802 {
1803 /*
1804 * Check that idRomRange is still free.
1805 */
1806 if (idRomRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges))
1807 {
1808#ifdef IN_RING0
1809 if (pVM->pgmr0.s.apRomRanges[idRomRange] == NULL)
1810#endif
1811 {
1812 if ( pVM->pgm.s.apRomRanges[idRomRange] == NIL_RTR3PTR
1813 && pVM->pgm.s.cRomRanges == idRomRange)
1814 {
1815 /*
1816 * Commit it.
1817 */
1818#ifdef IN_RING0
1819 pVM->pgmr0.s.apRomRanges[idRomRange] = pRomRange;
1820 pVM->pgmr0.s.acRomRangePages[idRomRange] = cPages;
1821 pVM->pgmr0.s.ahRomRangeMemObjs[idRomRange] = hMemObj;
1822 pVM->pgmr0.s.ahRomRangeMapObjs[idRomRange] = hMapObj;
1823#endif
1824
1825 pVM->pgm.s.cRomRanges = idRomRange + 1;
1826#ifdef IN_RING0
1827 pVM->pgm.s.apRomRanges[idRomRange] = RTR0MemObjAddressR3(hMapObj);
1828#else
1829 pVM->pgm.s.apRomRanges[idRomRange] = pRomRange;
1830#endif
1831
1832 PGM_UNLOCK(pVM);
1833 return VINF_SUCCESS;
1834 }
1835 }
1836
1837 /*
1838 * Bail out.
1839 */
1840 rc = VERR_INTERNAL_ERROR_5;
1841 }
1842 else
1843 rc = VERR_PGM_TOO_MANY_ROM_RANGES;
1844 PGM_UNLOCK(pVM);
1845 }
1846#ifdef IN_RING0
1847 RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
1848#endif
1849 }
1850#ifdef IN_RING0
1851 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
1852#else
1853 SUPR3PageFree(pRomRange, cbRomRange >> HOST_PAGE_SHIFT);
1854#endif
1855 }
1856 return rc;
1857}
1858
1859
1860#ifdef IN_RING0
1861/**
1862 * This is called during VM initialization to allocate a ROM range.
1863 *
1864 * The page array is zeroed, the rest is initialized as best we can based on the
1865 * information in @a pReq.
1866 *
1867 * @returns VBox status code.
1868 * @param pGVM Pointer to the global VM structure.
1869 * @param pReq Where to get the parameters and return the range ID.
1870 * @thread EMT(0)
1871 */
1872VMMR0_INT_DECL(int) PGMR0PhysRomAllocateRangeReq(PGVM pGVM, PPGMPHYSROMALLOCATERANGEREQ pReq)
1873{
1874 /*
1875 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1876 * while we're here).
1877 */
1878 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1879 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1880
1881 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1882
1883 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE);
1884 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_ROM_RANGE, VERR_OUT_OF_RANGE);
1885
1886 AssertMsgReturn(!(pReq->fFlags & ~(uint32_t)PGMPHYS_ROM_FLAGS_VALID_MASK), ("fFlags=%#RX32\n", pReq->fFlags),
1887 VERR_INVALID_FLAGS);
1888
1889 AssertReturn(pReq->idRomRange < RT_ELEMENTS(pGVM->pgmr0.s.apRomRanges), VERR_OUT_OF_RANGE);
1890 AssertReturn(pReq->idRomRange == pGVM->pgm.s.cRomRanges, VERR_OUT_OF_RANGE);
1891
1892 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1893 VMSTATE const enmState = pGVM->enmVMState;
1894 AssertMsgReturn(enmState == VMSTATE_CREATING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1895 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1896
1897 /*
1898 * Call common worker.
1899 */
1900 return pgmPhysRomRangeAllocCommon(pGVM, pReq->cGuestPages, pReq->idRomRange, pReq->fFlags);
1901}
1902#endif /* IN_RING0 */
1903
1904
1905/*********************************************************************************************************************************
1906* Other stuff
1907*********************************************************************************************************************************/
1908
1909
1910
1911/**
1912 * Checks if Address Gate 20 is enabled or not.
1913 *
1914 * @returns true if enabled.
1915 * @returns false if disabled.
1916 * @param pVCpu The cross context virtual CPU structure.
1917 */
1918VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
1919{
1920 /* Must check that pVCpu isn't NULL here because PDM device helpers are a little lazy. */
1921 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu && pVCpu->pgm.s.fA20Enabled));
1922 return pVCpu && pVCpu->pgm.s.fA20Enabled;
1923}
1924
1925
1926/**
1927 * Validates a GC physical address.
1928 *
1929 * @returns true if valid.
1930 * @returns false if invalid.
1931 * @param pVM The cross context VM structure.
1932 * @param GCPhys The physical address to validate.
1933 */
1934VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
1935{
1936 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1937 return pPage != NULL;
1938}
1939
1940
1941/**
1942 * Checks if a GC physical address is a normal page,
1943 * i.e. not ROM, MMIO or reserved.
1944 *
1945 * @returns true if normal.
1946 * @returns false if invalid, ROM, MMIO or reserved page.
1947 * @param pVM The cross context VM structure.
1948 * @param GCPhys The physical address to check.
1949 */
1950VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
1951{
1952 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1953 return pPage
1954 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
1955}
1956
1957
1958/**
1959 * Converts a GC physical address to a HC physical address.
1960 *
1961 * @returns VINF_SUCCESS on success.
1962 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1963 * page but has no physical backing.
1964 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1965 * GC physical address.
1966 *
1967 * @param pVM The cross context VM structure.
1968 * @param GCPhys The GC physical address to convert.
1969 * @param pHCPhys Where to store the HC physical address on success.
1970 */
1971VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
1972{
1973 PGM_LOCK_VOID(pVM);
1974 PPGMPAGE pPage;
1975 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1976 if (RT_SUCCESS(rc))
1977 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
1978 PGM_UNLOCK(pVM);
1979 return rc;
1980}
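/*
 * Illustrative usage sketch (documentation only, not part of the build): looking up
 * the host physical address backing a guest physical address. The names GCPhysProbe,
 * HCPhysProbe and rcProbe are made up for this example; everything else is taken
 * from this file.
 *
 *     RTHCPHYS HCPhysProbe;
 *     int rcProbe = PGMPhysGCPhys2HCPhys(pVM, GCPhysProbe, &HCPhysProbe);
 *     if (RT_SUCCESS(rcProbe))
 *         Log(("GCPhys %RGp is backed by HCPhys %RHp\n", GCPhysProbe, HCPhysProbe));
 *     else
 *         Log(("GCPhys %RGp: not backed or invalid (%Rrc)\n", GCPhysProbe, rcProbe));
 */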
1981
1982
1983/**
1984 * Invalidates all page mapping TLBs.
1985 *
1986 * @param pVM The cross context VM structure.
1987 * @param fInRendezvous Set if we're in a rendezvous.
1988 */
1989void pgmPhysInvalidatePageMapTLB(PVMCC pVM, bool fInRendezvous)
1990{
1991 PGM_LOCK_VOID(pVM);
1992 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
1993
1994 /* Clear the R3 & R0 TLBs completely. */
1995 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
1996 {
1997 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
1998 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
1999 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
2000 }
2001
2002 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
2003 {
2004 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
2005 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
2006 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
2007 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
2008 }
2009
2010 /* For the per VCPU lockless TLBs, we only invalidate the GCPhys members so that
2011 anyone concurrently using the entry can safely continue to do so while any
2012 subsequent attempts to use it will fail. (Emulating a scenario where we
2013 lost the PGM lock race and the concurrent TLB user won it.) */
2014 VMCC_FOR_EACH_VMCPU(pVM)
2015 {
2016 if (!fInRendezvous && pVCpu != VMMGetCpu(pVM))
2017 for (unsigned idx = 0; idx < RT_ELEMENTS(pVCpu->pgm.s.PhysTlb.aEntries); idx++)
2018 ASMAtomicWriteU64(&pVCpu->pgm.s.PhysTlb.aEntries[idx].GCPhys, NIL_RTGCPHYS);
2019 else
2020 for (unsigned idx = 0; idx < RT_ELEMENTS(pVCpu->pgm.s.PhysTlb.aEntries); idx++)
2021 pVCpu->pgm.s.PhysTlb.aEntries[idx].GCPhys = NIL_RTGCPHYS;
2022 }
2023 VMCC_FOR_EACH_VMCPU_END(pVM);
2024
2025 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MISC);
2026 PGM_UNLOCK(pVM);
2027}
2028
2029
2030/**
2031 * Invalidates a page mapping TLB entry
2032 *
2033 * @param pVM The cross context VM structure.
2034 * @param GCPhys GCPhys entry to flush
2035 *
2036 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
2037 * when needed.
2038 */
2039void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
2040{
2041 PGM_LOCK_ASSERT_OWNER(pVM);
2042
2043 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
2044
2045 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
2046
2047 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
2048 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
2049 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
2050
2051 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
2052 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
2053 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
2054 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
2055
2056 /* For the per VCPU lockless TLBs, we only invalidate the GCPhys member so that
2057 anyone concurrently using the entry can safely continue to do so while any
2058 subsequent attempts to use it will fail. (Emulating a scenario where we
2059 lost the PGM lock race and the concurrent TLB user won it.) */
2060 VMCC_FOR_EACH_VMCPU(pVM)
2061 {
2062 ASMAtomicWriteU64(&pVCpu->pgm.s.PhysTlb.aEntries[idx].GCPhys, NIL_RTGCPHYS);
2063 }
2064 VMCC_FOR_EACH_VMCPU_END(pVM);
2065}
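/*
 * Illustrative caller pattern (documentation only, not part of the build): this
 * function does not touch the IEM physical TLBs, so callers typically pair it with
 * an explicit IEM flush, as pgmPhysAllocPage does further down in this file:
 *
 *     pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
 *     IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_ALLOCATED);
 */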
2066
2067
2068/**
2069 * Makes sure that there is at least one handy page ready for use.
2070 *
2071 * This will also take the appropriate actions when reaching water-marks.
2072 *
2073 * @returns VBox status code.
2074 * @retval VINF_SUCCESS on success.
2075 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
2076 *
2077 * @param pVM The cross context VM structure.
2078 *
2079 * @remarks Must be called from within the PGM critical section. It may
2080 * nip back to ring-3/0 in some cases.
2081 */
2082static int pgmPhysEnsureHandyPage(PVMCC pVM)
2083{
2084 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
2085
2086 /*
2087 * Do we need to do anything special?
2088 */
2089#ifdef IN_RING3
2090 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
2091#else
2092 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
2093#endif
2094 {
2095 /*
2096 * Allocate pages only if we're out of them, or in ring-3, almost out.
2097 */
2098#ifdef IN_RING3
2099 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
2100#else
2101 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
2102#endif
2103 {
2104 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
2105 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
2106#ifdef IN_RING3
2107 int rc = PGMR3PhysAllocateHandyPages(pVM);
2108#else
2109 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
2110#endif
2111 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2112 {
2113 if (RT_FAILURE(rc))
2114 return rc;
2115 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2116 if (!pVM->pgm.s.cHandyPages)
2117 {
2118 LogRel(("PGM: no more handy pages!\n"));
2119 return VERR_EM_NO_MEMORY;
2120 }
2121 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
2122 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
2123#ifndef IN_RING3
2124 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
2125#endif
2126 }
2127 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
2128 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
2129 ("%u\n", pVM->pgm.s.cHandyPages),
2130 VERR_PGM_HANDY_PAGE_IPE);
2131 }
2132 else
2133 {
2134 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
2135 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
2136#ifndef IN_RING3
2137 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
2138 {
2139 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
2140 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
2141 }
2142#endif
2143 }
2144 }
2145
2146 return VINF_SUCCESS;
2147}
2148
2149
2150/**
2151 * Replace a zero or shared page with new page that we can write to.
2152 * Replace a zero or shared page with a new page that we can write to.
2153 * @returns The following VBox status codes.
2154 * @retval VINF_SUCCESS on success, pPage is modified.
2155 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2156 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
2157 *
2158 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
2159 *
2160 * @param pVM The cross context VM structure.
2161 * @param pPage The physical page tracking structure. This will
2162 * be modified on success.
2163 * @param GCPhys The address of the page.
2164 *
2165 * @remarks Must be called from within the PGM critical section. It may
2166 * nip back to ring-3/0 in some cases.
2167 *
2168 * @remarks This function shouldn't really fail, however if it does
2169 * it probably means we've screwed up the size of handy pages and/or
2170 * the low-water mark. Or, that some device I/O is causing a lot of
2171 * pages to be allocated while the host is in a low-memory
2172 * condition. This latter should be handled elsewhere and in a more
2173 * controlled manner, it's on the @bugref{3170} todo list...
2174 */
2175int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2176{
2177 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
2178
2179 /*
2180 * Prereqs.
2181 */
2182 PGM_LOCK_ASSERT_OWNER(pVM);
2183 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
2184 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
2185
2186# ifdef PGM_WITH_LARGE_PAGES
2187 /*
2188 * Try to allocate a large page if applicable.
2189 */
2190 if ( PGMIsUsingLargePages(pVM)
2191 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2192 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
2193 {
2194 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
2195 PPGMPAGE pBasePage;
2196
2197 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
2198 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
2199 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
2200 {
2201 rc = pgmPhysAllocLargePage(pVM, GCPhys);
2202 if (rc == VINF_SUCCESS)
2203 return rc;
2204 }
2205 /* Mark the base as type page table, so we don't check over and over again. */
2206 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
2207
2208 /* fall back to 4KB pages. */
2209 }
2210# endif
2211
2212 /*
2213 * Flush any shadow page table mappings of the page.
2214 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
2215 */
2216 bool fFlushTLBs = false;
2217 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
2218 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
2219
2220 /*
2221 * Ensure that we've got a page handy, take it and use it.
2222 */
2223 int rc2 = pgmPhysEnsureHandyPage(pVM);
2224 if (RT_FAILURE(rc2))
2225 {
2226 if (fFlushTLBs)
2227 PGM_INVL_ALL_VCPU_TLBS(pVM);
2228 Assert(rc2 == VERR_EM_NO_MEMORY);
2229 return rc2;
2230 }
2231 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
2232 PGM_LOCK_ASSERT_OWNER(pVM);
2233 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
2234 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
2235
2236 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
2237 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
2238 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
2239 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
2240 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
2241 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
2242
2243 /*
2244 * There are one or two actions to be taken the next time we allocate handy pages:
2245 * - Tell the GMM (global memory manager) what the page is being used for.
2246 * (Speeds up replacement operations - sharing and defragmenting.)
2247 * - If the current backing is shared, it must be freed.
2248 */
2249 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
2250 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2251
2252 void const *pvSharedPage = NULL;
2253 if (!PGM_PAGE_IS_SHARED(pPage))
2254 {
2255 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
2256 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
2257 pVM->pgm.s.cZeroPages--;
2258 }
2259 else
2260 {
2261 /* Mark this shared page for freeing/dereferencing. */
2262 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
2263 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
2264
2265 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
2266 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
2267 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
2268 pVM->pgm.s.cSharedPages--;
2269
2270 /* Grab the address of the page so we can make a copy later on. (safe) */
2271 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
2272 AssertRC(rc);
2273 }
2274
2275 /*
2276 * Do the PGMPAGE modifications.
2277 */
2278 pVM->pgm.s.cPrivatePages++;
2279 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
2280 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
2281 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
2282 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
2283 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
2284 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID,
2285 !pvSharedPage
2286 ? IEMTLBPHYSFLUSHREASON_ALLOCATED : IEMTLBPHYSFLUSHREASON_ALLOCATED_FROM_SHARED);
2287
2288 /* Copy the shared page contents to the replacement page. */
2289 if (!pvSharedPage)
2290 { /* likely */ }
2291 else
2292 {
2293 /* Get the virtual address of the new page. */
2294 PGMPAGEMAPLOCK PgMpLck;
2295 void *pvNewPage;
2296 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
2297 if (RT_SUCCESS(rc))
2298 {
2299 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
2300 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2301 }
2302 }
2303
2304 if ( fFlushTLBs
2305 && rc != VINF_PGM_GCPHYS_ALIASED)
2306 PGM_INVL_ALL_VCPU_TLBS(pVM);
2307
2308 /*
2309 * Notify NEM about the mapping change for this page.
2310 *
2311 * Note! Shadow ROM pages are complicated as they can definitely be
2312 * allocated while not visible, so play safe.
2313 */
2314 if (VM_IS_NEM_ENABLED(pVM))
2315 {
2316 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
2317 if ( enmType != PGMPAGETYPE_ROM_SHADOW
2318 || pgmPhysGetPage(pVM, GCPhys) == pPage)
2319 {
2320 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
2321 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
2322 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
2323 if (RT_SUCCESS(rc))
2324 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
2325 else
2326 rc = rc2;
2327 }
2328 }
2329
2330 return rc;
2331}
2332
2333#ifdef PGM_WITH_LARGE_PAGES
2334
2335/**
2336 * Replace a 2 MB range of zero pages with new pages that we can write to.
2337 *
2338 * @returns The following VBox status codes.
2339 * @retval VINF_SUCCESS on success, pPage is modified.
2340 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2341 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
2342 *
2343 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
2344 *
2345 * @param pVM The cross context VM structure.
2346 * @param GCPhys The address of the page.
2347 *
2348 * @remarks Must be called from within the PGM critical section. It may block
2349 * on GMM and host mutexes/locks, leaving HM context.
2350 */
2351int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
2352{
2353 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
2354 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
2355 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
2356
2357 /*
2358 * Check Prereqs.
2359 */
2360 PGM_LOCK_ASSERT_OWNER(pVM);
2361 Assert(PGMIsUsingLargePages(pVM));
2362
2363 /*
2364 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
2365 */
2366 PPGMPAGE pFirstPage;
2367 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
2368 if ( RT_SUCCESS(rc)
2369 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
2370 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
2371 {
2372 /*
2373 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
2374 * since they are unallocated.
2375 */
2376 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
2377 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
2378 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
2379 {
2380 /*
2381 * Now, make sure all the other pages in the 2 MB range are in the same state.
2382 */
2383 GCPhys = GCPhysBase;
2384 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
2385 while (cLeft-- > 0)
2386 {
2387 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
2388 if ( pSubPage
2389 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
2390 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
2391 {
2392 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
2393 GCPhys += GUEST_PAGE_SIZE;
2394 }
2395 else
2396 {
2397 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
2398 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
2399
2400 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
2401 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
2402 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
2403 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2404 }
2405 }
2406
2407 /*
2408 * Do the allocation.
2409 */
2410# ifdef IN_RING3
2411 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
2412# elif defined(IN_RING0)
2413 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
2414# else
2415# error "Port me"
2416# endif
2417 if (RT_SUCCESS(rc))
2418 {
2419 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
2420 pVM->pgm.s.cLargePages++;
2421 return VINF_SUCCESS;
2422 }
2423
2424 /* If we fail once, it most likely means the host's memory is too
2425 fragmented; don't bother trying again. */
2426 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
2427 return rc;
2428 }
2429 }
2430 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2431}
2432
2433
2434/**
2435 * Recheck the entire 2 MB range to see if we can use it again as a large page.
2436 *
2437 * @returns The following VBox status codes.
2438 * @retval VINF_SUCCESS on success, the large page can be used again
2439 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
2440 *
2441 * @param pVM The cross context VM structure.
2442 * @param GCPhys The address of the page.
2443 * @param pLargePage Page structure of the base page
2444 */
2445int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
2446{
2447 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
2448
2449 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
2450
2451 AssertCompile(X86_PDE2M_PAE_PG_MASK == EPT_PDE2M_PG_MASK); /* Paranoia: Caller uses this for guest EPT tables as well. */
2452 GCPhys &= X86_PDE2M_PAE_PG_MASK;
2453
2454 /* Check the base page. */
2455 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
2456 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
2457 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
2458 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
2459 {
2460 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
2461 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2462 }
2463
2464 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
2465 /* Check all remaining pages in the 2 MB range. */
2466 unsigned i;
2467 GCPhys += GUEST_PAGE_SIZE;
2468 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
2469 {
2470 PPGMPAGE pPage;
2471 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2472 AssertRCBreak(rc);
2473
2474 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
2475 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2476 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
2477 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
2478 {
2479 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
2480 break;
2481 }
2482
2483 GCPhys += GUEST_PAGE_SIZE;
2484 }
2485 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
2486
2487 if (i == _2M / GUEST_PAGE_SIZE)
2488 {
2489 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
2490 pVM->pgm.s.cLargePagesDisabled--;
2491 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
2492 return VINF_SUCCESS;
2493 }
2494
2495 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2496}
2497
2498#endif /* PGM_WITH_LARGE_PAGES */
2499
2500
2501/**
2502 * Deal with a write monitored page.
2503 *
2504 * @param pVM The cross context VM structure.
2505 * @param pPage The physical page tracking structure.
2506 * @param GCPhys The guest physical address of the page.
2507 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
2508 * very unlikely situation where it is okay that we let NEM
2509 * fix the page access in a lazy fashion.
2510 *
2511 * @remarks Called from within the PGM critical section.
2512 */
2513void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2514{
2515 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
2516 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
2517 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
2518 if (PGM_PAGE_IS_CODE_PAGE(pPage))
2519 {
2520 PGM_PAGE_CLEAR_CODE_PAGE(pVM, pPage);
2521 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MADE_WRITABLE);
2522 }
2523
2524 Assert(pVM->pgm.s.cMonitoredPages > 0);
2525 pVM->pgm.s.cMonitoredPages--;
2526 pVM->pgm.s.cWrittenToPages++;
2527
2528#ifdef VBOX_WITH_NATIVE_NEM
2529 /*
2530 * Notify NEM about the protection change so we won't spin forever.
2531 *
2532 * Note! NEM needs to be able to lazily correct page protection as we cannot
2533 * really get it 100% right here, it seems. The page pool does this too.
2534 */
2535 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
2536 {
2537 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
2538 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
2539 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
2540 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
2541 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
2542 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
2543 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
2544 }
2545#else
2546 RT_NOREF(GCPhys);
2547#endif
2548}
2549
2550
2551/**
2552 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
2553 *
2554 * @returns VBox strict status code.
2555 * @retval VINF_SUCCESS on success.
2556 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2557 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2558 *
2559 * @param pVM The cross context VM structure.
2560 * @param pPage The physical page tracking structure.
2561 * @param GCPhys The address of the page.
2562 *
2563 * @remarks Called from within the PGM critical section.
2564 */
2565int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2566{
2567 PGM_LOCK_ASSERT_OWNER(pVM);
2568 switch (PGM_PAGE_GET_STATE(pPage))
2569 {
2570 case PGM_PAGE_STATE_WRITE_MONITORED:
2571 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
2572 RT_FALL_THRU();
2573 default: /* to shut up GCC */
2574 case PGM_PAGE_STATE_ALLOCATED:
2575 return VINF_SUCCESS;
2576
2577 /*
2578 * Zero pages can be dummy pages for MMIO or reserved memory,
2579 * so we need to check the flags before joining cause with
2580 * shared page replacement.
2581 */
2582 case PGM_PAGE_STATE_ZERO:
2583 if (PGM_PAGE_IS_MMIO(pPage))
2584 return VERR_PGM_PHYS_PAGE_RESERVED;
2585 RT_FALL_THRU();
2586 case PGM_PAGE_STATE_SHARED:
2587 return pgmPhysAllocPage(pVM, pPage, GCPhys);
2588
2589 /* Not allowed to write to ballooned pages. */
2590 case PGM_PAGE_STATE_BALLOONED:
2591 return VERR_PGM_PHYS_PAGE_BALLOONED;
2592 }
2593}
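/*
 * Illustrative caller sketch (documentation only, not part of the build): the usual
 * pattern under the PGM lock, also visible in pgmPhysGCPhys2CCPtrInternalDepr below,
 * is to resolve the page first and only then upgrade it to the ALLOCATED state:
 *
 *     PPGMPAGE pPage;
 *     int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *     if (   RT_SUCCESS(rc)
 *         && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
 *         rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 */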
2594
2595
2596/**
2597 * Internal usage: Map the page specified by its GMM ID.
2598 *
2599 * This is similar to pgmPhysPageMap.
2600 *
2601 * @returns VBox status code.
2602 *
2603 * @param pVM The cross context VM structure.
2604 * @param idPage The Page ID.
2605 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
2606 * @param ppv Where to store the mapping address.
2607 *
2608 * @remarks Called from within the PGM critical section. The mapping is only
2609 * valid while you are inside this section.
2610 */
2611int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
2612{
2613 /*
2614 * Validation.
2615 */
2616 PGM_LOCK_ASSERT_OWNER(pVM);
2617 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2618 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
2619 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
2620
2621#ifdef IN_RING0
2622# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
2623 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
2624# else
2625 return GMMR0PageIdToVirt(pVM, idPage, ppv);
2626# endif
2627
2628#else
2629 /*
2630 * Find/make Chunk TLB entry for the mapping chunk.
2631 */
2632 PPGMCHUNKR3MAP pMap;
2633 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
2634 if (pTlbe->idChunk == idChunk)
2635 {
2636 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
2637 pMap = pTlbe->pChunk;
2638 }
2639 else
2640 {
2641 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
2642
2643 /*
2644 * Find the chunk, map it if necessary.
2645 */
2646 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
2647 if (pMap)
2648 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
2649 else
2650 {
2651 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
2652 if (RT_FAILURE(rc))
2653 return rc;
2654 }
2655
2656 /*
2657 * Enter it into the Chunk TLB.
2658 */
2659 pTlbe->idChunk = idChunk;
2660 pTlbe->pChunk = pMap;
2661 }
2662
2663 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
2664 return VINF_SUCCESS;
2665#endif
2666}
2667
2668
2669/**
2670 * Maps a page into the current virtual address space so it can be accessed.
2671 *
2672 * @returns VBox status code.
2673 * @retval VINF_SUCCESS on success.
2674 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2675 *
2676 * @param pVM The cross context VM structure.
2677 * @param pPage The physical page tracking structure.
2678 * @param GCPhys The address of the page.
2679 * @param ppMap Where to store the address of the mapping tracking structure.
2680 * @param ppv Where to store the mapping address of the page. The page
2681 * offset is masked off!
2682 *
2683 * @remarks Called from within the PGM critical section.
2684 */
2685static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
2686{
2687 PGM_LOCK_ASSERT_OWNER(pVM);
2688 NOREF(GCPhys);
2689
2690 /*
2691 * Special cases: MMIO2 and specially aliased MMIO pages.
2692 */
2693 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
2694 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
2695 {
2696 *ppMap = NULL;
2697
2698 /* Decode the page id to a page in a MMIO2 ram range. */
2699 uint8_t const idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
2700 uint32_t const iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
2701 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges),
2702 ("idMmio2=%u size=%u type=%u GCPhys=%#RGp Id=%u State=%u", idMmio2,
2703 RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), PGM_PAGE_GET_TYPE(pPage), GCPhys,
2704 pPage->s.idPage, pPage->s.uStateY),
2705 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2706 PPGMREGMMIO2RANGE const pMmio2Range = &pVM->pgm.s.aMmio2Ranges[idMmio2 - 1];
2707 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2708 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2709#ifndef IN_RING0
2710 uint32_t const idRamRange = pMmio2Range->idRamRange;
2711 AssertLogRelReturn(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2712 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
2713 AssertLogRelReturn(pRamRange, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2714 AssertLogRelReturn(iPage < (pRamRange->cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2715 *ppv = pMmio2Range->pbR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
2716 return VINF_SUCCESS;
2717
2718#else /* IN_RING0 */
2719 AssertLogRelReturn(iPage < pVM->pgmr0.s.acMmio2RangePages[idMmio2 - 1], VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2720# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
2721 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
2722# else
2723 AssertPtr(pVM->pgmr0.s.apbMmio2Backing[idMmio2 - 1]);
2724 *ppv = pVM->pgmr0.s.apbMmio2Backing[idMmio2 - 1] + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
2725 return VINF_SUCCESS;
2726# endif
2727#endif
2728 }
2729
2730#ifdef VBOX_WITH_PGM_NEM_MODE
2731 if (pVM->pgm.s.fNemMode)
2732 {
2733# ifdef IN_RING3
2734 /*
2735 * Find the corresponding RAM range and use that to locate the mapping address.
2736 */
2737 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
2738 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
2739 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
2740 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
2741 Assert(pPage == &pRam->aPages[idxPage]);
2742 *ppMap = NULL;
2743 *ppv = (uint8_t *)pRam->pbR3 + (idxPage << GUEST_PAGE_SHIFT);
2744 return VINF_SUCCESS;
2745# else
2746 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2747# endif
2748 }
2749#endif /* VBOX_WITH_PGM_NEM_MODE */
2750
2751 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
2752 if (idChunk == NIL_GMM_CHUNKID)
2753 {
2754 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
2755 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
2756 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2757 {
2758 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
2759 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
2760 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
2761 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
2762 *ppv = pVM->pgm.s.abZeroPg;
2763 }
2764 else
2765 *ppv = pVM->pgm.s.abZeroPg;
2766 *ppMap = NULL;
2767 return VINF_SUCCESS;
2768 }
2769
2770# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
2771 /*
2772 * Just use the physical address.
2773 */
2774 *ppMap = NULL;
2775 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
2776
2777# elif defined(IN_RING0)
2778 /*
2779 * Go by page ID thru GMMR0.
2780 */
2781 *ppMap = NULL;
2782 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
2783
2784# else
2785 /*
2786 * Find/make Chunk TLB entry for the mapping chunk.
2787 */
2788 PPGMCHUNKR3MAP pMap;
2789 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
2790 if (pTlbe->idChunk == idChunk)
2791 {
2792 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
2793 pMap = pTlbe->pChunk;
2794 AssertPtr(pMap->pv);
2795 }
2796 else
2797 {
2798 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
2799
2800 /*
2801 * Find the chunk, map it if necessary.
2802 */
2803 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
2804 if (pMap)
2805 {
2806 AssertPtr(pMap->pv);
2807 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
2808 }
2809 else
2810 {
2811 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
2812 if (RT_FAILURE(rc))
2813 return rc;
2814 AssertPtr(pMap->pv);
2815 }
2816
2817 /*
2818 * Enter it into the Chunk TLB.
2819 */
2820 pTlbe->idChunk = idChunk;
2821 pTlbe->pChunk = pMap;
2822 }
2823
2824 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
2825 *ppMap = pMap;
2826 return VINF_SUCCESS;
2827# endif /* !IN_RING0 */
2828}
2829
2830
2831/**
2832 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
2833 *
2834 * This is typically used in paths where we cannot use the TLB methods (like ROM
2835 * pages) or where there is no point in using them since we won't get many hits.
2836 *
2837 * @returns VBox strict status code.
2838 * @retval VINF_SUCCESS on success.
2839 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2840 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2841 *
2842 * @param pVM The cross context VM structure.
2843 * @param pPage The physical page tracking structure.
2844 * @param GCPhys The address of the page.
2845 * @param ppv Where to store the mapping address of the page. The page
2846 * offset is masked off!
2847 *
2848 * @remarks Called from within the PGM critical section. The mapping is only
2849 * valid while you are inside the section.
2850 */
2851int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
2852{
2853 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2854 if (RT_SUCCESS(rc))
2855 {
2856 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
2857 PPGMPAGEMAP pMapIgnore;
2858 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
2859 if (RT_FAILURE(rc2)) /* preserve rc */
2860 rc = rc2;
2861 }
2862 return rc;
2863}
2864
2865
2866/**
2867 * Maps a page into the current virtual address space so it can be accessed for
2868 * both writing and reading.
2869 *
2870 * This is typically used in paths where we cannot use the TLB methods (like ROM
2871 * pages) or where there is no point in using them since we won't get many hits.
2872 *
2873 * @returns VBox status code.
2874 * @retval VINF_SUCCESS on success.
2875 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2876 *
2877 * @param pVM The cross context VM structure.
2878 * @param pPage The physical page tracking structure. Must be in the
2879 * allocated state.
2880 * @param GCPhys The address of the page.
2881 * @param ppv Where to store the mapping address of the page. The page
2882 * offset is masked off!
2883 *
2884 * @remarks Called from within the PGM critical section. The mapping is only
2885 * valid while you are inside the section.
2886 */
2887int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
2888{
2889 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
2890 PPGMPAGEMAP pMapIgnore;
2891 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
2892}
2893
2894
2895/**
2896 * Maps a page into the current virtual address space so it can be accessed for
2897 * reading.
2898 *
2899 * This is typically used in paths where we cannot use the TLB methods (like ROM
2900 * pages) or where there is no point in using them since we won't get many hits.
2901 *
2902 * @returns VBox status code.
2903 * @retval VINF_SUCCESS on success.
2904 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2905 *
2906 * @param pVM The cross context VM structure.
2907 * @param pPage The physical page tracking structure.
2908 * @param GCPhys The address of the page.
2909 * @param ppv Where to store the mapping address of the page. The page
2910 * offset is masked off!
2911 *
2912 * @remarks Called from within the PGM critical section. The mapping is only
2913 * valid while you are inside this section.
2914 */
2915int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
2916{
2917 PPGMPAGEMAP pMapIgnore;
2918 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
2919}
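/*
 * Illustrative usage sketch (documentation only, not part of the build): mapping a
 * page read-only while holding the PGM lock, similar to what pgmPhysAllocPage does
 * above when copying the contents of a shared page. pvDstBuf is a made-up buffer
 * name used only for this example.
 *
 *     void const *pvSrc = NULL;
 *     int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSrc);
 *     if (RT_SUCCESS(rc))
 *         memcpy(pvDstBuf, pvSrc, GUEST_PAGE_SIZE);
 */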
2920
2921
2922/**
2923 * Load a guest page into the ring-3 physical TLB.
2924 *
2925 * @returns VBox status code.
2926 * @retval VINF_SUCCESS on success
2927 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2928 * @param pVM The cross context VM structure.
2929 * @param GCPhys The guest physical address in question.
2930 */
2931int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
2932{
2933 PGM_LOCK_ASSERT_OWNER(pVM);
2934
2935 /*
2936 * Find the ram range and page and hand it over to the with-page function.
2937 * 99.8% of requests are expected to be in the first range.
2938 */
2939 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2940 if (!pPage)
2941 {
2942 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
2943 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2944 }
2945
2946 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
2947}
2948
2949
2950/**
2951 * Load a guest page into the ring-3 physical TLB.
2952 *
2953 * @returns VBox status code.
2954 * @retval VINF_SUCCESS on success
2955 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2956 *
2957 * @param pVM The cross context VM structure.
2958 * @param pPage Pointer to the PGMPAGE structure corresponding to
2959 * GCPhys.
2960 * @param GCPhys The guest physical address in question.
2961 */
2962int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2963{
2964 PGM_LOCK_ASSERT_OWNER(pVM);
2965 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
2966
2967 /*
2968 * Map the page.
2969 * Make a special case for the zero page as it is kind of special.
2970 */
2971 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
2972 if ( !PGM_PAGE_IS_ZERO(pPage)
2973 && !PGM_PAGE_IS_BALLOONED(pPage))
2974 {
2975 void *pv;
2976 PPGMPAGEMAP pMap;
2977 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
2978 if (RT_FAILURE(rc))
2979 return rc;
2980# ifndef IN_RING0
2981 pTlbe->pMap = pMap;
2982# endif
2983 pTlbe->pv = pv;
2984 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
2985 }
2986 else
2987 {
2988 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
2989# ifndef IN_RING0
2990 pTlbe->pMap = NULL;
2991# endif
2992 pTlbe->pv = pVM->pgm.s.abZeroPg;
2993 }
2994# ifdef PGM_WITH_PHYS_TLB
2995 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
2996 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
2997 pTlbe->GCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2998 else
2999 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
3000# else
3001 pTlbe->GCPhys = NIL_RTGCPHYS;
3002# endif
3003 pTlbe->pPage = pPage;
3004 return VINF_SUCCESS;
3005}
3006
3007
3008#ifdef IN_RING3 /** @todo Need to ensure a ring-0 version gets invalidated safely */
3009/**
3010 * Load a guest page into the lockless ring-3 physical TLB for the calling EMT.
3011 *
3012 * @returns VBox status code.
3013 * @retval VINF_SUCCESS on success
3014 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3015 *
3016 * @param pVCpu The cross context virtual CPU structure.
3017 * @param pPage Pointer to the PGMPAGE structure corresponding to
3018 * GCPhys.
3019 * @param GCPhys The guest physical address in question.
3020 */
3021DECLHIDDEN(int) pgmPhysPageLoadIntoLocklessTlbWithPage(PVMCPUCC pVCpu, PPGMPAGE pPage, RTGCPHYS GCPhys)
3022{
3023 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageMapTlbMisses));
3024 PPGMPAGEMAPTLBE const pLocklessTlbe = &pVCpu->pgm.s.PhysTlb.aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3025 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3026
3027 PGM_LOCK_VOID(pVM);
3028
3029 PPGMPAGEMAPTLBE pSharedTlbe;
3030 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pSharedTlbe);
3031 if (RT_SUCCESS(rc))
3032 *pLocklessTlbe = *pSharedTlbe;
3033
3034 PGM_UNLOCK(pVM);
3035 return rc;
3036}
3037#endif /* IN_RING3 */
3038
3039
3040/**
3041 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
3042 * own the PGM lock and therefore not need to lock the mapped page.
3043 *
3044 * @returns VBox status code.
3045 * @retval VINF_SUCCESS on success.
3046 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3047 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3048 *
3049 * @param pVM The cross context VM structure.
3050 * @param GCPhys The guest physical address of the page that should be mapped.
3051 * @param pPage Pointer to the PGMPAGE structure for the page.
3052 * @param ppv Where to store the address corresponding to GCPhys.
3053 *
3054 * @internal
3055 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
3056 */
3057int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
3058{
3059 int rc;
3060 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
3061 PGM_LOCK_ASSERT_OWNER(pVM);
3062 pVM->pgm.s.cDeprecatedPageLocks++;
3063
3064 /*
3065 * Make sure the page is writable.
3066 */
3067 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
3068 {
3069 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3070 if (RT_FAILURE(rc))
3071 return rc;
3072 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3073 }
3074 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
3075
3076 /*
3077 * Get the mapping address.
3078 */
3079 PPGMPAGEMAPTLBE pTlbe;
3080 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3081 if (RT_FAILURE(rc))
3082 return rc;
3083 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3084 return VINF_SUCCESS;
3085}
3086
3087
3088/**
3089 * Locks a page mapping for writing.
3090 *
3091 * @param pVM The cross context VM structure.
3092 * @param pPage The page.
3093 * @param pTlbe The mapping TLB entry for the page.
3094 * @param pLock The lock structure (output).
3095 */
3096DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
3097{
3098# ifndef IN_RING0
3099 PPGMPAGEMAP pMap = pTlbe->pMap;
3100 if (pMap)
3101 pMap->cRefs++;
3102# else
3103 RT_NOREF(pTlbe);
3104# endif
3105
3106 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
3107 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
3108 {
3109 if (cLocks == 0)
3110 pVM->pgm.s.cWriteLockedPages++;
3111 PGM_PAGE_INC_WRITE_LOCKS(pPage);
3112 }
3113 else if (cLocks != PGM_PAGE_MAX_LOCKS)
3114 {
3115 PGM_PAGE_INC_WRITE_LOCKS(pPage);
3116 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
3117# ifndef IN_RING0
3118 if (pMap)
3119 pMap->cRefs++; /* Extra ref to prevent it from going away. */
3120# endif
3121 }
3122
3123 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
3124# ifndef IN_RING0
3125 pLock->pvMap = pMap;
3126# else
3127 pLock->pvMap = NULL;
3128# endif
3129}
3130
3131/**
3132 * Locks a page mapping for reading.
3133 *
3134 * @param pVM The cross context VM structure.
3135 * @param pPage The page.
3136 * @param pTlbe The mapping TLB entry for the page.
3137 * @param pLock The lock structure (output).
3138 */
3139DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
3140{
3141# ifndef IN_RING0
3142 PPGMPAGEMAP pMap = pTlbe->pMap;
3143 if (pMap)
3144 pMap->cRefs++;
3145# else
3146 RT_NOREF(pTlbe);
3147# endif
3148
3149 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
3150 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
3151 {
3152 if (cLocks == 0)
3153 pVM->pgm.s.cReadLockedPages++;
3154 PGM_PAGE_INC_READ_LOCKS(pPage);
3155 }
3156 else if (cLocks != PGM_PAGE_MAX_LOCKS)
3157 {
3158 PGM_PAGE_INC_READ_LOCKS(pPage);
3159 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
3160# ifndef IN_RING0
3161 if (pMap)
3162 pMap->cRefs++; /* Extra ref to prevent it from going away. */
3163# endif
3164 }
3165
3166 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
3167# ifndef IN_RING0
3168 pLock->pvMap = pMap;
3169# else
3170 pLock->pvMap = NULL;
3171# endif
3172}
3173
3174
3175/**
3176 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
3177 * own the PGM lock and have access to the page structure.
3178 *
3179 * @returns VBox status code.
3180 * @retval VINF_SUCCESS on success.
3181 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3182 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3183 *
3184 * @param pVM The cross context VM structure.
3185 * @param GCPhys The guest physical address of the page that should be mapped.
3186 * @param pPage Pointer to the PGMPAGE structure for the page.
3187 * @param ppv Where to store the address corresponding to GCPhys.
3188 * @param pLock Where to store the lock information that
3189 * pgmPhysReleaseInternalPageMappingLock needs.
3190 *
3191 * @internal
3192 */
3193int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
3194{
3195 int rc;
3196 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
3197 PGM_LOCK_ASSERT_OWNER(pVM);
3198
3199 /*
3200 * Make sure the page is writable.
3201 */
3202 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
3203 {
3204 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3205 if (RT_FAILURE(rc))
3206 return rc;
3207 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3208 }
3209 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
3210
3211 /*
3212 * Do the job.
3213 */
3214 PPGMPAGEMAPTLBE pTlbe;
3215 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3216 if (RT_FAILURE(rc))
3217 return rc;
3218 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3219 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3220 return VINF_SUCCESS;
3221}
3222
3223
3224/**
3225 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
3226 * own the PGM lock and have access to the page structure.
3227 *
3228 * @returns VBox status code.
3229 * @retval VINF_SUCCESS on success.
3230 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3231 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3232 *
3233 * @param pVM The cross context VM structure.
3234 * @param GCPhys The guest physical address of the page that should be mapped.
3235 * @param pPage Pointer to the PGMPAGE structure for the page.
3236 * @param ppv Where to store the address corresponding to GCPhys.
3237 * @param pLock Where to store the lock information that
3238 * pgmPhysReleaseInternalPageMappingLock needs.
3239 *
3240 * @internal
3241 */
3242int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
3243{
3244 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
3245 PGM_LOCK_ASSERT_OWNER(pVM);
3246 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
3247
3248 /*
3249 * Do the job.
3250 */
3251 PPGMPAGEMAPTLBE pTlbe;
3252 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3253 if (RT_FAILURE(rc))
3254 return rc;
3255 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3256 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3257 return VINF_SUCCESS;
3258}
3259
3260
3261/**
3262 * Requests the mapping of a guest page into the current context.
3263 *
3264 * This API should only be used for a very short time, as it will consume scarce
3265 * resources (R0 and GC) in the mapping cache. When you're done with the page,
3266 * call PGMPhysReleasePageMappingLock() ASAP to release it.
3267 *
3268 * This API will assume your intention is to write to the page, and will
3269 * therefore replace shared and zero pages. If you do not intend to modify
3270 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
3271 *
3272 * @returns VBox status code.
3273 * @retval VINF_SUCCESS on success.
3274 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3275 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3276 *
3277 * @param pVM The cross context VM structure.
3278 * @param GCPhys The guest physical address of the page that should be
3279 * mapped.
3280 * @param ppv Where to store the address corresponding to GCPhys.
3281 * @param pLock Where to store the lock information that
3282 * PGMPhysReleasePageMappingLock needs.
3283 *
3284 * @remarks The caller is responsible for dealing with access handlers.
3285 * @todo Add an informational return code for pages with access handlers?
3286 *
3287 * @remark Avoid calling this API from within critical sections (other than
3288 * the PGM one) because of the deadlock risk. External threads may
3289 * need to delegate jobs to the EMTs.
3290 * @remarks Only one page is mapped! Make no assumption about what's after or
3291 * before the returned page!
3292 * @thread Any thread.
3293 */
3294VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
3295{
3296 int rc = PGM_LOCK(pVM);
3297 AssertRCReturn(rc, rc);
3298
3299 /*
3300 * Query the Physical TLB entry for the page (may fail).
3301 */
3302 PPGMPAGEMAPTLBE pTlbe;
3303 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
3304 if (RT_SUCCESS(rc))
3305 {
3306 /*
3307 * If the page is shared, the zero page, or being write monitored
3308 * it must be converted to a page that's writable if possible.
3309 */
3310 PPGMPAGE pPage = pTlbe->pPage;
3311 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
3312 {
3313 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3314 if (RT_SUCCESS(rc))
3315 {
3316 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3317 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3318 }
3319 }
3320 if (RT_SUCCESS(rc))
3321 {
3322 /*
3323 * Now, just perform the locking and calculate the return address.
3324 */
3325 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3326 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3327 }
3328 }
3329
3330 PGM_UNLOCK(pVM);
3331 return rc;
3332}
3333
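/*
 * Usage sketch (illustrative only, not called anywhere in this file): how a
 * caller would typically use PGMPhysGCPhys2CCPtr to briefly patch a byte in
 * guest RAM and then drop the mapping lock again.  The helper name
 * pgmSamplePokeByte is hypothetical.
 *
 *      static int pgmSamplePokeByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
 *      {
 *          void           *pv;
 *          PGMPAGEMAPLOCK  Lock;
 *          int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *          if (RT_SUCCESS(rc))
 *          {
 *              *(uint8_t *)pv = bValue;                    // only this one page is mapped
 *              PGMPhysReleasePageMappingLock(pVM, &Lock);  // release ASAP
 *          }
 *          return rc;
 *      }
 */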
3334
3335/**
3336 * Requests the mapping of a guest page into the current context.
3337 *
3338 * This API should only be used for a very short time, as it will consume scarce
3339 * resources (R0 and GC) in the mapping cache. When you're done with the page,
3340 * call PGMPhysReleasePageMappingLock() ASAP to release it.
3341 *
3342 * @returns VBox status code.
3343 * @retval VINF_SUCCESS on success.
3344 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3345 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3346 *
3347 * @param pVM The cross context VM structure.
3348 * @param GCPhys The guest physical address of the page that should be
3349 * mapped.
3350 * @param ppv Where to store the address corresponding to GCPhys.
3351 * @param pLock Where to store the lock information that
3352 * PGMPhysReleasePageMappingLock needs.
3353 *
3354 * @remarks The caller is responsible for dealing with access handlers.
3355 * @todo Add an informational return code for pages with access handlers?
3356 *
3357 * @remarks Avoid calling this API from within critical sections (other than
3358 * the PGM one) because of the deadlock risk.
3359 * @remarks Only one page is mapped! Make no assumption about what's after or
3360 * before the returned page!
3361 * @thread Any thread.
3362 */
3363VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
3364{
3365 int rc = PGM_LOCK(pVM);
3366 AssertRCReturn(rc, rc);
3367
3368 /*
3369 * Query the Physical TLB entry for the page (may fail).
3370 */
3371 PPGMPAGEMAPTLBE pTlbe;
3372 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
3373 if (RT_SUCCESS(rc))
3374 {
3375 /* MMIO pages don't have any readable backing. */
3376 PPGMPAGE pPage = pTlbe->pPage;
3377 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
3378 rc = VERR_PGM_PHYS_PAGE_RESERVED;
3379 else
3380 {
3381 /*
3382 * Now, just perform the locking and calculate the return address.
3383 */
3384 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3385 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3386 }
3387 }
3388
3389 PGM_UNLOCK(pVM);
3390 return rc;
3391}
3392
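/*
 * Usage sketch (illustrative only): reading a 32-bit value through the
 * read-only mapping API above.  Note that for MMIO pages the call fails with
 * VERR_PGM_PHYS_PAGE_RESERVED instead of returning a mapping.  The helper
 * name pgmSamplePeekU32 is hypothetical.
 *
 *      static int pgmSamplePeekU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t *pu32)
 *      {
 *          void const     *pv;
 *          PGMPAGEMAPLOCK  Lock;
 *          int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *          if (RT_SUCCESS(rc))
 *          {
 *              *pu32 = *(uint32_t const *)pv;      // must not cross the page boundary
 *              PGMPhysReleasePageMappingLock(pVM, &Lock);
 *          }
 *          return rc;
 *      }
 */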
3393
3394/**
3395 * Requests the mapping of a guest page given by virtual address into the current context.
3396 *
3397 * This API should only be used for a very short time, as it will consume
3398 * scarce resources (R0 and GC) in the mapping cache. When you're done
3399 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
3400 *
3401 * This API will assume your intention is to write to the page, and will
3402 * therefore replace shared and zero pages. If you do not intend to modify
3403 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
3404 *
3405 * @returns VBox status code.
3406 * @retval VINF_SUCCESS on success.
3407 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
3408 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
3409 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3410 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3411 *
3412 * @param pVCpu The cross context virtual CPU structure.
3413 * @param GCPtr The guest virtual address of the page that should be
3414 * mapped.
3415 * @param ppv Where to store the address corresponding to GCPtr.
3416 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
3417 *
3418 * @remark Avoid calling this API from within critical sections (other than
3419 * the PGM one) because of the deadlock risk.
3420 * @thread EMT
3421 */
3422VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
3423{
3424 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3425 RTGCPHYS GCPhys;
3426 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
3427 if (RT_SUCCESS(rc))
3428 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
3429 return rc;
3430}
3431
3432
3433/**
3434 * Requests the mapping of a guest page given by virtual address into the current context.
3435 *
3436 * This API should only be used for a very short time, as it will consume
3437 * scarce resources (R0 and GC) in the mapping cache. When you're done
3438 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
3439 *
3440 * @returns VBox status code.
3441 * @retval VINF_SUCCESS on success.
3442 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
3443 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
3444 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3445 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3446 *
3447 * @param pVCpu The cross context virtual CPU structure.
3448 * @param GCPtr The guest virtual address of the page that should be
3449 * mapped.
3450 * @param ppv Where to store the address corresponding to GCPtr.
3451 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
3452 *
3453 * @remark Avoid calling this API from within critical sections (other than
3454 * the PGM one) because of the deadlock risk.
3455 * @thread EMT(pVCpu)
3456 */
3457VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
3458{
3459 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3460 RTGCPHYS GCPhys;
3461 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
3462 if (RT_SUCCESS(rc))
3463 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
3464 return rc;
3465}
3466
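/*
 * Usage sketch (illustrative only): the GCPtr variants first translate the
 * guest virtual address and then defer to the GCPhys APIs, so the usage
 * pattern is the same except that it must run on the EMT.  The helper name
 * pgmSamplePeekGuestByte is hypothetical.
 *
 *      static int pgmSamplePeekGuestByte(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint8_t *pb)
 *      {
 *          void const     *pv;
 *          PGMPAGEMAPLOCK  Lock;
 *          int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);  // EMT only
 *          if (RT_SUCCESS(rc))
 *          {
 *              *pb = *(uint8_t const *)pv;
 *              PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *          }
 *          return rc;
 *      }
 */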
3467
3468/**
3469 * Release the mapping of a guest page.
3470 *
3471 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
3472 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
3473 *
3474 * @param pVM The cross context VM structure.
3475 * @param pLock The lock structure initialized by the mapping function.
3476 */
3477VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
3478{
3479# ifndef IN_RING0
3480 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
3481# endif
3482 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
3483 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
3484
3485 pLock->uPageAndType = 0;
3486 pLock->pvMap = NULL;
3487
3488 PGM_LOCK_VOID(pVM);
3489 if (fWriteLock)
3490 {
3491 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
3492 Assert(cLocks > 0);
3493 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3494 {
3495 if (cLocks == 1)
3496 {
3497 Assert(pVM->pgm.s.cWriteLockedPages > 0);
3498 pVM->pgm.s.cWriteLockedPages--;
3499 }
3500 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
3501 }
3502
3503 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
3504 { /* probably extremely likely */ }
3505 else
3506 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
3507 }
3508 else
3509 {
3510 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
3511 Assert(cLocks > 0);
3512 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3513 {
3514 if (cLocks == 1)
3515 {
3516 Assert(pVM->pgm.s.cReadLockedPages > 0);
3517 pVM->pgm.s.cReadLockedPages--;
3518 }
3519 PGM_PAGE_DEC_READ_LOCKS(pPage);
3520 }
3521 }
3522
3523# ifndef IN_RING0
3524 if (pMap)
3525 {
3526 Assert(pMap->cRefs >= 1);
3527 pMap->cRefs--;
3528 }
3529# endif
3530 PGM_UNLOCK(pVM);
3531}
3532
3533
3534#ifdef IN_RING3
3535/**
3536 * Release the mapping of multiple guest pages.
3537 *
3538 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
3539 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
3540 *
3541 * @param pVM The cross context VM structure.
3542 * @param cPages Number of pages to unlock.
3543 * @param paLocks Array of lock structures initialized by the mapping
3544 * function.
3545 */
3546VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
3547{
3548 Assert(cPages > 0);
3549 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
3550#ifdef VBOX_STRICT
3551 for (uint32_t i = 1; i < cPages; i++)
3552 {
3553 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
3554 AssertPtr(paLocks[i].uPageAndType);
3555 }
3556#endif
3557
3558 PGM_LOCK_VOID(pVM);
3559 if (fWriteLock)
3560 {
3561 /*
3562 * Write locks:
3563 */
3564 for (uint32_t i = 0; i < cPages; i++)
3565 {
3566 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
3567 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
3568 Assert(cLocks > 0);
3569 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3570 {
3571 if (cLocks == 1)
3572 {
3573 Assert(pVM->pgm.s.cWriteLockedPages > 0);
3574 pVM->pgm.s.cWriteLockedPages--;
3575 }
3576 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
3577 }
3578
3579 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
3580 { /* probably extremely likely */ }
3581 else
3582 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
3583
3584 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
3585 if (pMap)
3586 {
3587 Assert(pMap->cRefs >= 1);
3588 pMap->cRefs--;
3589 }
3590
3591 /* Yield the lock: */
3592 if ((i & 1023) == 1023 && i + 1 < cPages)
3593 {
3594 PGM_UNLOCK(pVM);
3595 PGM_LOCK_VOID(pVM);
3596 }
3597 }
3598 }
3599 else
3600 {
3601 /*
3602 * Read locks:
3603 */
3604 for (uint32_t i = 0; i < cPages; i++)
3605 {
3606 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
3607 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
3608 Assert(cLocks > 0);
3609 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3610 {
3611 if (cLocks == 1)
3612 {
3613 Assert(pVM->pgm.s.cReadLockedPages > 0);
3614 pVM->pgm.s.cReadLockedPages--;
3615 }
3616 PGM_PAGE_DEC_READ_LOCKS(pPage);
3617 }
3618
3619 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
3620 if (pMap)
3621 {
3622 Assert(pMap->cRefs >= 1);
3623 pMap->cRefs--;
3624 }
3625
3626 /* Yield the lock: */
3627 if ((i & 1023) == 1023 && i + 1 < cPages)
3628 {
3629 PGM_UNLOCK(pVM);
3630 PGM_LOCK_VOID(pVM);
3631 }
3632 }
3633 }
3634 PGM_UNLOCK(pVM);
3635
3636 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
3637}
3638#endif /* IN_RING3 */
3639
3640
3641/**
3642 * Release the internal mapping of a guest page.
3643 *
3644 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
3645 * pgmPhysGCPhys2CCPtrInternalReadOnly.
3646 *
3647 * @param pVM The cross context VM structure.
3648 * @param pLock The lock structure initialized by the mapping function.
3649 *
3650 * @remarks Caller must hold the PGM lock.
3651 */
3652void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
3653{
3654 PGM_LOCK_ASSERT_OWNER(pVM);
3655 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
3656}
3657
3658
3659/**
3660 * Converts a GC physical address to a HC ring-3 pointer.
3661 *
3662 * @returns VINF_SUCCESS on success.
3663 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
3664 * page but has no physical backing.
3665 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
3666 * GC physical address.
3667 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
3668 * a dynamic ram chunk boundary.
3669 *
3670 * @param pVM The cross context VM structure.
3671 * @param GCPhys The GC physical address to convert.
3672 * @param pR3Ptr Where to store the R3 pointer on success.
3673 *
3674 * @deprecated Avoid when possible!
3675 */
3676int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
3677{
3678/** @todo this is kind of hacky and needs some more work. */
3679#ifndef DEBUG_sandervl
3680 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
3681#endif
3682
3683 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
3684 PGM_LOCK_VOID(pVM);
3685
3686 PPGMRAMRANGE pRam;
3687 PPGMPAGE pPage;
3688 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3689 if (RT_SUCCESS(rc))
3690 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
3691
3692 PGM_UNLOCK(pVM);
3693 Assert(rc <= VINF_SUCCESS);
3694 return rc;
3695}
3696
3697
3698/**
3699 * Special lockless guest physical to current context pointer converter.
3700 *
3701 * This is mainly for page table walking and the like.
3702 */
3703int pgmPhysGCPhys2CCPtrLockless(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv)
3704{
3705 VMCPU_ASSERT_EMT(pVCpu);
3706
3707 /*
3708 * Get the RAM range and page structure.
3709 */
3710 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3711 PGMRAMRANGE volatile *pRam;
3712 PGMPAGE volatile *pPage;
3713 int rc = pgmPhysGetPageAndRangeExLockless(pVM, pVCpu, GCPhys, &pPage, &pRam);
3714 if (RT_SUCCESS(rc))
3715 {
3716 /*
3717 * Now, make sure it's writable (typically it is).
3718 */
3719 if (RT_LIKELY(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED))
3720 { /* likely, typically */ }
3721 else
3722 {
3723 PGM_LOCK_VOID(pVM);
3724 rc = pgmPhysPageMakeWritable(pVM, (PPGMPAGE)pPage, GCPhys);
3725 if (RT_SUCCESS(rc))
3726 rc = pgmPhysGetPageAndRangeExLockless(pVM, pVCpu, GCPhys, &pPage, &pRam);
3727 PGM_UNLOCK(pVM);
3728 if (RT_FAILURE(rc))
3729 return rc;
3730 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3731 }
3732 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
3733
3734 /*
3735 * Get the mapping address.
3736 */
3737 uint8_t *pb;
3738#ifdef IN_RING3
3739 if (PGM_IS_IN_NEM_MODE(pVM))
3740 pb = &pRam->pbR3[(RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT];
3741 else
3742#endif
3743 {
3744#ifdef IN_RING3
3745 PPGMPAGEMAPTLBE pTlbe;
3746 rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, (PPGMPAGE)pPage, GCPhys, &pTlbe);
3747 AssertLogRelRCReturn(rc, rc);
3748 pb = (uint8_t *)pTlbe->pv;
3749 RT_NOREF(pVM);
3750#else /** @todo a safe lockless page TLB in ring-0 needs to ensure it gets the right invalidations. Later. */
3751 PGM_LOCK(pVM);
3752 PPGMPAGEMAPTLBE pTlbe;
3753 rc = pgmPhysPageQueryTlbeWithPage(pVM, (PPGMPAGE)pPage, GCPhys, &pTlbe);
3754 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
3755 pb = (uint8_t *)pTlbe->pv;
3756 PGM_UNLOCK(pVM);
3757 RT_NOREF(pVCpu);
3758#endif
3759 }
3760 *ppv = (void *)((uintptr_t)pb | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3761 return VINF_SUCCESS;
3762 }
3763 Assert(rc <= VINF_SUCCESS);
3764 return rc;
3765}
3766
3767
3768/**
3769 * Converts a guest pointer to a GC physical address.
3770 *
3771 * This uses the current CR3/CR0/CR4 of the guest.
3772 *
3773 * @returns VBox status code.
3774 * @param pVCpu The cross context virtual CPU structure.
3775 * @param GCPtr The guest pointer to convert.
3776 * @param pGCPhys Where to store the GC physical address.
3777 * @thread EMT(pVCpu)
3778 */
3779VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
3780{
3781 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3782 PGMPTWALK Walk;
3783 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
3784 if (pGCPhys && RT_SUCCESS(rc))
3785 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
3786 return rc;
3787}
3788
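/*
 * Usage sketch (illustrative only): translating a guest virtual address to a
 * guest physical one; the page offset is already merged into the result by
 * PGMPhysGCPtr2GCPhys.  The helper name pgmSampleLogTranslation is hypothetical.
 *
 *      static int pgmSampleLogTranslation(PVMCPUCC pVCpu, RTGCPTR GCPtr)
 *      {
 *          RTGCPHYS GCPhys;
 *          int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *          if (RT_SUCCESS(rc))
 *              Log(("%RGv -> %RGp\n", GCPtr, GCPhys));
 *          return rc;
 *      }
 */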
3789
3790/**
3791 * Converts a guest pointer to a HC physical address.
3792 *
3793 * This uses the current CR3/CR0/CR4 of the guest.
3794 *
3795 * @returns VBox status code.
3796 * @param pVCpu The cross context virtual CPU structure.
3797 * @param GCPtr The guest pointer to convert.
3798 * @param pHCPhys Where to store the HC physical address.
3799 * @thread EMT(pVCpu)
3800 */
3801VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
3802{
3803 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3804 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3805 PGMPTWALK Walk;
3806 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
3807 if (RT_SUCCESS(rc))
3808 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
3809 return rc;
3810}
3811
3812
3813
3814#undef LOG_GROUP
3815#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
3816
3817
3818#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
3819/**
3820 * Cache PGMPhys memory access
3821 *
3822 * @param pVM The cross context VM structure.
3823 * @param pCache Cache structure pointer
3824 * @param GCPhys GC physical address
3825 * @param pbR3 HC pointer corresponding to physical page
3826 *
3827 * @thread EMT.
3828 */
3829static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
3830{
3831 uint32_t iCacheIndex;
3832
3833 Assert(VM_IS_EMT(pVM));
3834
3835 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
3836 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
3837
3838 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
3839
3840 ASMBitSet(&pCache->aEntries, iCacheIndex);
3841
3842 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
3843 pCache->Entry[iCacheIndex].pbR3 = pbR3;
3844}
3845#endif /* IN_RING3 */
3846
3847
3848/**
3849 * Deals with reading from a page with one or more ALL access handlers.
3850 *
3851 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
3852 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
3853 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
3854 *
3855 * @param pVM The cross context VM structure.
3856 * @param pPage The page descriptor.
3857 * @param GCPhys The physical address to start reading at.
3858 * @param pvBuf Where to put the bits we read.
3859 * @param cb How much to read - less or equal to a page.
3860 * @param enmOrigin The origin of this call.
3861 */
3862static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
3863 PGMACCESSORIGIN enmOrigin)
3864{
3865 /*
3866 * The most frequent access here is MMIO and shadowed ROM.
3867 * The current code ASSUMES all these access handlers covers full pages!
3868 */
3869
3870 /*
3871 * Whatever we do, we need the source page, so map it first.
3872 */
3873 PGMPAGEMAPLOCK PgMpLck;
3874 const void *pvSrc = NULL;
3875 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
3876/** @todo Check how this can work for MMIO pages? */
3877 if (RT_FAILURE(rc))
3878 {
3879 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
3880 GCPhys, pPage, rc));
3881 memset(pvBuf, 0xff, cb);
3882 return VINF_SUCCESS;
3883 }
3884
3885 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
3886
3887 /*
3888 * Deal with any physical handlers.
3889 */
3890 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3891 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
3892 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
3893 {
3894 PPGMPHYSHANDLER pCur;
3895 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
3896 if (RT_SUCCESS(rc))
3897 {
3898 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
3899 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
3900 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
3901#ifndef IN_RING3
3902 if (enmOrigin != PGMACCESSORIGIN_IEM)
3903 {
3904 /* Cannot reliably handle informational status codes in this context */
3905 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3906 return VERR_PGM_PHYS_WR_HIT_HANDLER;
3907 }
3908#endif
3909 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
3910 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
3911 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
3912 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
3913
3914 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
3915 STAM_PROFILE_START(&pCur->Stat, h);
3916 PGM_LOCK_ASSERT_OWNER(pVM);
3917
3918 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
3919 PGM_UNLOCK(pVM);
3920 /* If the access originates from a device, make sure the buffer is initialized
3921 as a guard against leaking heap, stack and other info via badly written
3922 MMIO handling. @bugref{10651} */
3923 if (enmOrigin == PGMACCESSORIGIN_DEVICE)
3924 memset(pvBuf, 0xff, cb);
3925 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
3926 PGM_LOCK_VOID(pVM);
3927
3928 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
3929 pCur = NULL; /* might not be valid anymore. */
3930 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
3931 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
3932 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
3933 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
3934 {
3935 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3936 return rcStrict;
3937 }
3938 }
3939 else if (rc == VERR_NOT_FOUND)
3940 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
3941 else
3942 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
3943 }
3944
3945 /*
3946 * Take the default action.
3947 */
3948 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
3949 {
3950 memcpy(pvBuf, pvSrc, cb);
3951 rcStrict = VINF_SUCCESS;
3952 }
3953 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3954 return rcStrict;
3955}
3956
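/*
 * Illustrative sketch (not part of this file): the rough shape of a physical
 * access handler as invoked by pgmPhysReadHandler/pgmPhysWriteHandler.  The
 * exact PFNPGMPHYSHANDLER prototype lives in VBox/vmm/pgm.h; the handler name
 * and its trivial body are hypothetical.  Returning VINF_PGM_HANDLER_DO_DEFAULT
 * asks PGM to perform the plain memcpy itself.
 *
 *      static DECLCALLBACK(VBOXSTRICTRC)
 *      pgmSampleAccessHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
 *                             PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
 *      {
 *          RT_NOREF(pVM, pVCpu, pvPhys, pvBuf, cbBuf, enmOrigin, uUser);
 *          if (enmAccessType == PGMACCESSTYPE_WRITE)
 *              Log(("pgmSampleAccessHandler: write hit at %RGp\n", GCPhys));
 *          return VINF_PGM_HANDLER_DO_DEFAULT;     // let PGM do the actual copy
 *      }
 */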
3957
3958/**
3959 * Read physical memory.
3960 *
3961 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
3962 * want to ignore those.
3963 *
3964 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
3965 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
3966 * @retval VINF_SUCCESS in all context - read completed.
3967 *
3968 * @retval VINF_EM_OFF in RC and R0 - read completed.
3969 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
3970 * @retval VINF_EM_RESET in RC and R0 - read completed.
3971 * @retval VINF_EM_HALT in RC and R0 - read completed.
3972 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
3973 *
3974 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
3975 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
3976 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
3977 *
3978 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
3979 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
3980 *
3981 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
3982 *
3983 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
3984 * haven't been cleared for strict status codes yet.
3985 *
3986 * @param pVM The cross context VM structure.
3987 * @param GCPhys Physical address start reading from.
3988 * @param pvBuf Where to put the read bits.
3989 * @param cbRead How many bytes to read.
3990 * @param enmOrigin The origin of this call.
3991 */
3992VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
3993{
3994 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
3995 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
3996
3997 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
3998 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
3999
4000 PGM_LOCK_VOID(pVM);
4001
4002 /*
4003 * Copy loop on ram ranges.
4004 */
4005 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4006 for (;;)
4007 {
4008 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
4009
4010 /* Inside range or not? */
4011 if (pRam && GCPhys >= pRam->GCPhys)
4012 {
4013 /*
4014 * Must work our way thru this page by page.
4015 */
4016 RTGCPHYS off = GCPhys - pRam->GCPhys;
4017 while (off < pRam->cb)
4018 {
4019 unsigned iPage = off >> GUEST_PAGE_SHIFT;
4020 PPGMPAGE pPage = &pRam->aPages[iPage];
4021 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
4022 if (cb > cbRead)
4023 cb = cbRead;
4024
4025 /*
4026 * Normal page? Get the pointer to it.
4027 */
4028 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
4029 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4030 {
4031 /*
4032 * Get the pointer to the page.
4033 */
4034 PGMPAGEMAPLOCK PgMpLck;
4035 const void *pvSrc;
4036 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
4037 if (RT_SUCCESS(rc))
4038 {
4039 memcpy(pvBuf, pvSrc, cb);
4040 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4041 }
4042 else
4043 {
4044 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
4045 pRam->GCPhys + off, pPage, rc));
4046 memset(pvBuf, 0xff, cb);
4047 }
4048 }
4049 /*
4050 * Have ALL/MMIO access handlers.
4051 */
4052 else
4053 {
4054 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
4055 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
4056 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
4057 else
4058 {
4059 /* Set the remaining buffer to a known value. */
4060 memset(pvBuf, 0xff, cbRead);
4061 PGM_UNLOCK(pVM);
4062 return rcStrict2;
4063 }
4064 }
4065
4066 /* next page */
4067 if (cb >= cbRead)
4068 {
4069 PGM_UNLOCK(pVM);
4070 return rcStrict;
4071 }
4072 cbRead -= cb;
4073 off += cb;
4074 pvBuf = (char *)pvBuf + cb;
4075 } /* walk pages in ram range. */
4076
4077 GCPhys = pRam->GCPhysLast + 1;
4078 }
4079 else
4080 {
4081 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
4082
4083 /*
4084 * Unassigned address space.
4085 */
4086 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
4087 if (cb >= cbRead)
4088 {
4089 memset(pvBuf, 0xff, cbRead);
4090 break;
4091 }
4092 memset(pvBuf, 0xff, cb);
4093
4094 cbRead -= cb;
4095 pvBuf = (char *)pvBuf + cb;
4096 GCPhys += cb;
4097 }
4098
4099 } /* Ram range walk */
4100
4101 PGM_UNLOCK(pVM);
4102 return rcStrict;
4103}
4104
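/*
 * Usage sketch (illustrative only): reading a block of guest physical memory
 * while respecting access handlers, checking the strict status with
 * PGM_PHYS_RW_IS_SUCCESS as documented above.  The helper name
 * pgmSampleReadBlock is hypothetical.
 *
 *      static VBOXSTRICTRC pgmSampleReadBlock(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
 *      {
 *          VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvBuf, cb, PGMACCESSORIGIN_DEVICE);
 *          if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *              Log(("pgmSampleReadBlock: %RGp failed: %Rrc\n", GCPhys, VBOXSTRICTRC_VAL(rcStrict)));
 *          return rcStrict;
 *      }
 */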
4105
4106/**
4107 * Deals with writing to a page with one or more WRITE or ALL access handlers.
4108 *
4109 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
4110 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
4111 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
4112 *
4113 * @param pVM The cross context VM structure.
4114 * @param pPage The page descriptor.
4115 * @param GCPhys The physical address to start writing at.
4116 * @param pvBuf What to write.
4117 * @param cbWrite How much to write - less or equal to a page.
4118 * @param enmOrigin The origin of this call.
4119 */
4120static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
4121 PGMACCESSORIGIN enmOrigin)
4122{
4123 PGMPAGEMAPLOCK PgMpLck;
4124 void *pvDst = NULL;
4125 VBOXSTRICTRC rcStrict;
4126
4127 /*
4128 * Give priority to physical handlers (like #PF does).
4129 *
4130 * Hope for a lonely physical handler first that covers the whole write
4131 * area. This should be a pretty frequent case with MMIO and the heavy
4132 * usage of full page handlers in the page pool.
4133 */
4134 PVMCPUCC pVCpu = VMMGetCpu(pVM);
4135 PPGMPHYSHANDLER pCur;
4136 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
4137 if (RT_SUCCESS(rcStrict))
4138 {
4139 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
4140#ifndef IN_RING3
4141 if (enmOrigin != PGMACCESSORIGIN_IEM)
4142 /* Cannot reliably handle informational status codes in this context */
4143 return VERR_PGM_PHYS_WR_HIT_HANDLER;
4144#endif
4145 size_t cbRange = pCur->KeyLast - GCPhys + 1;
4146 if (cbRange > cbWrite)
4147 cbRange = cbWrite;
4148
4149 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
4150 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
4151 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
4152 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
4153 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
4154 else
4155 rcStrict = VINF_SUCCESS;
4156 if (RT_SUCCESS(rcStrict))
4157 {
4158 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
4159 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
4160 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
4161 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
4162 STAM_PROFILE_START(&pCur->Stat, h);
4163
4164 /* Most handlers will want to release the PGM lock for deadlock prevention
4165 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
4166 dirty page trackers will want to keep it for performance reasons. */
4167 PGM_LOCK_ASSERT_OWNER(pVM);
4168 if (pCurType->fKeepPgmLock)
4169 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4170 else
4171 {
4172 PGM_UNLOCK(pVM);
4173 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4174 PGM_LOCK_VOID(pVM);
4175 }
4176
4177 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
4178 pCur = NULL; /* might not be valid anymore. */
4179 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
4180 {
4181 if (pvDst)
4182 memcpy(pvDst, pvBuf, cbRange);
4183 rcStrict = VINF_SUCCESS;
4184 }
4185 else
4186 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
4187 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
4188 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
4189 }
4190 else
4191 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
4192 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
4193 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
4194 {
4195 if (pvDst)
4196 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4197 return rcStrict;
4198 }
4199
4200 /* more fun to be had below */
4201 cbWrite -= cbRange;
4202 GCPhys += cbRange;
4203 pvBuf = (uint8_t *)pvBuf + cbRange;
4204 pvDst = (uint8_t *)pvDst + cbRange;
4205 }
4206 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
4207 rcStrict = VINF_SUCCESS;
4208 else
4209 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
4210 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
4211
4212 /*
4213 * Deal with all the odd ends (used to be deal with virt+phys).
4214 */
4215 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
4216
4217 /* We need a writable destination page. */
4218 if (!pvDst)
4219 {
4220 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
4221 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
4222 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
4223 rc2);
4224 }
4225
4226 /** @todo clean up this code some more now that there are no virtual handlers
4227 * any more. */
4228 /* The loop state (big + ugly). */
4229 PPGMPHYSHANDLER pPhys = NULL;
4230 uint32_t offPhys = GUEST_PAGE_SIZE;
4231 uint32_t offPhysLast = GUEST_PAGE_SIZE;
4232 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
4233
4234 /* The loop. */
4235 for (;;)
4236 {
4237 if (fMorePhys && !pPhys)
4238 {
4239 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
4240 if (RT_SUCCESS_NP(rcStrict))
4241 {
4242 offPhys = 0;
4243 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
4244 }
4245 else
4246 {
4247 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
4248
4249 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
4250 GCPhys, &pPhys);
4251 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
4252 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
4253
4254 if ( RT_SUCCESS(rcStrict)
4255 && pPhys->Key <= GCPhys + (cbWrite - 1))
4256 {
4257 offPhys = pPhys->Key - GCPhys;
4258 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
4259 Assert(pPhys->KeyLast - pPhys->Key < _4G);
4260 }
4261 else
4262 {
4263 pPhys = NULL;
4264 fMorePhys = false;
4265 offPhys = offPhysLast = GUEST_PAGE_SIZE;
4266 }
4267 }
4268 }
4269
4270 /*
4271 * Handle access to space without handlers (that's easy).
4272 */
4273 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
4274 uint32_t cbRange = (uint32_t)cbWrite;
4275 Assert(cbRange == cbWrite);
4276
4277 /*
4278 * Physical handler.
4279 */
4280 if (!offPhys)
4281 {
4282#ifndef IN_RING3
4283 if (enmOrigin != PGMACCESSORIGIN_IEM)
4284 /* Cannot reliably handle informational status codes in this context */
4285 return VERR_PGM_PHYS_WR_HIT_HANDLER;
4286#endif
4287 if (cbRange > offPhysLast + 1)
4288 cbRange = offPhysLast + 1;
4289
4290 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
4291 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
4292 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
4293 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
4294
4295 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
4296 STAM_PROFILE_START(&pPhys->Stat, h);
4297
4298 /* Most handlers will want to release the PGM lock for deadlock prevention
4299 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
4300 dirty page trackers will want to keep it for performance reasons. */
4301 PGM_LOCK_ASSERT_OWNER(pVM);
4302 if (pCurType->fKeepPgmLock)
4303 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4304 else
4305 {
4306 PGM_UNLOCK(pVM);
4307 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4308 PGM_LOCK_VOID(pVM);
4309 }
4310
4311 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
4312 pPhys = NULL; /* might not be valid anymore. */
4313 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
4314 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
4315 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
4316 }
4317
4318 /*
4319 * Execute the default action and merge the status codes.
4320 */
4321 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
4322 {
4323 memcpy(pvDst, pvBuf, cbRange);
4324 rcStrict2 = VINF_SUCCESS;
4325 }
4326 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
4327 {
4328 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4329 return rcStrict2;
4330 }
4331 else
4332 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
4333
4334 /*
4335 * Advance if we've got more stuff to do.
4336 */
4337 if (cbRange >= cbWrite)
4338 {
4339 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4340 return rcStrict;
4341 }
4342
4343
4344 cbWrite -= cbRange;
4345 GCPhys += cbRange;
4346 pvBuf = (uint8_t *)pvBuf + cbRange;
4347 pvDst = (uint8_t *)pvDst + cbRange;
4348
4349 offPhys -= cbRange;
4350 offPhysLast -= cbRange;
4351 }
4352}
4353
4354
4355/**
4356 * Write to physical memory.
4357 *
4358 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
4359 * want to ignore those.
4360 *
4361 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
4362 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
4363 * @retval VINF_SUCCESS in all context - write completed.
4364 *
4365 * @retval VINF_EM_OFF in RC and R0 - write completed.
4366 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
4367 * @retval VINF_EM_RESET in RC and R0 - write completed.
4368 * @retval VINF_EM_HALT in RC and R0 - write completed.
4369 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
4370 *
4371 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
4372 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
4373 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
4374 *
4375 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
4376 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
4377 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
4378 *
4379 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
4380 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
4381 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
4382 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
4383 * @retval VINF_CSAM_PENDING_ACTION in RC only.
4384 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
4385 *
4386 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
4387 * haven't been cleared for strict status codes yet.
4388 *
4389 *
4390 * @param pVM The cross context VM structure.
4391 * @param GCPhys Physical address to write to.
4392 * @param pvBuf What to write.
4393 * @param cbWrite How many bytes to write.
4394 * @param enmOrigin Who is calling.
4395 */
4396VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
4397{
4398 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
4399 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
4400 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
4401
4402 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
4403 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
4404
4405 PGM_LOCK_VOID(pVM);
4406
4407 /*
4408 * Copy loop on ram ranges.
4409 */
4410 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4411 for (;;)
4412 {
4413 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
4414
4415 /* Inside range or not? */
4416 if (pRam && GCPhys >= pRam->GCPhys)
4417 {
4418 /*
4419 * Must work our way thru this page by page.
4420 */
4421 RTGCPTR off = GCPhys - pRam->GCPhys;
4422 while (off < pRam->cb)
4423 {
4424 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
4425 PPGMPAGE pPage = &pRam->aPages[iPage];
4426 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
4427 if (cb > cbWrite)
4428 cb = cbWrite;
4429
4430 /*
4431 * Normal page? Get the pointer to it.
4432 */
4433 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
4434 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4435 {
4436 PGMPAGEMAPLOCK PgMpLck;
4437 void *pvDst;
4438 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
4439 if (RT_SUCCESS(rc))
4440 {
4441 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
4442 memcpy(pvDst, pvBuf, cb);
4443 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4444 }
4445 /* Ignore writes to ballooned pages. */
4446 else if (!PGM_PAGE_IS_BALLOONED(pPage))
4447 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
4448 pRam->GCPhys + off, pPage, rc));
4449 }
4450 /*
4451 * Active WRITE or ALL access handlers.
4452 */
4453 else
4454 {
4455 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
4456 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
4457 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
4458 else
4459 {
4460 PGM_UNLOCK(pVM);
4461 return rcStrict2;
4462 }
4463 }
4464
4465 /* next page */
4466 if (cb >= cbWrite)
4467 {
4468 PGM_UNLOCK(pVM);
4469 return rcStrict;
4470 }
4471
4472 cbWrite -= cb;
4473 off += cb;
4474 pvBuf = (const char *)pvBuf + cb;
4475 } /* walk pages in ram range */
4476
4477 GCPhys = pRam->GCPhysLast + 1;
4478 }
4479 else
4480 {
4481 /*
4482 * Unassigned address space, skip it.
4483 */
4484 if (!pRam)
4485 break;
4486 size_t cb = pRam->GCPhys - GCPhys;
4487 if (cb >= cbWrite)
4488 break;
4489 cbWrite -= cb;
4490 pvBuf = (const char *)pvBuf + cb;
4491 GCPhys += cb;
4492 }
4493
4494 } /* Ram range walk */
4495
4496 PGM_UNLOCK(pVM);
4497 return rcStrict;
4498}
4499
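/*
 * Usage sketch (illustrative only): a device originated write through the
 * handler aware API above.  In ring-0 the call may return
 * VERR_PGM_PHYS_WR_HIT_HANDLER or an informational status that the caller has
 * to propagate, which is why the return type stays VBOXSTRICTRC.  The helper
 * name pgmSampleDevWriteU32 is hypothetical.
 *
 *      static VBOXSTRICTRC pgmSampleDevWriteU32(PVMCC pVM, RTGCPHYS GCPhys, uint32_t u32Value)
 *      {
 *          return PGMPhysWrite(pVM, GCPhys, &u32Value, sizeof(u32Value), PGMACCESSORIGIN_DEVICE);
 *      }
 */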
4500
4501/**
4502 * Read from guest physical memory by GC physical address, bypassing
4503 * MMIO and access handlers.
4504 *
4505 * @returns VBox status code.
4506 * @param pVM The cross context VM structure.
4507 * @param pvDst The destination address.
4508 * @param GCPhysSrc The source address (GC physical address).
4509 * @param cb The number of bytes to read.
4510 */
4511VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
4512{
4513 /*
4514 * Treat the first page as a special case.
4515 */
4516 if (!cb)
4517 return VINF_SUCCESS;
4518
4519 /* map the 1st page */
4520 void const *pvSrc;
4521 PGMPAGEMAPLOCK Lock;
4522 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
4523 if (RT_FAILURE(rc))
4524 return rc;
4525
4526 /* optimize for the case where access is completely within the first page. */
4527 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
4528 if (RT_LIKELY(cb <= cbPage))
4529 {
4530 memcpy(pvDst, pvSrc, cb);
4531 PGMPhysReleasePageMappingLock(pVM, &Lock);
4532 return VINF_SUCCESS;
4533 }
4534
4535 /* copy to the end of the page. */
4536 memcpy(pvDst, pvSrc, cbPage);
4537 PGMPhysReleasePageMappingLock(pVM, &Lock);
4538 GCPhysSrc += cbPage;
4539 pvDst = (uint8_t *)pvDst + cbPage;
4540 cb -= cbPage;
4541
4542 /*
4543 * Page by page.
4544 */
4545 for (;;)
4546 {
4547 /* map the page */
4548 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
4549 if (RT_FAILURE(rc))
4550 return rc;
4551
4552 /* last page? */
4553 if (cb <= GUEST_PAGE_SIZE)
4554 {
4555 memcpy(pvDst, pvSrc, cb);
4556 PGMPhysReleasePageMappingLock(pVM, &Lock);
4557 return VINF_SUCCESS;
4558 }
4559
4560 /* copy the entire page and advance */
4561 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4562 PGMPhysReleasePageMappingLock(pVM, &Lock);
4563 GCPhysSrc += GUEST_PAGE_SIZE;
4564 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
4565 cb -= GUEST_PAGE_SIZE;
4566 }
4567 /* won't ever get here. */
4568}
4569
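/*
 * Usage sketch (illustrative only): fetching a small guest structure while
 * bypassing MMIO and access handlers, e.g. for debugger style peeking.  The
 * structure and helper name are hypothetical; the copy may span two pages and
 * is split up internally by PGMPhysSimpleReadGCPhys.
 *
 *      typedef struct SAMPLEHDR { uint32_t u32Magic; uint32_t cb; } SAMPLEHDR;
 *
 *      static int pgmSampleReadHdr(PVMCC pVM, RTGCPHYS GCPhys, SAMPLEHDR *pHdr)
 *      {
 *          return PGMPhysSimpleReadGCPhys(pVM, pHdr, GCPhys, sizeof(*pHdr));
 *      }
 */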
4570
4571/**
4572 * Write to guest physical memory by GC physical address.
4574 *
4575 * This will bypass MMIO and access handlers.
4576 *
4577 * @returns VBox status code.
4578 * @param pVM The cross context VM structure.
4579 * @param GCPhysDst The GC physical address of the destination.
4580 * @param pvSrc The source buffer.
4581 * @param cb The number of bytes to write.
4582 */
4583VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
4584{
4585 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
4586
4587 /*
4588 * Treat the first page as a special case.
4589 */
4590 if (!cb)
4591 return VINF_SUCCESS;
4592
4593 /* map the 1st page */
4594 void *pvDst;
4595 PGMPAGEMAPLOCK Lock;
4596 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
4597 if (RT_FAILURE(rc))
4598 return rc;
4599
4600 /* optimize for the case where access is completely within the first page. */
4601 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
4602 if (RT_LIKELY(cb <= cbPage))
4603 {
4604 memcpy(pvDst, pvSrc, cb);
4605 PGMPhysReleasePageMappingLock(pVM, &Lock);
4606 return VINF_SUCCESS;
4607 }
4608
4609 /* copy to the end of the page. */
4610 memcpy(pvDst, pvSrc, cbPage);
4611 PGMPhysReleasePageMappingLock(pVM, &Lock);
4612 GCPhysDst += cbPage;
4613 pvSrc = (const uint8_t *)pvSrc + cbPage;
4614 cb -= cbPage;
4615
4616 /*
4617 * Page by page.
4618 */
4619 for (;;)
4620 {
4621 /* map the page */
4622 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
4623 if (RT_FAILURE(rc))
4624 return rc;
4625
4626 /* last page? */
4627 if (cb <= GUEST_PAGE_SIZE)
4628 {
4629 memcpy(pvDst, pvSrc, cb);
4630 PGMPhysReleasePageMappingLock(pVM, &Lock);
4631 return VINF_SUCCESS;
4632 }
4633
4634 /* copy the entire page and advance */
4635 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4636 PGMPhysReleasePageMappingLock(pVM, &Lock);
4637 GCPhysDst += GUEST_PAGE_SIZE;
4638 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
4639 cb -= GUEST_PAGE_SIZE;
4640 }
4641 /* won't ever get here. */
4642}
4643
4644
4645/**
4646 * Read from guest physical memory referenced by GC pointer.
4647 *
4648 * This function uses the current CR3/CR0/CR4 of the guest and will
4649 * bypass access handlers and not set any accessed bits.
4650 *
4651 * @returns VBox status code.
4652 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4653 * @param pvDst The destination address.
4654 * @param GCPtrSrc The source address (GC pointer).
4655 * @param cb The number of bytes to read.
4656 */
4657VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
4658{
4659 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4660/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
4661
4662 /*
4663 * Treat the first page as a special case.
4664 */
4665 if (!cb)
4666 return VINF_SUCCESS;
4667
4668 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
4669 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
4670
4671 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
4672 * when many VCPUs are fighting for the lock.
4673 */
4674 PGM_LOCK_VOID(pVM);
4675
4676 /* map the 1st page */
4677 void const *pvSrc;
4678 PGMPAGEMAPLOCK Lock;
4679 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
4680 if (RT_FAILURE(rc))
4681 {
4682 PGM_UNLOCK(pVM);
4683 return rc;
4684 }
4685
4686 /* optimize for the case where access is completely within the first page. */
4687 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4688 if (RT_LIKELY(cb <= cbPage))
4689 {
4690 memcpy(pvDst, pvSrc, cb);
4691 PGMPhysReleasePageMappingLock(pVM, &Lock);
4692 PGM_UNLOCK(pVM);
4693 return VINF_SUCCESS;
4694 }
4695
4696 /* copy to the end of the page. */
4697 memcpy(pvDst, pvSrc, cbPage);
4698 PGMPhysReleasePageMappingLock(pVM, &Lock);
4699 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
4700 pvDst = (uint8_t *)pvDst + cbPage;
4701 cb -= cbPage;
4702
4703 /*
4704 * Page by page.
4705 */
4706 for (;;)
4707 {
4708 /* map the page */
4709 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
4710 if (RT_FAILURE(rc))
4711 {
4712 PGM_UNLOCK(pVM);
4713 return rc;
4714 }
4715
4716 /* last page? */
4717 if (cb <= GUEST_PAGE_SIZE)
4718 {
4719 memcpy(pvDst, pvSrc, cb);
4720 PGMPhysReleasePageMappingLock(pVM, &Lock);
4721 PGM_UNLOCK(pVM);
4722 return VINF_SUCCESS;
4723 }
4724
4725 /* copy the entire page and advance */
4726 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4727 PGMPhysReleasePageMappingLock(pVM, &Lock);
4728 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
4729 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
4730 cb -= GUEST_PAGE_SIZE;
4731 }
4732 /* won't ever get here. */
4733}
4734
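/*
 * Usage sketch (illustrative only): reading guest memory by virtual address
 * on the EMT, bypassing access handlers and leaving the accessed/dirty bits
 * untouched.  The helper name pgmSampleReadGuestStack is hypothetical.
 *
 *      static int pgmSampleReadGuestStack(PVMCPUCC pVCpu, RTGCPTR GCPtrRsp, uint64_t *pu64)
 *      {
 *          return PGMPhysSimpleReadGCPtr(pVCpu, pu64, GCPtrRsp, sizeof(*pu64));
 *      }
 */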
4735
4736/**
4737 * Write to guest physical memory referenced by GC pointer.
4738 *
4739 * This function uses the current CR3/CR0/CR4 of the guest and will
4740 * bypass access handlers and not set dirty or accessed bits.
4741 *
4742 * @returns VBox status code.
4743 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4744 * @param GCPtrDst The destination address (GC pointer).
4745 * @param pvSrc The source address.
4746 * @param cb The number of bytes to write.
4747 */
4748VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
4749{
4750 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4751 VMCPU_ASSERT_EMT(pVCpu);
4752
4753 /*
4754 * Treat the first page as a special case.
4755 */
4756 if (!cb)
4757 return VINF_SUCCESS;
4758
4759 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
4760 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
4761
4762 /* map the 1st page */
4763 void *pvDst;
4764 PGMPAGEMAPLOCK Lock;
4765 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4766 if (RT_FAILURE(rc))
4767 return rc;
4768
4769 /* optimize for the case where access is completely within the first page. */
4770 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
4771 if (RT_LIKELY(cb <= cbPage))
4772 {
4773 memcpy(pvDst, pvSrc, cb);
4774 PGMPhysReleasePageMappingLock(pVM, &Lock);
4775 return VINF_SUCCESS;
4776 }
4777
4778 /* copy to the end of the page. */
4779 memcpy(pvDst, pvSrc, cbPage);
4780 PGMPhysReleasePageMappingLock(pVM, &Lock);
4781 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
4782 pvSrc = (const uint8_t *)pvSrc + cbPage;
4783 cb -= cbPage;
4784
4785 /*
4786 * Page by page.
4787 */
4788 for (;;)
4789 {
4790 /* map the page */
4791 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4792 if (RT_FAILURE(rc))
4793 return rc;
4794
4795 /* last page? */
4796 if (cb <= GUEST_PAGE_SIZE)
4797 {
4798 memcpy(pvDst, pvSrc, cb);
4799 PGMPhysReleasePageMappingLock(pVM, &Lock);
4800 return VINF_SUCCESS;
4801 }
4802
4803 /* copy the entire page and advance */
4804 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4805 PGMPhysReleasePageMappingLock(pVM, &Lock);
4806 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
4807 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
4808 cb -= GUEST_PAGE_SIZE;
4809 }
4810 /* won't ever get here. */
4811}
4812
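/*
 * Illustrative usage sketch (not part of the original file): poking a 32-bit
 * guest variable without triggering access handlers and without touching the
 * accessed/dirty bits.  The helper name and calling context are hypothetical;
 * only the PGMPhysSimpleWriteGCPtr call itself is real API.
 */
#if 0 /* example only */
static int pgmExampleSimplePoke32(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t uValue)
{
    /* Translates GCPtrDst using the current CR3/CR0/CR4 and copies the bytes,
       crossing guest page boundaries as needed.  Must be called on the EMT. */
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif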
4813
4814/**
4815 * Write to guest physical memory referenced by GC pointer and update the PTE.
4816 *
4817 * This function uses the current CR3/CR0/CR4 of the guest and will
4818 * bypass access handlers but will set any dirty and accessed bits in the PTE.
4819 *
4820 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
4821 *
4822 * @returns VBox status code.
4823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4824 * @param GCPtrDst The destination address (GC pointer).
4825 * @param pvSrc The source address.
4826 * @param cb The number of bytes to write.
4827 */
4828VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
4829{
4830 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4831 VMCPU_ASSERT_EMT(pVCpu);
4832
4833 /*
4834 * Treat the first page as a special case.
4835 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage calls.
4836 */
4837 if (!cb)
4838 return VINF_SUCCESS;
4839
4840 /* map the 1st page */
4841 void *pvDst;
4842 PGMPAGEMAPLOCK Lock;
4843 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4844 if (RT_FAILURE(rc))
4845 return rc;
4846
4847 /* optimize for the case where access is completely within the first page. */
4848 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
4849 if (RT_LIKELY(cb <= cbPage))
4850 {
4851 memcpy(pvDst, pvSrc, cb);
4852 PGMPhysReleasePageMappingLock(pVM, &Lock);
4853 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4854 return VINF_SUCCESS;
4855 }
4856
4857 /* copy to the end of the page. */
4858 memcpy(pvDst, pvSrc, cbPage);
4859 PGMPhysReleasePageMappingLock(pVM, &Lock);
4860 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4861 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
4862 pvSrc = (const uint8_t *)pvSrc + cbPage;
4863 cb -= cbPage;
4864
4865 /*
4866 * Page by page.
4867 */
4868 for (;;)
4869 {
4870 /* map the page */
4871 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4872 if (RT_FAILURE(rc))
4873 return rc;
4874
4875 /* last page? */
4876 if (cb <= GUEST_PAGE_SIZE)
4877 {
4878 memcpy(pvDst, pvSrc, cb);
4879 PGMPhysReleasePageMappingLock(pVM, &Lock);
4880 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4881 return VINF_SUCCESS;
4882 }
4883
4884 /* copy the entire page and advance */
4885 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4886 PGMPhysReleasePageMappingLock(pVM, &Lock);
4887 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4888 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
4889 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
4890 cb -= GUEST_PAGE_SIZE;
4891 }
4892 /* won't ever get here. */
4893}
4894
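/*
 * Illustrative usage sketch (not part of the original file): committing an
 * emulated guest store where the architectural accessed and dirty bits must
 * end up set in the guest PTE.  The helper name is hypothetical.
 */
#if 0 /* example only */
static int pgmExampleDirtyStore64(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint64_t uValue)
{
    /* Same as PGMPhysSimpleWriteGCPtr, except X86_PTE_A and X86_PTE_D are set
       on the PTE(s) backing the destination range. */
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif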
4895
4896/**
4897 * Read from guest physical memory referenced by GC pointer.
4898 *
4899 * This function uses the current CR3/CR0/CR4 of the guest and will
4900 * respect access handlers and set accessed bits.
4901 *
4902 * @returns Strict VBox status, see PGMPhysRead for details.
4903 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
4904 * specified virtual address.
4905 *
4906 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4907 * @param pvDst The destination address.
4908 * @param GCPtrSrc The source address (GC pointer).
4909 * @param cb The number of bytes to read.
4910 * @param enmOrigin Who is calling.
4911 * @thread EMT(pVCpu)
4912 */
4913VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
4914{
4915 int rc;
4916 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4917 VMCPU_ASSERT_EMT(pVCpu);
4918
4919 /*
4920 * Anything to do?
4921 */
4922 if (!cb)
4923 return VINF_SUCCESS;
4924
4925 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
4926
4927 /*
4928 * Optimize reads within a single page.
4929 */
4930 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
4931 {
4932 /* Convert virtual to physical address + flags */
4933 PGMPTWALK Walk;
4934 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
4935 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
4936 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4937
4938 /* mark the guest page as accessed. */
4939 if (!(Walk.fEffective & X86_PTE_A))
4940 {
4941 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
4942 AssertRC(rc);
4943 }
4944
4945 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
4946 }
4947
4948 /*
4949 * Page by page.
4950 */
4951 for (;;)
4952 {
4953 /* Convert virtual to physical address + flags */
4954 PGMPTWALK Walk;
4955 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
4956 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
4957 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4958
4959 /* mark the guest page as accessed. */
4960 if (!(Walk.fEffective & X86_PTE_A))
4961 {
4962 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
4963 AssertRC(rc);
4964 }
4965
4966 /* copy */
4967 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4968 if (cbRead < cb)
4969 {
4970 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
4971 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4972 { /* likely */ }
4973 else
4974 return rcStrict;
4975 }
4976 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
4977 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
4978
4979 /* next */
4980 Assert(cb > cbRead);
4981 cb -= cbRead;
4982 pvDst = (uint8_t *)pvDst + cbRead;
4983 GCPtrSrc += cbRead;
4984 }
4985}
4986
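/*
 * Illustrative usage sketch (not part of the original file): a read that goes
 * through access handlers and sets the accessed bit.  The helper name is
 * hypothetical and PGMACCESSORIGIN_IEM is merely one plausible origin value.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmExampleHandledRead(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
{
    /* May return informational statuses from access handlers; callers are
       expected to propagate anything other than VINF_SUCCESS. */
    return PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, PGMACCESSORIGIN_IEM);
}
#endif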
4987
4988/**
4989 * Write to guest physical memory referenced by GC pointer.
4990 *
4991 * This function uses the current CR3/CR0/CR4 of the guest and will
4992 * respect access handlers and set dirty and accessed bits.
4993 *
4994 * @returns Strict VBox status, see PGMPhysWrite for details.
4995 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
4996 * specified virtual address.
4997 *
4998 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4999 * @param GCPtrDst The destination address (GC pointer).
5000 * @param pvSrc The source address.
5001 * @param cb The number of bytes to write.
5002 * @param enmOrigin Who is calling.
5003 */
5004VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
5005{
5006 int rc;
5007 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5008 VMCPU_ASSERT_EMT(pVCpu);
5009
5010 /*
5011 * Anything to do?
5012 */
5013 if (!cb)
5014 return VINF_SUCCESS;
5015
5016 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
5017
5018 /*
5019 * Optimize writes within a single page.
5020 */
5021 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
5022 {
5023 /* Convert virtual to physical address + flags */
5024 PGMPTWALK Walk;
5025 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
5026 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
5027 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
5028
5029 /* Mention when we ignore X86_PTE_RW... */
5030 if (!(Walk.fEffective & X86_PTE_RW))
5031 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
5032
5033 /* Mark the guest page as accessed and dirty if necessary. */
5034 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
5035 {
5036 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
5037 AssertRC(rc);
5038 }
5039
5040 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
5041 }
5042
5043 /*
5044 * Page by page.
5045 */
5046 for (;;)
5047 {
5048 /* Convert virtual to physical address + flags */
5049 PGMPTWALK Walk;
5050 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
5051 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
5052 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
5053
5054 /* Mention when we ignore X86_PTE_RW... */
5055 if (!(Walk.fEffective & X86_PTE_RW))
5056 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
5057
5058 /* Mark the guest page as accessed and dirty if necessary. */
5059 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
5060 {
5061 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
5062 AssertRC(rc);
5063 }
5064
5065 /* copy */
5066 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
5067 if (cbWrite < cb)
5068 {
5069 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
5070 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5071 { /* likely */ }
5072 else
5073 return rcStrict;
5074 }
5075 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
5076 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
5077
5078 /* next */
5079 Assert(cb > cbWrite);
5080 cb -= cbWrite;
5081 pvSrc = (uint8_t *)pvSrc + cbWrite;
5082 GCPtrDst += cbWrite;
5083 }
5084}
5085
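/*
 * Illustrative usage sketch (not part of the original file): a handled write
 * that logs any strict status other than VINF_SUCCESS before handing it back
 * to the caller.  The helper name and the origin value are hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmExampleHandledWrite(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    VBOXSTRICTRC rcStrict = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IEM);
    if (rcStrict == VINF_SUCCESS)
    { /* likely */ }
    else
        Log(("pgmExampleHandledWrite: %RGv/%zu -> %Rrc\n", GCPtrDst, cb, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif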
5086
5087/**
5088 * Return the page type of the specified physical address.
5089 *
5090 * @returns The page type.
5091 * @param pVM The cross context VM structure.
5092 * @param GCPhys Guest physical address
5093 */
5094VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
5095{
5096 PGM_LOCK_VOID(pVM);
5097 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
5098 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
5099 PGM_UNLOCK(pVM);
5100
5101 return enmPgType;
5102}
5103
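/*
 * Illustrative usage sketch (not part of the original file): classifying a
 * physical address before deciding how to treat it.  The helper name is
 * hypothetical; unmapped addresses yield PGMPAGETYPE_INVALID as documented
 * above.
 */
#if 0 /* example only */
static bool pgmExampleIsBackedByRam(PVMCC pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) == PGMPAGETYPE_RAM;
}
#endif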
5104
5105/** Helper for PGMPhysIemGCPhys2PtrNoLock. */
5106DECL_FORCE_INLINE(int)
5107pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uint64_t uTlbPhysRev, R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb,
5108 RTGCPHYS GCPhys, PCPGMPAGE pPageCopy)
5109{
5110 *pfTlb |= uTlbPhysRev
5111 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
5112 *ppb = NULL;
5113 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=NULL *pfTlb=%#RX64 PageCopy=%R[pgmpage] NO\n", GCPhys,
5114 uTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3, pPageCopy));
5115 RT_NOREF(GCPhys, pPageCopy);
5116 return VINF_SUCCESS;
5117}
5118
5119
5120/** Helper for PGMPhysIemGCPhys2PtrNoLock. */
5121DECL_FORCE_INLINE(int)
5122pgmPhyIemGCphys2PtrNoLockReturnReadOnly(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTlbPhysRev, RTGCPHYS GCPhys, PCPGMPAGE pPageCopy,
5123 PPGMRAMRANGE pRam, PPGMPAGE pPage, R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb)
5124{
5125 if (!PGM_PAGE_IS_CODE_PAGE(pPageCopy))
5126 *pfTlb |= uTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
5127 else
5128 *pfTlb |= uTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
5129
5130#ifdef IN_RING3
5131 if (PGM_IS_IN_NEM_MODE(pVM))
5132 *ppb = &pRam->pbR3[(RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT];
5133 else
5134#endif
5135 {
5136#ifdef IN_RING3
5137 PPGMPAGEMAPTLBE pTlbe;
5138 int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
5139 AssertLogRelRCReturn(rc, rc);
5140 *ppb = (uint8_t *)pTlbe->pv;
5141 RT_NOREF(pVM);
5142#else /** @todo a safe lockless page TLB in ring-0 needs to ensure it gets the right invalidations. later. */
5143 PGM_LOCK(pVM);
5144 PPGMPAGEMAPTLBE pTlbe;
5145 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
5146 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
5147 *ppb = (uint8_t *)pTlbe->pv;
5148 PGM_UNLOCK(pVM);
5149 RT_NOREF(pVCpu);
5150#endif
5151 }
5152 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RO\n", GCPhys, *ppb, *pfTlb, pPageCopy));
5153 RT_NOREF(pRam);
5154 return VINF_SUCCESS;
5155}
5156
5157
5158/** Helper for PGMPhysIemGCPhys2PtrNoLock. */
5159DECL_FORCE_INLINE(int)
5160pgmPhyIemGCphys2PtrNoLockReturnReadWrite(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTlbPhysRev, RTGCPHYS GCPhys, PCPGMPAGE pPageCopy,
5161 PPGMRAMRANGE pRam, PPGMPAGE pPage, R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb)
5162{
5163 Assert(!PGM_PAGE_IS_CODE_PAGE(pPageCopy));
5164 RT_NOREF(pPageCopy);
5165 *pfTlb |= uTlbPhysRev;
5166
5167#ifdef IN_RING3
5168 if (PGM_IS_IN_NEM_MODE(pVM))
5169 *ppb = &pRam->pbR3[(RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT];
5170 else
5171#endif
5172 {
5173#ifdef IN_RING3
5174 PPGMPAGEMAPTLBE pTlbe;
5175 int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
5176 AssertLogRelRCReturn(rc, rc);
5177 *ppb = (uint8_t *)pTlbe->pv;
5178 RT_NOREF(pVM);
5179#else /** @todo a safe lockless page TLB in ring-0 needs to ensure it gets the right invalidations. later. */
5180 PGM_LOCK(pVM);
5181 PPGMPAGEMAPTLBE pTlbe;
5182 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
5183 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
5184 *ppb = (uint8_t *)pTlbe->pv;
5185 PGM_UNLOCK(pVM);
5186 RT_NOREF(pVCpu);
5187#endif
5188 }
5189 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RW\n", GCPhys, *ppb, *pfTlb, pPageCopy));
5190 RT_NOREF(pRam);
5191 return VINF_SUCCESS;
5192}
5193
5194
5195/**
5196 * Converts a GC physical address to a HC ring-3 pointer, with some
5197 * additional checks.
5198 *
5199 * @returns VBox status code (no informational statuses).
5200 *
5201 * @param pVM The cross context VM structure.
5202 * @param pVCpu The cross context virtual CPU structure of the
5203 * calling EMT.
5204 * @param GCPhys The GC physical address to convert. This API masks
5205 * the A20 line when necessary.
5206 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
5207 * be done while holding the PGM lock.
5208 * @param ppb Where to store the pointer corresponding to GCPhys
5209 * on success.
5210 * @param pfTlb The TLB flags and revision. We only add stuff.
5211 *
5212 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
5213 * PGMPhysIemGCPhys2Ptr.
5214 *
5215 * @thread EMT(pVCpu).
5216 */
5217VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
5218 R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb)
5219{
5220 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
5221 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
5222
5223 PGMRAMRANGE volatile *pRam;
5224 PGMPAGE volatile *pPage;
5225 int rc = pgmPhysGetPageAndRangeExLockless(pVM, pVCpu, GCPhys, &pPage, &pRam);
5226 if (RT_SUCCESS(rc))
5227 {
5228 /*
5229 * Wrt update races, we will try to pretend we beat the update we're
5230 * racing. We do this by sampling the physical TLB revision first, so
5231 * that the TLB entry / whatever purpose the caller has with the info
5232 * will become invalid immediately if it's updated.
5233 *
5234 * This means the caller will (probably) make use of the returned info
5235 * only once and then requery it the next time it is used, getting the
5236 * updated info. This would then be just as if the first query got the
5237 * PGM lock before the updater.
5238 */
5239 /** @todo make PGMPAGE updates more atomic, possibly flagging complex
5240 * updates by adding a u1UpdateInProgress field (or revision).
5241 * This would be especially important when updating the page ID... */
5242 uint64_t uTlbPhysRev = *puTlbPhysRev;
5243 PGMPAGE PageCopy = { { pPage->au64[0], pPage->au64[1] } };
5244 if ( uTlbPhysRev == *puTlbPhysRev
5245 && PageCopy.au64[0] == pPage->au64[0]
5246 && PageCopy.au64[1] == pPage->au64[1])
5247 ASMCompilerBarrier(); /* likely */
5248 else
5249 {
5250 PGM_LOCK_VOID(pVM);
5251 uTlbPhysRev = *puTlbPhysRev;
5252 PageCopy.au64[0] = pPage->au64[0];
5253 PageCopy.au64[1] = pPage->au64[1];
5254 PGM_UNLOCK(pVM);
5255 }
5256
5257 /*
5258 * Try optimize for the regular case first: Writable RAM.
5259 */
5260 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(&PageCopy))
5261 {
5262 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED:
5263 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy))
5264 { /* likely */ }
5265 else
5266 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5267 RT_FALL_THRU();
5268 case PGM_PAGE_HNDL_PHYS_STATE_NONE:
5269 Assert(!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy));
5270 switch (PGM_PAGE_GET_STATE_NA(&PageCopy))
5271 {
5272 case PGM_PAGE_STATE_ALLOCATED:
5273 return pgmPhyIemGCphys2PtrNoLockReturnReadWrite(pVM, pVCpu, uTlbPhysRev, GCPhys, &PageCopy,
5274 (PPGMRAMRANGE)pRam, (PPGMPAGE)pPage, ppb, pfTlb);
5275
5276 case PGM_PAGE_STATE_ZERO:
5277 case PGM_PAGE_STATE_WRITE_MONITORED:
5278 case PGM_PAGE_STATE_SHARED:
5279 return pgmPhyIemGCphys2PtrNoLockReturnReadOnly(pVM, pVCpu, uTlbPhysRev, GCPhys, &PageCopy,
5280 (PPGMRAMRANGE)pRam, (PPGMPAGE)pPage, ppb, pfTlb);
5281
5282 default: AssertFailed(); RT_FALL_THROUGH();
5283 case PGM_PAGE_STATE_BALLOONED:
5284 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5285 }
5286 break;
5287
5288 case PGM_PAGE_HNDL_PHYS_STATE_WRITE:
5289 Assert(!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy));
5290 switch (PGM_PAGE_GET_STATE_NA(&PageCopy))
5291 {
5292 case PGM_PAGE_STATE_ALLOCATED:
5293 Assert(!PGM_PAGE_IS_CODE_PAGE(&PageCopy));
5294 RT_FALL_THRU();
5295 case PGM_PAGE_STATE_ZERO:
5296 case PGM_PAGE_STATE_WRITE_MONITORED:
5297 case PGM_PAGE_STATE_SHARED:
5298 return pgmPhyIemGCphys2PtrNoLockReturnReadOnly(pVM, pVCpu, uTlbPhysRev, GCPhys, &PageCopy,
5299 (PPGMRAMRANGE)pRam, (PPGMPAGE)pPage, ppb, pfTlb);
5300
5301 default: AssertFailed(); RT_FALL_THROUGH();
5302 case PGM_PAGE_STATE_BALLOONED:
5303 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5304 }
5305 break;
5306
5307 case PGM_PAGE_HNDL_PHYS_STATE_ALL:
5308 Assert(!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy));
5309 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5310 }
5311 }
5312 else
5313 {
5314 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
5315 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
5316 *ppb = NULL;
5317 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
5318 }
5319
5320 return VINF_SUCCESS;
5321}
5322
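/*
 * Illustrative usage sketch (not part of the original file): consuming the
 * lockless query once, honouring the NO_READ flag.  Note that unassigned or
 * handler-covered pages still return VINF_SUCCESS here, only with the
 * restriction flags set and *ppb = NULL.  The helper name, the page/offset
 * split and the fallback status are hypothetical choices.
 */
#if 0 /* example only */
static int pgmExampleLocklessPeekByte(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPage,
                                      uint64_t const volatile *puTlbPhysRev, uint32_t offPage, uint8_t *pbValue)
{
    R3R0PTRTYPE(uint8_t *) pb   = NULL;
    uint64_t               fTlb = 0;
    int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhysPage, puTlbPhysRev, &pb, &fTlb);
    if (RT_FAILURE(rc))
        return rc;
    if (!pb || (fTlb & PGMIEMGCPHYS2PTR_F_NO_READ))
        return VERR_PGM_PHYS_TLB_CATCH_ALL; /* caller should fall back to PGMPhysRead. */
    *pbValue = pb[offPage & GUEST_PAGE_OFFSET_MASK];
    return VINF_SUCCESS;
}
#endif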
5323
5324/**
5325 * Converts a GC physical address to a HC ring-3 pointer, with some
5326 * additional checks.
5327 *
5328 * @returns VBox status code (no informational statuses).
5329 * @retval VINF_SUCCESS on success.
5330 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
5331 * access handler of some kind.
5332 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
5333 * accesses or is odd in any way.
5334 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
5335 *
5336 * @param pVM The cross context VM structure.
5337 * @param pVCpu The cross context virtual CPU structure of the
5338 * calling EMT.
5339 * @param GCPhys The GC physical address to convert. This API masks
5340 * the A20 line when necessary.
5341 * @param fWritable Whether write access is required.
5342 * @param fByPassHandlers Whether to bypass access handlers.
5343 * @param ppv Where to store the pointer corresponding to GCPhys
5344 * on success.
5345 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
5346 *
5347 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
5348 * @thread EMT(pVCpu).
5349 */
5350VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
5351 void **ppv, PPGMPAGEMAPLOCK pLock)
5352{
5353 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
5354
5355 PGM_LOCK_VOID(pVM);
5356
5357 PPGMRAMRANGE pRam;
5358 PPGMPAGE pPage;
5359 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
5360 if (RT_SUCCESS(rc))
5361 {
5362 if (PGM_PAGE_IS_BALLOONED(pPage))
5363 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5364 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
5365 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5366 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
5367 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
5368 rc = VINF_SUCCESS;
5369 else
5370 {
5371 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
5372 {
5373 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
5374 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5375 }
5376 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
5377 {
5378 Assert(!fByPassHandlers);
5379 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5380 }
5381 }
5382 if (RT_SUCCESS(rc))
5383 {
5384 int rc2;
5385
5386 /* Make sure what we return is writable. */
5387 if (fWritable)
5388 switch (PGM_PAGE_GET_STATE(pPage))
5389 {
5390 case PGM_PAGE_STATE_ALLOCATED:
5391 break;
5392 case PGM_PAGE_STATE_BALLOONED:
5393 AssertFailed();
5394 break;
5395 case PGM_PAGE_STATE_ZERO:
5396 case PGM_PAGE_STATE_SHARED:
5397 case PGM_PAGE_STATE_WRITE_MONITORED:
5398 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
5399 AssertLogRelRCReturn(rc2, rc2);
5400 break;
5401 }
5402
5403 /* Get a ring-3 mapping of the address. */
5404 PPGMPAGEMAPTLBE pTlbe;
5405 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
5406 AssertLogRelRCReturn(rc2, rc2);
5407
5408 /* Lock it and calculate the address. */
5409 if (fWritable)
5410 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
5411 else
5412 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
5413 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
5414
5415 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
5416 }
5417 else
5418 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
5419
5420 /* else: handler catching all access, no pointer returned. */
5421 }
5422 else
5423 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
5424
5425 PGM_UNLOCK(pVM);
5426 return rc;
5427}
5428
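/*
 * Illustrative usage sketch (not part of the original file): mapping a page
 * for direct writing and releasing the mapping lock afterwards.  The helper
 * name is hypothetical and the write is assumed not to cross the page
 * boundary.
 */
#if 0 /* example only */
static int pgmExampleDirectWrite32(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t uValue)
{
    void          *pv = NULL;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, &uValue, sizeof(uValue)); /* pv already includes the page offset. */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    /* VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL mean the write must instead go
       through PGMPhysWrite so that the access handlers are invoked. */
    return rc;
}
#endif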
5429
5430/**
5431 * Checks if the given GCPhys page requires special handling for the given access
5432 * because it's MMIO or otherwise monitored.
5433 *
5434 * @returns VBox status code (no informational statuses).
5435 * @retval VINF_SUCCESS on success.
5436 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
5437 * access handler of some kind.
5438 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
5439 * accesses or is odd in any way.
5440 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
5441 *
5442 * @param pVM The cross context VM structure.
5443 * @param GCPhys The GC physical address to convert. Since this is
5444 * only used for filling the REM TLB, the A20 mask must
5445 * be applied before calling this API.
5446 * @param fWritable Whether write access is required.
5447 * @param fByPassHandlers Whether to bypass access handlers.
5448 *
5449 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
5450 * a stop gap thing that should be removed once there is a better TLB
5451 * for virtual address accesses.
5452 */
5453VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
5454{
5455 PGM_LOCK_VOID(pVM);
5456 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
5457
5458 PPGMRAMRANGE pRam;
5459 PPGMPAGE pPage;
5460 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
5461 if (RT_SUCCESS(rc))
5462 {
5463 if (PGM_PAGE_IS_BALLOONED(pPage))
5464 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5465 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
5466 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5467 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
5468 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
5469 rc = VINF_SUCCESS;
5470 else
5471 {
5472 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
5473 {
5474 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
5475 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5476 }
5477 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
5478 {
5479 Assert(!fByPassHandlers);
5480 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5481 }
5482 }
5483 }
5484
5485 PGM_UNLOCK(pVM);
5486 return rc;
5487}
5488
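/*
 * Illustrative usage sketch (not part of the original file): using the query
 * to decide whether a recompiler-style TLB may map a page directly or must
 * force accesses through PGMPhysRead/PGMPhysWrite.  The helper name is
 * hypothetical and GCPhys is assumed to have the A20 mask already applied.
 */
#if 0 /* example only */
static bool pgmExampleCanAccessDirectly(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable)
{
    /* Any VERR_PGM_PHYS_TLB_* status means the page needs special handling. */
    return PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/) == VINF_SUCCESS;
}
#endif
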
5489#ifdef VBOX_WITH_NATIVE_NEM
5490
5491/**
5492 * Interface used by NEM to check what to do on a memory access exit.
5493 *
5494 * @returns VBox status code.
5495 * @param pVM The cross context VM structure.
5496 * @param pVCpu The cross context per virtual CPU structure.
5497 * Optional.
5498 * @param GCPhys The guest physical address.
5499 * @param fMakeWritable Whether to try to make the page writable or not. If it
5500 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
5501 * be returned and the return code will be unaffected.
5502 * @param pInfo Where to return the page information. This is
5503 * initialized even on failure.
5504 * @param pfnChecker Page in-sync checker callback. Optional.
5505 * @param pvUser User argument to pass to pfnChecker.
5506 */
5507VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable,
5508 PPGMPHYSNEMPAGEINFO pInfo, PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
5509{
5510 PGM_LOCK_VOID(pVM);
5511
5512 PPGMPAGE pPage;
5513 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
5514 if (RT_SUCCESS(rc))
5515 {
5516 /* Try make it writable if requested. */
5517 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
5518 if (fMakeWritable)
5519 switch (PGM_PAGE_GET_STATE(pPage))
5520 {
5521 case PGM_PAGE_STATE_SHARED:
5522 case PGM_PAGE_STATE_WRITE_MONITORED:
5523 case PGM_PAGE_STATE_ZERO:
5524 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
5525 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
5526 rc = VINF_SUCCESS;
5527 break;
5528 }
5529
5530 /* Fill in the info. */
5531 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
5532 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
5533 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
5534 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
5535 pInfo->enmType = enmType;
5536 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
5537 switch (PGM_PAGE_GET_STATE(pPage))
5538 {
5539 case PGM_PAGE_STATE_ALLOCATED:
5540 pInfo->fZeroPage = 0;
5541 break;
5542
5543 case PGM_PAGE_STATE_ZERO:
5544 pInfo->fZeroPage = 1;
5545 break;
5546
5547 case PGM_PAGE_STATE_WRITE_MONITORED:
5548 pInfo->fZeroPage = 0;
5549 break;
5550
5551 case PGM_PAGE_STATE_SHARED:
5552 pInfo->fZeroPage = 0;
5553 break;
5554
5555 case PGM_PAGE_STATE_BALLOONED:
5556 pInfo->fZeroPage = 1;
5557 break;
5558
5559 default:
5560 pInfo->fZeroPage = 1;
5561 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
5562 }
5563
5564 /* Call the checker and update NEM state. */
5565 if (pfnChecker)
5566 {
5567 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
5568 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
5569 }
5570
5571 /* Done. */
5572 PGM_UNLOCK(pVM);
5573 }
5574 else
5575 {
5576 PGM_UNLOCK(pVM);
5577
5578 pInfo->HCPhys = NIL_RTHCPHYS;
5579 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
5580 pInfo->u2NemState = 0;
5581 pInfo->fHasHandlers = 0;
5582 pInfo->fZeroPage = 0;
5583 pInfo->enmType = PGMPAGETYPE_INVALID;
5584 }
5585
5586 return rc;
5587}
5588
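/*
 * Illustrative sketch (not part of the original file) of a page checker
 * callback as it is invoked above, i.e. with (pVM, pVCpu, GCPhys, pInfo,
 * pvUser).  The callback name and the logging are hypothetical; the real
 * typedef is PFNPGMPHYSNEMCHECKPAGE in the PGM headers.
 */
#if 0 /* example only */
static DECLCALLBACK(int) pgmExampleNemCheckPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, pvUser);
    Log5(("pgmExampleNemCheckPage: %RGp HCPhys=%RHp prot=%#x zero=%u handlers=%u\n",
          GCPhys, pInfo->HCPhys, pInfo->fNemProt, pInfo->fZeroPage, pInfo->fHasHandlers));
    /* A real checker would compare pInfo->u2NemState / u2OldNemState with the
       hypervisor's own mapping state and may update pInfo->u2NemState. */
    return VINF_SUCCESS;
}
#endif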
5589
5590/**
5591 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
5592 * or higher.
5593 *
5594 * @returns VBox status code from callback.
5595 * @param pVM The cross context VM structure.
5596 * @param pVCpu The cross context per CPU structure. This is
5597 * optional as it's only for passing to the callback.
5598 * @param uMinState The minimum NEM state value to call on.
5599 * @param pfnCallback The callback function.
5600 * @param pvUser User argument for the callback.
5601 */
5602VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
5603 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
5604{
5605 /*
5606 * Just brute force this problem.
5607 */
5608 PGM_LOCK_VOID(pVM);
5609 int rc = VINF_SUCCESS;
5610 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
5611 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries && RT_SUCCESS(rc); idxLookup++)
5612 {
5613 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
5614 AssertContinue(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges));
5615 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
5616 AssertContinue(pRam);
5617 Assert(pRam->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));
5618
5619#ifdef IN_RING0
5620 uint32_t const cPages = RT_MIN(pRam->cb >> X86_PAGE_SHIFT, pVM->pgmr0.s.acRamRangePages[idRamRange]);
5621#else
5622 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
5623#endif
5624 for (uint32_t iPage = 0; iPage < cPages; iPage++)
5625 {
5626 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
5627 if (u2State < uMinState)
5628 { /* likely */ }
5629 else
5630 {
5631 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
5632 if (RT_SUCCESS(rc))
5633 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
5634 else
5635 break;
5636 }
5637 }
5638 }
5639 PGM_UNLOCK(pVM);
5640
5641 return rc;
5642}
5643
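/*
 * Illustrative sketch (not part of the original file) of an enumeration
 * callback matching the invocation above: it receives the page address and a
 * modifiable NEM state byte which the enumerator writes back on success.
 * The callback name and the reset policy are hypothetical.
 */
#if 0 /* example only */
static DECLCALLBACK(int) pgmExampleNemResetState(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                 uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, pvUser);
    Log5(("pgmExampleNemResetState: %RGp: %u -> 0\n", GCPhys, *pu2State));
    *pu2State = 0;
    return VINF_SUCCESS;
}
#endif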
5644
5645/**
5646 * Helper for setting the NEM state for a range of pages.
5647 *
5648 * @param paPages Array of pages to modify.
5649 * @param cPages How many pages to modify.
5650 * @param u2State The new state value.
5651 */
5652DECLHIDDEN(void) pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
5653{
5654 PPGMPAGE pPage = paPages;
5655 while (cPages-- > 0)
5656 {
5657 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
5658 pPage++;
5659 }
5660}
5661
5662#endif /* VBOX_WITH_NATIVE_NEM */
5663