VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 85704

Last change on this file since 85704 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

1/* $Id: PGMAllPhys.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_PHYS
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/trpm.h>
25#include <VBox/vmm/vmm.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/vmm/em.h>
28#include <VBox/vmm/nem.h>
29#include "PGMInternal.h"
30#include <VBox/vmm/vmcc.h>
31#include "PGMInline.h"
32#include <VBox/param.h>
33#include <VBox/err.h>
34#include <iprt/assert.h>
35#include <iprt/string.h>
36#include <iprt/asm-amd64-x86.h>
37#include <VBox/log.h>
38#ifdef IN_RING3
39# include <iprt/thread.h>
40#endif
41
42
43/*********************************************************************************************************************************
44* Defined Constants And Macros *
45*********************************************************************************************************************************/
46/** Enable the physical TLB. */
47#define PGM_WITH_PHYS_TLB
48
49/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
50 * Checks if valid physical access handler return code (normal handler, not PF).
51 *
52 * Checks if the given strict status code is one of the expected ones for a
53 * physical access handler in the current context.
54 *
55 * @returns true or false.
56 * @param a_rcStrict The status code.
57 * @param a_fWrite Whether it is a write or read being serviced.
58 *
59 * @remarks We wish to keep the list of statuses here as short as possible.
60 * When changing, please make sure to update the PGMPhysRead,
61 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
62 */
63#ifdef IN_RING3
64# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
65 ( (a_rcStrict) == VINF_SUCCESS \
66 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
67#elif defined(IN_RING0)
68#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
69 ( (a_rcStrict) == VINF_SUCCESS \
70 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
71 \
72 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
73 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
74 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
75 \
76 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
77 || (a_rcStrict) == VINF_EM_DBG_STOP \
78 || (a_rcStrict) == VINF_EM_DBG_EVENT \
79 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
80 || (a_rcStrict) == VINF_EM_OFF \
81 || (a_rcStrict) == VINF_EM_SUSPEND \
82 || (a_rcStrict) == VINF_EM_RESET \
83 )
84#else
85# error "Context?"
86#endif
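/*
 * Illustrative sketch (editor addition, not part of the original source): how a
 * caller might sanity-check a physical handler status with the macro above.  The
 * helper name pgmPhysExampleValidateHandlerStatus is hypothetical; only the macro
 * and the assertion helpers are taken from the surrounding code.
 */
#if 0
DECLINLINE(VBOXSTRICTRC) pgmPhysExampleValidateHandlerStatus(VBOXSTRICTRC rcStrict, bool fWrite)
{
    /* Anything outside the context specific list above indicates a broken handler. */
    AssertMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, fWrite),
              ("Unexpected handler status %Rrc (fWrite=%RTbool)\n", VBOXSTRICTRC_VAL(rcStrict), fWrite));
    return rcStrict;
}
#endif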
87
88/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
89 * Checks if valid virtual access handler return code (normal handler, not PF).
90 *
91 * Checks if the given strict status code is one of the expected ones for a
92 * virtual access handler in the current context.
93 *
94 * @returns true or false.
95 * @param a_rcStrict The status code.
96 * @param a_fWrite Whether it is a write or read being serviced.
97 *
98 * @remarks We wish to keep the list of statuses here as short as possible.
99 * When changing, please make sure to update the PGMPhysRead,
100 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
101 */
102#ifdef IN_RING3
103# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
104 ( (a_rcStrict) == VINF_SUCCESS \
105 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
106#elif defined(IN_RING0)
107# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
108 (false /* no virtual handlers in ring-0! */ )
109#else
110# error "Context?"
111#endif
112
113
114
115#ifndef IN_RING3
116
117/**
118 * @callback_method_impl{FNPGMPHYSHANDLER,
119 * Dummy for forcing ring-3 handling of the access.}
120 */
121DECLEXPORT(VBOXSTRICTRC)
122pgmPhysHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
123 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
124{
125 NOREF(pVM); NOREF(pVCpu); NOREF(GCPhys); NOREF(pvPhys); NOREF(pvBuf); NOREF(cbBuf);
126 NOREF(enmAccessType); NOREF(enmOrigin); NOREF(pvUser);
127 return VINF_EM_RAW_EMULATE_INSTR;
128}
129
130
131/**
132 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
133 * Dummy for forcing ring-3 handling of the access.}
134 */
135VMMDECL(VBOXSTRICTRC) pgmPhysPfHandlerRedirectToHC(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
136 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
137{
138 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
139 return VINF_EM_RAW_EMULATE_INSTR;
140}
141
142
143/**
144 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
145 * \#PF access handler callback for guest ROM range write access.}
146 *
147 * @remarks The @a pvUser argument points to the PGMROMRANGE.
148 */
149DECLEXPORT(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
150 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
151{
152 int rc;
153 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
154 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
155 NOREF(uErrorCode); NOREF(pvFault);
156
157 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
158
159 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
160 switch (pRom->aPages[iPage].enmProt)
161 {
162 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
163 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
164 {
165 /*
166 * If it's a simple instruction which doesn't change the cpu state
167 * we will simply skip it. Otherwise we'll have to defer it to REM.
168 */
169 uint32_t cbOp;
170 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
171 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
172 if ( RT_SUCCESS(rc)
173 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
174 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
175 {
176 switch (pDis->bOpCode)
177 {
178 /** @todo Find other instructions we can safely skip, possibly
179 * adding this kind of detection to DIS or EM. */
180 case OP_MOV:
181 pRegFrame->rip += cbOp;
182 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
183 return VINF_SUCCESS;
184 }
185 }
186 break;
187 }
188
189 case PGMROMPROT_READ_RAM_WRITE_RAM:
190 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
191 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
192 AssertRC(rc);
193 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
194
195 case PGMROMPROT_READ_ROM_WRITE_RAM:
196 /* Handle it in ring-3 because it's *way* easier there. */
197 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
198 break;
199
200 default:
201 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
202 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
203 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
204 }
205
206 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
207 return VINF_EM_RAW_EMULATE_INSTR;
208}
209
210#endif /* !IN_RING3 */
211
212
213/**
214 * @callback_method_impl{FNPGMPHYSHANDLER,
215 * Access handler callback for ROM write accesses.}
216 *
217 * @remarks The @a pvUser argument points to the PGMROMRANGE.
218 */
219PGM_ALL_CB2_DECL(VBOXSTRICTRC)
220pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
221 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
222{
223 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
224 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
225 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
226 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
227 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
228 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
229
230 if (enmAccessType == PGMACCESSTYPE_READ)
231 {
232 switch (pRomPage->enmProt)
233 {
234 /*
235 * Take the default action.
236 */
237 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
238 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
239 case PGMROMPROT_READ_ROM_WRITE_RAM:
240 case PGMROMPROT_READ_RAM_WRITE_RAM:
241 return VINF_PGM_HANDLER_DO_DEFAULT;
242
243 default:
244 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
245 pRom->aPages[iPage].enmProt, iPage, GCPhys),
246 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
247 }
248 }
249 else
250 {
251 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
252 switch (pRomPage->enmProt)
253 {
254 /*
255 * Ignore writes.
256 */
257 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
258 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
259 return VINF_SUCCESS;
260
261 /*
262 * Write to the RAM page.
263 */
264 case PGMROMPROT_READ_ROM_WRITE_RAM:
265 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
266 {
267 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
268 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
269
270 /*
271 * Take the lock, do lazy allocation, map the page and copy the data.
272 *
273 * Note that we have to bypass the mapping TLB since it works on
274 * guest physical addresses and entering the shadow page would
275 * kind of screw things up...
276 */
277 int rc = pgmLock(pVM);
278 AssertRC(rc);
279
280 PPGMPAGE pShadowPage = &pRomPage->Shadow;
281 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
282 {
283 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
284 AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
285 }
286
287 void *pvDstPage;
288 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
289 if (RT_SUCCESS(rc))
290 {
291 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
292 pRomPage->LiveSave.fWrittenTo = true;
293
294 AssertMsg( rc == VINF_SUCCESS
295 || ( rc == VINF_PGM_SYNC_CR3
296 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
297 , ("%Rrc\n", rc));
298 rc = VINF_SUCCESS;
299 }
300
301 pgmUnlock(pVM);
302 return rc;
303 }
304
305 default:
306 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
307 pRom->aPages[iPage].enmProt, iPage, GCPhys),
308 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
309 }
310 }
311}
312
313
314/**
315 * Invalidates the RAM range TLBs.
316 *
317 * @param pVM The cross context VM structure.
318 */
319void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
320{
321 pgmLock(pVM);
322 RT_ZERO(pVM->pgm.s.apRamRangesTlbR3);
323 RT_ZERO(pVM->pgm.s.apRamRangesTlbR0);
324 pgmUnlock(pVM);
325}
326
327
328/**
329 * Tests if a value of type RTGCPHYS would be negative if the type had been signed
330 * instead of unsigned.
331 *
332 * @returns @c true if negative, @c false if positive or zero.
333 * @param a_GCPhys The value to test.
334 * @todo Move me to iprt/types.h.
335 */
336#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
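/*
 * Editor note (illustrative, not in the original source): the macro simply tests
 * the most significant bit of the unsigned RTGCPHYS value.  That is how the AVL
 * tree walkers below decide between the left and right subtree after computing
 * off = GCPhys - pRam->GCPhys.  A minimal fragment of that idea:
 */
#if 0
/* Fragment only: offsets wrap around for addresses below the range start,
 * which sets the top bit and sends the walk to the left subtree. */
Assert( RTGCPHYS_IS_NEGATIVE((RTGCPHYS)0 - 1));
Assert(!RTGCPHYS_IS_NEGATIVE((RTGCPHYS)PAGE_SIZE));
#endif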
337
338
339/**
340 * Slow worker for pgmPhysGetRange.
341 *
342 * @copydoc pgmPhysGetRange
343 */
344PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
345{
346 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
347
348 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
349 while (pRam)
350 {
351 RTGCPHYS off = GCPhys - pRam->GCPhys;
352 if (off < pRam->cb)
353 {
354 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
355 return pRam;
356 }
357 if (RTGCPHYS_IS_NEGATIVE(off))
358 pRam = pRam->CTX_SUFF(pLeft);
359 else
360 pRam = pRam->CTX_SUFF(pRight);
361 }
362 return NULL;
363}
364
365
366/**
367 * Slow worker for pgmPhysGetRangeAtOrAbove.
368 *
369 * @copydoc pgmPhysGetRangeAtOrAbove
370 */
371PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
372{
373 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
374
375 PPGMRAMRANGE pLastLeft = NULL;
376 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
377 while (pRam)
378 {
379 RTGCPHYS off = GCPhys - pRam->GCPhys;
380 if (off < pRam->cb)
381 {
382 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
383 return pRam;
384 }
385 if (RTGCPHYS_IS_NEGATIVE(off))
386 {
387 pLastLeft = pRam;
388 pRam = pRam->CTX_SUFF(pLeft);
389 }
390 else
391 pRam = pRam->CTX_SUFF(pRight);
392 }
393 return pLastLeft;
394}
395
396
397/**
398 * Slow worker for pgmPhysGetPage.
399 *
400 * @copydoc pgmPhysGetPage
401 */
402PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
403{
404 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
405
406 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
407 while (pRam)
408 {
409 RTGCPHYS off = GCPhys - pRam->GCPhys;
410 if (off < pRam->cb)
411 {
412 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
413 return &pRam->aPages[off >> PAGE_SHIFT];
414 }
415
416 if (RTGCPHYS_IS_NEGATIVE(off))
417 pRam = pRam->CTX_SUFF(pLeft);
418 else
419 pRam = pRam->CTX_SUFF(pRight);
420 }
421 return NULL;
422}
423
424
425/**
426 * Slow worker for pgmPhysGetPageEx.
427 *
428 * @copydoc pgmPhysGetPageEx
429 */
430int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
431{
432 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
433
434 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
435 while (pRam)
436 {
437 RTGCPHYS off = GCPhys - pRam->GCPhys;
438 if (off < pRam->cb)
439 {
440 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
441 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
442 return VINF_SUCCESS;
443 }
444
445 if (RTGCPHYS_IS_NEGATIVE(off))
446 pRam = pRam->CTX_SUFF(pLeft);
447 else
448 pRam = pRam->CTX_SUFF(pRight);
449 }
450
451 *ppPage = NULL;
452 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
453}
454
455
456/**
457 * Slow worker for pgmPhysGetPageAndRangeEx.
458 *
459 * @copydoc pgmPhysGetPageAndRangeEx
460 */
461int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
462{
463 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
464
465 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
466 while (pRam)
467 {
468 RTGCPHYS off = GCPhys - pRam->GCPhys;
469 if (off < pRam->cb)
470 {
471 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
472 *ppRam = pRam;
473 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
474 return VINF_SUCCESS;
475 }
476
477 if (RTGCPHYS_IS_NEGATIVE(off))
478 pRam = pRam->CTX_SUFF(pLeft);
479 else
480 pRam = pRam->CTX_SUFF(pRight);
481 }
482
483 *ppRam = NULL;
484 *ppPage = NULL;
485 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
486}
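/*
 * Illustrative sketch (editor addition, not part of the original source): the
 * typical caller pattern for the lookup workers above.  pgmPhysGetPageEx first
 * consults the per-context RAM range TLB and only falls back to the slow tree
 * walk on a miss.  The helper name pgmPhysExampleQueryPageType is hypothetical.
 */
#if 0
static int pgmPhysExampleQueryPageType(PVMCC pVM, RTGCPHYS GCPhys, uint8_t *penmType)
{
    pgmLock(pVM);                               /* The PGM lock protects the range tree and the TLBs. */
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
        *penmType = PGM_PAGE_GET_TYPE(pPage);
    pgmUnlock(pVM);
    return rc;                                  /* VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if unmapped. */
}
#endif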
487
488
489/**
490 * Checks if Address Gate 20 is enabled or not.
491 *
492 * @returns true if enabled.
493 * @returns false if disabled.
494 * @param pVCpu The cross context virtual CPU structure.
495 */
496VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
497{
498 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
499 return pVCpu->pgm.s.fA20Enabled;
500}
501
502
503/**
504 * Validates a GC physical address.
505 *
506 * @returns true if valid.
507 * @returns false if invalid.
508 * @param pVM The cross context VM structure.
509 * @param GCPhys The physical address to validate.
510 */
511VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
512{
513 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
514 return pPage != NULL;
515}
516
517
518/**
519 * Checks if a GC physical address is a normal page,
520 * i.e. not ROM, MMIO or reserved.
521 *
522 * @returns true if normal.
523 * @returns false if invalid, ROM, MMIO or reserved page.
524 * @param pVM The cross context VM structure.
525 * @param GCPhys The physical address to check.
526 */
527VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
528{
529 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
530 return pPage
531 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
532}
533
534
535/**
536 * Converts a GC physical address to a HC physical address.
537 *
538 * @returns VINF_SUCCESS on success.
539 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
540 * page but has no physical backing.
541 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
542 * GC physical address.
543 *
544 * @param pVM The cross context VM structure.
545 * @param GCPhys The GC physical address to convert.
546 * @param pHCPhys Where to store the HC physical address on success.
547 */
548VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
549{
550 pgmLock(pVM);
551 PPGMPAGE pPage;
552 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
553 if (RT_SUCCESS(rc))
554 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
555 pgmUnlock(pVM);
556 return rc;
557}
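/*
 * Illustrative usage sketch (editor addition, not part of the original source):
 * translating a guest physical address into the backing host physical address.
 * The variable names and the example address are hypothetical; the call itself
 * is the API shown above.
 */
#if 0
RTHCPHYS HCPhysExample;
int rcExample = PGMPhysGCPhys2HCPhys(pVM, UINT32_C(0x000a0000) /* example guest address */, &HCPhysExample);
if (RT_SUCCESS(rcExample))
    Log(("GCPhys %#x is backed by HCPhys %RHp\n", 0x000a0000, HCPhysExample));
#endif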
558
559
560/**
561 * Invalidates all page mapping TLBs.
562 *
563 * @param pVM The cross context VM structure.
564 */
565void pgmPhysInvalidatePageMapTLB(PVMCC pVM)
566{
567 pgmLock(pVM);
568 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
569
570 /* Clear the R3 & R0 TLBs completely. */
571 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
572 {
573 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
574 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
575#ifndef VBOX_WITH_RAM_IN_KERNEL
576 pVM->pgm.s.PhysTlbR0.aEntries[i].pMap = 0;
577#endif
578 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
579 }
580
581 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
582 {
583 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
584 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
585 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
586 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
587 }
588
589 pgmUnlock(pVM);
590}
591
592
593/**
594 * Invalidates a page mapping TLB entry.
595 *
596 * @param pVM The cross context VM structure.
597 * @param GCPhys The GCPhys entry to flush.
598 */
599void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
600{
601 PGM_LOCK_ASSERT_OWNER(pVM);
602
603 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
604
605 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
606
607 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
608 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
609#ifndef VBOX_WITH_RAM_IN_KERNEL
610 pVM->pgm.s.PhysTlbR0.aEntries[idx].pMap = 0;
611#endif
612 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
613
614 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
615 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
616 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
617 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
618}
619
620
621/**
622 * Makes sure that there is at least one handy page ready for use.
623 *
624 * This will also take the appropriate actions when reaching water-marks.
625 *
626 * @returns VBox status code.
627 * @retval VINF_SUCCESS on success.
628 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
629 *
630 * @param pVM The cross context VM structure.
631 *
632 * @remarks Must be called from within the PGM critical section. It may
633 * nip back to ring-3/0 in some cases.
634 */
635static int pgmPhysEnsureHandyPage(PVMCC pVM)
636{
637 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
638
639 /*
640 * Do we need to do anything special?
641 */
642#ifdef IN_RING3
643 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
644#else
645 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
646#endif
647 {
648 /*
649 * Allocate pages only if we're out of them, or in ring-3, almost out.
650 */
651#ifdef IN_RING3
652 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
653#else
654 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
655#endif
656 {
657 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
658 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
659#ifdef IN_RING3
660 int rc = PGMR3PhysAllocateHandyPages(pVM);
661#else
662 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
663#endif
664 if (RT_UNLIKELY(rc != VINF_SUCCESS))
665 {
666 if (RT_FAILURE(rc))
667 return rc;
668 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
669 if (!pVM->pgm.s.cHandyPages)
670 {
671 LogRel(("PGM: no more handy pages!\n"));
672 return VERR_EM_NO_MEMORY;
673 }
674 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
675 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
676#ifndef IN_RING3
677 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
678#endif
679 }
680 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
681 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
682 ("%u\n", pVM->pgm.s.cHandyPages),
683 VERR_PGM_HANDY_PAGE_IPE);
684 }
685 else
686 {
687 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
688 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
689#ifndef IN_RING3
690 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
691 {
692 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
693 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
694 }
695#endif
696 }
697 }
698
699 return VINF_SUCCESS;
700}
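/*
 * Editor note (summary of the logic above, not in the original source): the
 * handy-page watermarks work roughly like this, with the actual thresholds
 * given by the PGM_HANDY_PAGES_* constants:
 *      cHandyPages <= *_ALLOC    -> allocate more pages now (ring-3 directly,
 *                                   ring-0/raw-mode via a ring-3 call).
 *      cHandyPages <= SET_FF     -> set VM_FF_PGM_NEED_HANDY_PAGES so ring-3
 *                                   tops up the set at the next opportunity.
 *      cHandyPages <= RZ_TO_R3   -> additionally force the EMT back to ring-3.
 */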
701
702
703
704/**
705 * Replace a zero or shared page with a new page that we can write to.
706 *
707 * @returns The following VBox status codes.
708 * @retval VINF_SUCCESS on success, pPage is modified.
709 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
710 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
711 *
712 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
713 *
714 * @param pVM The cross context VM structure.
715 * @param pPage The physical page tracking structure. This will
716 * be modified on success.
717 * @param GCPhys The address of the page.
718 *
719 * @remarks Must be called from within the PGM critical section. It may
720 * nip back to ring-3/0 in some cases.
721 *
722 * @remarks This function shouldn't really fail, however if it does
723 * it probably means we've screwed up the size of handy pages and/or
724 * the low-water mark. Or, that some device I/O is causing a lot of
725 * pages to be allocated while the host is in a low-memory
726 * condition. This latter should be handled elsewhere and in a more
727 * controlled manner, it's on the @bugref{3170} todo list...
728 */
729int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
730{
731 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
732
733 /*
734 * Prereqs.
735 */
736 PGM_LOCK_ASSERT_OWNER(pVM);
737 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
738 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
739
740# ifdef PGM_WITH_LARGE_PAGES
741 /*
742 * Try allocate a large page if applicable.
743 */
744 if ( PGMIsUsingLargePages(pVM)
745 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
746 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
747 {
748 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
749 PPGMPAGE pBasePage;
750
751 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
752 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
753 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
754 {
755 rc = pgmPhysAllocLargePage(pVM, GCPhys);
756 if (rc == VINF_SUCCESS)
757 return rc;
758 }
759 /* Mark the base as type page table, so we don't check over and over again. */
760 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
761
762 /* fall back to 4KB pages. */
763 }
764# endif
765
766 /*
767 * Flush any shadow page table mappings of the page.
768 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
769 */
770 bool fFlushTLBs = false;
771 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
772 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
773
774 /*
775 * Ensure that we've got a page handy, take it and use it.
776 */
777 int rc2 = pgmPhysEnsureHandyPage(pVM);
778 if (RT_FAILURE(rc2))
779 {
780 if (fFlushTLBs)
781 PGM_INVL_ALL_VCPU_TLBS(pVM);
782 Assert(rc2 == VERR_EM_NO_MEMORY);
783 return rc2;
784 }
785 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
786 PGM_LOCK_ASSERT_OWNER(pVM);
787 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
788 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
789
790 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
791 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
792 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
793 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
794 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
795 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
796
797 /*
798 * There are one or two actions to be taken the next time we allocate handy pages:
799 * - Tell the GMM (global memory manager) what the page is being used for.
800 * (Speeds up replacement operations - sharing and defragmenting.)
801 * - If the current backing is shared, it must be freed.
802 */
803 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
804 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
805
806 void const *pvSharedPage = NULL;
807 if (PGM_PAGE_IS_SHARED(pPage))
808 {
809 /* Mark this shared page for freeing/dereferencing. */
810 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
811 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
812
813 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
814 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
815 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
816 pVM->pgm.s.cSharedPages--;
817
818 /* Grab the address of the page so we can make a copy later on. (safe) */
819 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
820 AssertRC(rc);
821 }
822 else
823 {
824 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
825 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
826 pVM->pgm.s.cZeroPages--;
827 }
828
829 /*
830 * Do the PGMPAGE modifications.
831 */
832 pVM->pgm.s.cPrivatePages++;
833 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
834 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
835 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
836 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
837 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
838
839 /* Copy the shared page contents to the replacement page. */
840 if (pvSharedPage)
841 {
842 /* Get the virtual address of the new page. */
843 PGMPAGEMAPLOCK PgMpLck;
844 void *pvNewPage;
845 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
846 if (RT_SUCCESS(rc))
847 {
848 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
849 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
850 }
851 }
852
853 if ( fFlushTLBs
854 && rc != VINF_PGM_GCPHYS_ALIASED)
855 PGM_INVL_ALL_VCPU_TLBS(pVM);
856
857 /*
858 * Notify NEM about the mapping change for this page.
859 *
860 * Note! Shadow ROM pages are complicated as they can definitely be
861 * allocated while not visible, so play safe.
862 */
863 if (VM_IS_NEM_ENABLED(pVM))
864 {
865 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
866 if ( enmType != PGMPAGETYPE_ROM_SHADOW
867 || pgmPhysGetPage(pVM, GCPhys) == pPage)
868 {
869 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
870 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
871 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
872 if (RT_SUCCESS(rc))
873 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
874 else
875 rc = rc2;
876 }
877 }
878
879 return rc;
880}
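/*
 * Editor note (summary of the sequence above, not in the original source):
 *      1. Optionally try to upgrade the surrounding 2 MB range to a large page.
 *      2. Flush any shadow page table references to the old page.
 *      3. Take a handy page (replenishing the set via pgmPhysEnsureHandyPage).
 *      4. Record the GMM book-keeping info and, for shared pages, keep a
 *         read-only mapping of the old contents.
 *      5. Update the PGMPAGE (HCPhys, page id, ALLOCATED state) and invalidate
 *         the page map TLB entry.
 *      6. Copy any old shared contents into the new page and notify NEM.
 */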
881
882#ifdef PGM_WITH_LARGE_PAGES
883
884/**
885 * Replace a 2 MB range of zero pages with new pages that we can write to.
886 *
887 * @returns The following VBox status codes.
888 * @retval VINF_SUCCESS on success, pPage is modified.
889 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
890 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
891 *
892 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
893 *
894 * @param pVM The cross context VM structure.
895 * @param GCPhys The address of the page.
896 *
897 * @remarks Must be called from within the PGM critical section. It may
898 * nip back to ring-3/0 in some cases.
899 */
900int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
901{
902 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
903 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
904 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
905
906 /*
907 * Prereqs.
908 */
909 PGM_LOCK_ASSERT_OWNER(pVM);
910 Assert(PGMIsUsingLargePages(pVM));
911
912 PPGMPAGE pFirstPage;
913 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
914 if ( RT_SUCCESS(rc)
915 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
916 {
917 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
918
919 /* Don't call this function for already allocated pages. */
920 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
921
922 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
923 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
924 {
925 /* Lazy approach: check all pages in the 2 MB range.
926 * The whole range must be ram and unallocated. */
927 GCPhys = GCPhysBase;
928 unsigned iPage;
929 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
930 {
931 PPGMPAGE pSubPage;
932 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
933 if ( RT_FAILURE(rc)
934 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
935 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
936 {
937 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
938 break;
939 }
940 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
941 GCPhys += PAGE_SIZE;
942 }
943 if (iPage != _2M/PAGE_SIZE)
944 {
945 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
946 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
947 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
948 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
949 }
950
951 /*
952 * Do the allocation.
953 */
954# ifdef IN_RING3
955 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
956# else
957 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
958# endif
959 if (RT_SUCCESS(rc))
960 {
961 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
962 pVM->pgm.s.cLargePages++;
963 return VINF_SUCCESS;
964 }
965
966 /* If we fail once, it most likely means the host's memory is too
967 fragmented; don't bother trying again. */
968 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
969 PGMSetLargePageUsage(pVM, false);
970 return rc;
971 }
972 }
973 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
974}
975
976
977/**
978 * Recheck the entire 2 MB range to see if we can use it again as a large page.
979 *
980 * @returns The following VBox status codes.
981 * @retval VINF_SUCCESS on success, the large page can be used again
982 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
983 *
984 * @param pVM The cross context VM structure.
985 * @param GCPhys The address of the page.
986 * @param pLargePage Page structure of the base page
987 */
988int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
989{
990 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
991
992 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
993
994 GCPhys &= X86_PDE2M_PAE_PG_MASK;
995
996 /* Check the base page. */
997 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
998 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
999 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
1000 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1001 {
1002 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
1003 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1004 }
1005
1006 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
1007 /* Check all remaining pages in the 2 MB range. */
1008 unsigned i;
1009 GCPhys += PAGE_SIZE;
1010 for (i = 1; i < _2M/PAGE_SIZE; i++)
1011 {
1012 PPGMPAGE pPage;
1013 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1014 AssertRCBreak(rc);
1015
1016 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
1017 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
1018 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
1019 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
1020 {
1021 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
1022 break;
1023 }
1024
1025 GCPhys += PAGE_SIZE;
1026 }
1027 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
1028
1029 if (i == _2M/PAGE_SIZE)
1030 {
1031 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
1032 pVM->pgm.s.cLargePagesDisabled--;
1033 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
1034 return VINF_SUCCESS;
1035 }
1036
1037 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
1038}
1039
1040#endif /* PGM_WITH_LARGE_PAGES */
1041
1042
1043/**
1044 * Deal with a write monitored page.
1045 *
1046 * @returns VBox strict status code.
1047 *
1048 * @param pVM The cross context VM structure.
1049 * @param pPage The physical page tracking structure.
1050 * @param GCPhys The guest physical address of the page.
1051 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
1052 * very unlikely situation where it is okay that we let NEM
1053 * fix the page access in a lazy fashion.
1054 *
1055 * @remarks Called from within the PGM critical section.
1056 */
1057void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1058{
1059 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
1060 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1061 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1062 Assert(pVM->pgm.s.cMonitoredPages > 0);
1063 pVM->pgm.s.cMonitoredPages--;
1064 pVM->pgm.s.cWrittenToPages++;
1065
1066 /*
1067 * Notify NEM about the protection change so we won't spin forever.
1068 *
1069 * Note! NEM needs to be able to lazily correct page protection as we cannot
1070 * really get it 100% right here it seems. The page pool does this too.
1071 */
1072 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
1073 {
1074 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1075 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1076 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
1077 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1078 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1079 }
1080}
1081
1082
1083/**
1084 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
1085 *
1086 * @returns VBox strict status code.
1087 * @retval VINF_SUCCESS on success.
1088 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1089 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1090 *
1091 * @param pVM The cross context VM structure.
1092 * @param pPage The physical page tracking structure.
1093 * @param GCPhys The address of the page.
1094 *
1095 * @remarks Called from within the PGM critical section.
1096 */
1097int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1098{
1099 PGM_LOCK_ASSERT_OWNER(pVM);
1100 switch (PGM_PAGE_GET_STATE(pPage))
1101 {
1102 case PGM_PAGE_STATE_WRITE_MONITORED:
1103 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
1104 RT_FALL_THRU();
1105 default: /* to shut up GCC */
1106 case PGM_PAGE_STATE_ALLOCATED:
1107 return VINF_SUCCESS;
1108
1109 /*
1110 * Zero pages can be dummy pages for MMIO or reserved memory,
1111 * so we need to check the flags before joining cause with
1112 * shared page replacement.
1113 */
1114 case PGM_PAGE_STATE_ZERO:
1115 if (PGM_PAGE_IS_MMIO(pPage))
1116 return VERR_PGM_PHYS_PAGE_RESERVED;
1117 RT_FALL_THRU();
1118 case PGM_PAGE_STATE_SHARED:
1119 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1120
1121 /* Not allowed to write to ballooned pages. */
1122 case PGM_PAGE_STATE_BALLOONED:
1123 return VERR_PGM_PHYS_PAGE_BALLOONED;
1124 }
1125}
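/*
 * Editor note (summary of the switch above, not in the original source):
 *      WRITE_MONITORED -> made writable in place, then treated as ALLOCATED.
 *      ALLOCATED       -> nothing to do, VINF_SUCCESS.
 *      ZERO            -> VERR_PGM_PHYS_PAGE_RESERVED for MMIO dummies, otherwise
 *                         replaced via pgmPhysAllocPage (like SHARED).
 *      SHARED          -> replaced with a private copy via pgmPhysAllocPage.
 *      BALLOONED       -> VERR_PGM_PHYS_PAGE_BALLOONED, writes are not allowed.
 */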
1126
1127
1128/**
1129 * Internal usage: Map the page specified by its GMM ID.
1130 *
1131 * This is similar to pgmPhysPageMap.
1132 *
1133 * @returns VBox status code.
1134 *
1135 * @param pVM The cross context VM structure.
1136 * @param idPage The Page ID.
1137 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
1138 * @param ppv Where to store the mapping address.
1139 *
1140 * @remarks Called from within the PGM critical section. The mapping is only
1141 * valid while you are inside this section.
1142 */
1143int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1144{
1145 /*
1146 * Validation.
1147 */
1148 PGM_LOCK_ASSERT_OWNER(pVM);
1149 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1150 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1151 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1152
1153#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1154 /*
1155 * Map it by HCPhys.
1156 */
1157 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1158
1159#elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1160# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1161 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
1162# else
1163 return GMMR0PageIdToVirt(pVM, idPage, ppv);
1164# endif
1165
1166#else
1167 /*
1168 * Find/make Chunk TLB entry for the mapping chunk.
1169 */
1170 PPGMCHUNKR3MAP pMap;
1171 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1172 if (pTlbe->idChunk == idChunk)
1173 {
1174 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1175 pMap = pTlbe->pChunk;
1176 }
1177 else
1178 {
1179 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1180
1181 /*
1182 * Find the chunk, map it if necessary.
1183 */
1184 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1185 if (pMap)
1186 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1187 else
1188 {
1189# ifdef IN_RING0
1190 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1191 AssertRCReturn(rc, rc);
1192 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1193 Assert(pMap);
1194# else
1195 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1196 if (RT_FAILURE(rc))
1197 return rc;
1198# endif
1199 }
1200
1201 /*
1202 * Enter it into the Chunk TLB.
1203 */
1204 pTlbe->idChunk = idChunk;
1205 pTlbe->pChunk = pMap;
1206 }
1207
1208 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1209 return VINF_SUCCESS;
1210#endif
1211}
1212
1213
1214/**
1215 * Maps a page into the current virtual address space so it can be accessed.
1216 *
1217 * @returns VBox status code.
1218 * @retval VINF_SUCCESS on success.
1219 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1220 *
1221 * @param pVM The cross context VM structure.
1222 * @param pPage The physical page tracking structure.
1223 * @param GCPhys The address of the page.
1224 * @param ppMap Where to store the address of the mapping tracking structure.
1225 * @param ppv Where to store the mapping address of the page. The page
1226 * offset is masked off!
1227 *
1228 * @remarks Called from within the PGM critical section.
1229 */
1230static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1231{
1232 PGM_LOCK_ASSERT_OWNER(pVM);
1233 NOREF(GCPhys);
1234
1235#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1236 /*
1237 * Just some sketchy GC/R0-darwin code.
1238 */
1239 *ppMap = NULL;
1240 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1241 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1242 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1243 return VINF_SUCCESS;
1244
1245#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1246
1247
1248 /*
1249 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1250 */
1251 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1252 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1253 {
1254 /* Decode the page id to a page in a MMIO2 ram range. */
1255 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1256 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1257 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1258 ("idMmio2=%u size=%u type=%u GCPhys=%#RGp Id=%u State=%u", idMmio2,
1259 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1260 pPage->s.idPage, pPage->s.uStateY),
1261 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1262 PPGMREGMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1263 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1264 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1265 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1266 *ppMap = NULL;
1267# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1268 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1269# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1270 *ppv = (uint8_t *)pMmio2Range->pvR0 + ((uintptr_t)iPage << PAGE_SHIFT);
1271 return VINF_SUCCESS;
1272# else
1273 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1274 return VINF_SUCCESS;
1275# endif
1276 }
1277
1278 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1279 if (idChunk == NIL_GMM_CHUNKID)
1280 {
1281 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1282 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1283 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1284 {
1285 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1286 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1287 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1288 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1289 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1290 }
1291 else
1292 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1293 *ppMap = NULL;
1294 return VINF_SUCCESS;
1295 }
1296
1297# if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1298 /*
1299 * Just use the physical address.
1300 */
1301 *ppMap = NULL;
1302 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
1303
1304# elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
1305 /*
1306 * Go by page ID thru GMMR0.
1307 */
1308 *ppMap = NULL;
1309 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
1310
1311# else
1312 /*
1313 * Find/make Chunk TLB entry for the mapping chunk.
1314 */
1315 PPGMCHUNKR3MAP pMap;
1316 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1317 if (pTlbe->idChunk == idChunk)
1318 {
1319 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1320 pMap = pTlbe->pChunk;
1321 AssertPtr(pMap->pv);
1322 }
1323 else
1324 {
1325 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1326
1327 /*
1328 * Find the chunk, map it if necessary.
1329 */
1330 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1331 if (pMap)
1332 {
1333 AssertPtr(pMap->pv);
1334 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1335 }
1336 else
1337 {
1338# ifdef IN_RING0
1339 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1340 AssertRCReturn(rc, rc);
1341 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1342 Assert(pMap);
1343# else
1344 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1345 if (RT_FAILURE(rc))
1346 return rc;
1347# endif
1348 AssertPtr(pMap->pv);
1349 }
1350
1351 /*
1352 * Enter it into the Chunk TLB.
1353 */
1354 pTlbe->idChunk = idChunk;
1355 pTlbe->pChunk = pMap;
1356 }
1357
1358 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1359 *ppMap = pMap;
1360 return VINF_SUCCESS;
1361# endif /* !IN_RING0 || !VBOX_WITH_RAM_IN_KERNEL */
1362#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1363}
1364
1365
1366/**
1367 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1368 *
1369 * This is typically used in paths where we cannot use the TLB methods (like ROM
1370 * pages) or where there is no point in using them since we won't get many hits.
1371 *
1372 * @returns VBox strict status code.
1373 * @retval VINF_SUCCESS on success.
1374 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1375 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1376 *
1377 * @param pVM The cross context VM structure.
1378 * @param pPage The physical page tracking structure.
1379 * @param GCPhys The address of the page.
1380 * @param ppv Where to store the mapping address of the page. The page
1381 * offset is masked off!
1382 *
1383 * @remarks Called from within the PGM critical section. The mapping is only
1384 * valid while you are inside this section.
1385 */
1386int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1387{
1388 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1389 if (RT_SUCCESS(rc))
1390 {
1391 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1392 PPGMPAGEMAP pMapIgnore;
1393 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1394 if (RT_FAILURE(rc2)) /* preserve rc */
1395 rc = rc2;
1396 }
1397 return rc;
1398}
1399
1400
1401/**
1402 * Maps a page into the current virtual address space so it can be accessed for
1403 * both writing and reading.
1404 *
1405 * This is typically used in paths where we cannot use the TLB methods (like ROM
1406 * pages) or where there is no point in using them since we won't get many hits.
1407 *
1408 * @returns VBox status code.
1409 * @retval VINF_SUCCESS on success.
1410 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1411 *
1412 * @param pVM The cross context VM structure.
1413 * @param pPage The physical page tracking structure. Must be in the
1414 * allocated state.
1415 * @param GCPhys The address of the page.
1416 * @param ppv Where to store the mapping address of the page. The page
1417 * offset is masked off!
1418 *
1419 * @remarks Called from within the PGM critical section. The mapping is only
1420 * valid while you are inside this section.
1421 */
1422int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1423{
1424 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1425 PPGMPAGEMAP pMapIgnore;
1426 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1427}
1428
1429
1430/**
1431 * Maps a page into the current virtual address space so it can be accessed for
1432 * reading.
1433 *
1434 * This is typically used in paths where we cannot use the TLB methods (like ROM
1435 * pages) or where there is no point in using them since we won't get many hits.
1436 *
1437 * @returns VBox status code.
1438 * @retval VINF_SUCCESS on success.
1439 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1440 *
1441 * @param pVM The cross context VM structure.
1442 * @param pPage The physical page tracking structure.
1443 * @param GCPhys The address of the page.
1444 * @param ppv Where to store the mapping address of the page. The page
1445 * offset is masked off!
1446 *
1447 * @remarks Called from within the PGM critical section. The mapping is only
1448 * valid while you are inside this section.
1449 */
1450int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1451{
1452 PPGMPAGEMAP pMapIgnore;
1453 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1454}
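/*
 * Illustrative sketch (editor addition, not part of the original source): the
 * map-and-copy pattern these helpers enable, similar to what pgmPhysAllocPage
 * does when it preserves the contents of a shared page.  The helper name
 * pgmPhysExamplePeekPage is hypothetical.
 */
#if 0
static int pgmPhysExamplePeekPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, uint8_t *pbDst)
{
    PGM_LOCK_ASSERT_OWNER(pVM);                 /* The mapping is only valid inside the PGM critical section. */
    void const *pvSrc;
    int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSrc);
    if (RT_SUCCESS(rc))
        memcpy(pbDst, pvSrc, PAGE_SIZE);        /* Page-aligned mapping; the page offset is already masked off. */
    return rc;
}
#endif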
1455
1456#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1457
1458/**
1459 * Load a guest page into the ring-3 physical TLB.
1460 *
1461 * @returns VBox status code.
1462 * @retval VINF_SUCCESS on success
1463 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1464 * @param pVM The cross context VM structure.
1465 * @param GCPhys The guest physical address in question.
1466 */
1467int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
1468{
1469 PGM_LOCK_ASSERT_OWNER(pVM);
1470
1471 /*
1472 * Find the ram range and page and hand it over to the with-page function.
1473 * 99.8% of requests are expected to be in the first range.
1474 */
1475 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1476 if (!pPage)
1477 {
1478 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1479 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1480 }
1481
1482 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1483}
1484
1485
1486/**
1487 * Load a guest page into the ring-3 physical TLB.
1488 *
1489 * @returns VBox status code.
1490 * @retval VINF_SUCCESS on success
1491 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1492 *
1493 * @param pVM The cross context VM structure.
1494 * @param pPage Pointer to the PGMPAGE structure corresponding to
1495 * GCPhys.
1496 * @param GCPhys The guest physical address in question.
1497 */
1498int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1499{
1500 PGM_LOCK_ASSERT_OWNER(pVM);
1501 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1502
1503 /*
1504 * Map the page.
1505 * Make a special case for the zero page as it is kind of special.
1506 */
1507 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1508 if ( !PGM_PAGE_IS_ZERO(pPage)
1509 && !PGM_PAGE_IS_BALLOONED(pPage))
1510 {
1511 void *pv;
1512 PPGMPAGEMAP pMap;
1513 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1514 if (RT_FAILURE(rc))
1515 return rc;
1516# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1517 pTlbe->pMap = pMap;
1518# endif
1519 pTlbe->pv = pv;
1520 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1521 }
1522 else
1523 {
1524 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1525# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1526 pTlbe->pMap = NULL;
1527# endif
1528 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1529 }
1530# ifdef PGM_WITH_PHYS_TLB
1531 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1532 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1533 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1534 else
1535 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1536# else
1537 pTlbe->GCPhys = NIL_RTGCPHYS;
1538# endif
1539 pTlbe->pPage = pPage;
1540 return VINF_SUCCESS;
1541}
1542
1543#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1544
1545/**
1546 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1547 * own the PGM lock and therefore does not need to lock the mapped page.
1548 *
1549 * @returns VBox status code.
1550 * @retval VINF_SUCCESS on success.
1551 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1552 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1553 *
1554 * @param pVM The cross context VM structure.
1555 * @param GCPhys The guest physical address of the page that should be mapped.
1556 * @param pPage Pointer to the PGMPAGE structure for the page.
1557 * @param ppv Where to store the address corresponding to GCPhys.
1558 *
1559 * @internal
1560 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1561 */
1562int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1563{
1564 int rc;
1565 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1566 PGM_LOCK_ASSERT_OWNER(pVM);
1567 pVM->pgm.s.cDeprecatedPageLocks++;
1568
1569 /*
1570 * Make sure the page is writable.
1571 */
1572 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1573 {
1574 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1575 if (RT_FAILURE(rc))
1576 return rc;
1577 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1578 }
1579 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1580
1581 /*
1582 * Get the mapping address.
1583 */
1584#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1585 void *pv;
1586 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1587 PGM_PAGE_GET_HCPHYS(pPage),
1588 &pv
1589 RTLOG_COMMA_SRC_POS);
1590 if (RT_FAILURE(rc))
1591 return rc;
1592 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1593#else
1594 PPGMPAGEMAPTLBE pTlbe;
1595 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1596 if (RT_FAILURE(rc))
1597 return rc;
1598 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1599#endif
1600 return VINF_SUCCESS;
1601}
1602
1603#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1604
1605/**
1606 * Locks a page mapping for writing.
1607 *
1608 * @param pVM The cross context VM structure.
1609 * @param pPage The page.
1610 * @param pTlbe The mapping TLB entry for the page.
1611 * @param pLock The lock structure (output).
1612 */
1613DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1614{
1615# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1616 PPGMPAGEMAP pMap = pTlbe->pMap;
1617 if (pMap)
1618 pMap->cRefs++;
1619# else
1620 RT_NOREF(pTlbe);
1621# endif
1622
1623 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1624 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1625 {
1626 if (cLocks == 0)
1627 pVM->pgm.s.cWriteLockedPages++;
1628 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1629 }
1630 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1631 {
1632 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1633 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1634# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1635 if (pMap)
1636 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1637# endif
1638 }
1639
1640 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1641# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1642 pLock->pvMap = pMap;
1643# else
1644 pLock->pvMap = NULL;
1645# endif
1646}
1647
1648/**
1649 * Locks a page mapping for reading.
1650 *
1651 * @param pVM The cross context VM structure.
1652 * @param pPage The page.
1653 * @param pTlbe The mapping TLB entry for the page.
1654 * @param pLock The lock structure (output).
1655 */
1656DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1657{
1658# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1659 PPGMPAGEMAP pMap = pTlbe->pMap;
1660 if (pMap)
1661 pMap->cRefs++;
1662# else
1663 RT_NOREF(pTlbe);
1664# endif
1665
1666 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1667 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1668 {
1669 if (cLocks == 0)
1670 pVM->pgm.s.cReadLockedPages++;
1671 PGM_PAGE_INC_READ_LOCKS(pPage);
1672 }
1673 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1674 {
1675 PGM_PAGE_INC_READ_LOCKS(pPage);
1676 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1677# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1678 if (pMap)
1679 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1680# endif
1681 }
1682
1683 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1684# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
1685 pLock->pvMap = pMap;
1686# else
1687 pLock->pvMap = NULL;
1688# endif
1689}
1690
1691#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1692
1693
1694/**
1695 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1696 * own the PGM lock and have access to the page structure.
1697 *
1698 * @returns VBox status code.
1699 * @retval VINF_SUCCESS on success.
1700 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1701 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1702 *
1703 * @param pVM The cross context VM structure.
1704 * @param GCPhys The guest physical address of the page that should be mapped.
1705 * @param pPage Pointer to the PGMPAGE structure for the page.
1706 * @param ppv Where to store the address corresponding to GCPhys.
1707 * @param pLock Where to store the lock information that
1708 * pgmPhysReleaseInternalPageMappingLock needs.
1709 *
1710 * @internal
1711 */
1712int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1713{
1714 int rc;
1715 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1716 PGM_LOCK_ASSERT_OWNER(pVM);
1717
1718 /*
1719 * Make sure the page is writable.
1720 */
1721 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1722 {
1723 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1724 if (RT_FAILURE(rc))
1725 return rc;
1726 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1727 }
1728 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1729
1730 /*
1731 * Do the job.
1732 */
1733#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1734 void *pv;
1735 PVMCPU pVCpu = VMMGetCpu(pVM);
1736 rc = pgmRZDynMapHCPageInlined(pVCpu,
1737 PGM_PAGE_GET_HCPHYS(pPage),
1738 &pv
1739 RTLOG_COMMA_SRC_POS);
1740 if (RT_FAILURE(rc))
1741 return rc;
1742 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1743 pLock->pvPage = pv;
1744 pLock->pVCpu = pVCpu;
1745
1746#else
1747 PPGMPAGEMAPTLBE pTlbe;
1748 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1749 if (RT_FAILURE(rc))
1750 return rc;
1751 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1752 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1753#endif
1754 return VINF_SUCCESS;
1755}
1756
1757
1758/**
1759 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1760 * own the PGM lock and have access to the page structure.
1761 *
1762 * @returns VBox status code.
1763 * @retval VINF_SUCCESS on success.
1764 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1765 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1766 *
1767 * @param pVM The cross context VM structure.
1768 * @param GCPhys The guest physical address of the page that should be mapped.
1769 * @param pPage Pointer to the PGMPAGE structure for the page.
1770 * @param ppv Where to store the address corresponding to GCPhys.
1771 * @param pLock Where to store the lock information that
1772 * pgmPhysReleaseInternalPageMappingLock needs.
1773 *
1774 * @internal
1775 */
1776int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1777{
1778 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1779 PGM_LOCK_ASSERT_OWNER(pVM);
1780 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1781
1782 /*
1783 * Do the job.
1784 */
1785#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1786 void *pv;
1787 PVMCPU pVCpu = VMMGetCpu(pVM);
1788 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1789 PGM_PAGE_GET_HCPHYS(pPage),
1790 &pv
1791 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1792 if (RT_FAILURE(rc))
1793 return rc;
1794 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1795 pLock->pvPage = pv;
1796 pLock->pVCpu = pVCpu;
1797
1798#else
1799 PPGMPAGEMAPTLBE pTlbe;
1800 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1801 if (RT_FAILURE(rc))
1802 return rc;
1803 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1804 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1805#endif
1806 return VINF_SUCCESS;
1807}
1808
1809
1810/**
1811 * Requests the mapping of a guest page into the current context.
1812 *
1813 * This API should only be used for a very short time, as it will consume scarce
1814 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1815 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1816 *
1817 * This API will assume your intention is to write to the page, and will
1818 * therefore replace shared and zero pages. If you do not intend to modify
1819 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1820 *
1821 * @returns VBox status code.
1822 * @retval VINF_SUCCESS on success.
1823 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1824 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1825 *
1826 * @param pVM The cross context VM structure.
1827 * @param GCPhys The guest physical address of the page that should be
1828 * mapped.
1829 * @param ppv Where to store the address corresponding to GCPhys.
1830 * @param pLock Where to store the lock information that
1831 * PGMPhysReleasePageMappingLock needs.
1832 *
1833 * @remarks The caller is responsible for dealing with access handlers.
1834 * @todo Add an informational return code for pages with access handlers?
1835 *
1836 * @remark Avoid calling this API from within critical sections (other than
1837 * the PGM one) because of the deadlock risk. External threads may
1838 * need to delegate jobs to the EMTs.
1839 * @remarks Only one page is mapped! Make no assumption about what's after or
1840 * before the returned page!
1841 * @thread Any thread.
1842 */
1843VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1844{
1845 int rc = pgmLock(pVM);
1846 AssertRCReturn(rc, rc);
1847
1848#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1849 /*
1850 * Find the page and make sure it's writable.
1851 */
1852 PPGMPAGE pPage;
1853 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1854 if (RT_SUCCESS(rc))
1855 {
1856 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1857 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1858 if (RT_SUCCESS(rc))
1859 {
1860 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1861
1862 PVMCPU pVCpu = VMMGetCpu(pVM);
1863 void *pv;
1864 rc = pgmRZDynMapHCPageInlined(pVCpu,
1865 PGM_PAGE_GET_HCPHYS(pPage),
1866 &pv
1867 RTLOG_COMMA_SRC_POS);
1868 if (RT_SUCCESS(rc))
1869 {
1870 AssertRCSuccess(rc);
1871
1872 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1873 *ppv = pv;
1874 pLock->pvPage = pv;
1875 pLock->pVCpu = pVCpu;
1876 }
1877 }
1878 }
1879
1880#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1881 /*
1882 * Query the Physical TLB entry for the page (may fail).
1883 */
1884 PPGMPAGEMAPTLBE pTlbe;
1885 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1886 if (RT_SUCCESS(rc))
1887 {
1888 /*
1889 * If the page is shared, the zero page, or being write monitored
1890 * it must be converted to a page that's writable if possible.
1891 */
1892 PPGMPAGE pPage = pTlbe->pPage;
1893 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1894 {
1895 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1896 if (RT_SUCCESS(rc))
1897 {
1898 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1899 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1900 }
1901 }
1902 if (RT_SUCCESS(rc))
1903 {
1904 /*
1905 * Now, just perform the locking and calculate the return address.
1906 */
1907 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1908 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1909 }
1910 }
1911
1912#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1913 pgmUnlock(pVM);
1914 return rc;
1915}
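
/*
 * Illustrative usage sketch (assumptions: pVM and GCPhys are supplied by the
 * caller, and the write stays within the single mapped page): map a guest
 * page for writing, modify a few bytes and release the lock again ASAP.
 *
 * @code
 *      void          *pv;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memset(pv, 0, 16);                          // stays within the page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);  // release as soon as possible
 *      }
 * @endcode
 */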
1916
1917
1918/**
1919 * Requests the mapping of a guest page into the current context.
1920 *
1921 * This API should only be used for a very short time, as it will consume scarce
1922 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1923 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1924 *
1925 * @returns VBox status code.
1926 * @retval VINF_SUCCESS on success.
1927 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1928 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1929 *
1930 * @param pVM The cross context VM structure.
1931 * @param GCPhys The guest physical address of the page that should be
1932 * mapped.
1933 * @param ppv Where to store the address corresponding to GCPhys.
1934 * @param pLock Where to store the lock information that
1935 * PGMPhysReleasePageMappingLock needs.
1936 *
1937 * @remarks The caller is responsible for dealing with access handlers.
1938 * @todo Add an informational return code for pages with access handlers?
1939 *
1940 * @remarks Avoid calling this API from within critical sections (other than
1941 * the PGM one) because of the deadlock risk.
1942 * @remarks Only one page is mapped! Make no assumption about what's after or
1943 * before the returned page!
1944 * @thread Any thread.
1945 */
1946VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1947{
1948 int rc = pgmLock(pVM);
1949 AssertRCReturn(rc, rc);
1950
1951#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1952 /*
1953 * Find the page and make sure it's readable.
1954 */
1955 PPGMPAGE pPage;
1956 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1957 if (RT_SUCCESS(rc))
1958 {
1959 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1960 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1961 else
1962 {
1963 PVMCPU pVCpu = VMMGetCpu(pVM);
1964 void *pv;
1965 rc = pgmRZDynMapHCPageInlined(pVCpu,
1966 PGM_PAGE_GET_HCPHYS(pPage),
1967 &pv
1968 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1969 if (RT_SUCCESS(rc))
1970 {
1971 AssertRCSuccess(rc);
1972
1973 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1974 *ppv = pv;
1975 pLock->pvPage = pv;
1976 pLock->pVCpu = pVCpu;
1977 }
1978 }
1979 }
1980
1981#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1982 /*
1983 * Query the Physical TLB entry for the page (may fail).
1984 */
1985 PPGMPAGEMAPTLBE pTlbe;
1986 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1987 if (RT_SUCCESS(rc))
1988 {
1989        /* MMIO pages don't have any readable backing. */
1990 PPGMPAGE pPage = pTlbe->pPage;
1991 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1992 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1993 else
1994 {
1995 /*
1996 * Now, just perform the locking and calculate the return address.
1997 */
1998 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1999 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
2000 }
2001 }
2002
2003#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2004 pgmUnlock(pVM);
2005 return rc;
2006}
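
/*
 * Illustrative usage sketch (assumptions: pVM, GCPhys and abBuf are provided
 * by the caller): map a guest page read-only, copy some bytes out and release
 * the lock.
 *
 * @code
 *      void const    *pv;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(abBuf, pv, sizeof(abBuf));           // within the one page only
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 * @endcode
 */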
2007
2008
2009/**
2010 * Requests the mapping of a guest page given by virtual address into the current context.
2011 *
2012 * This API should only be used for a very short time, as it will consume
2013 * scarce resources (R0 and GC) in the mapping cache. When you're done
2014 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2015 *
2016 * This API will assume your intention is to write to the page, and will
2017 * therefore replace shared and zero pages. If you do not intend to modify
2018 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
2019 *
2020 * @returns VBox status code.
2021 * @retval VINF_SUCCESS on success.
2022 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2023 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2024 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2025 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2026 *
2027 * @param pVCpu The cross context virtual CPU structure.
2028 * @param GCPtr The guest virtual address of the page that should be
2029 * mapped.
2030 * @param ppv Where to store the address corresponding to GCPtr.
2031 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2032 *
2033 * @remark Avoid calling this API from within critical sections (other than
2034 * the PGM one) because of the deadlock risk.
2035 * @thread EMT
2036 */
2037VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
2038{
2039 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2040 RTGCPHYS GCPhys;
2041 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2042 if (RT_SUCCESS(rc))
2043 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2044 return rc;
2045}
2046
2047
2048/**
2049 * Requests the mapping of a guest page given by virtual address into the current context.
2050 *
2051 * This API should only be used for a very short time, as it will consume
2052 * scarce resources (R0 and GC) in the mapping cache. When you're done
2053 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
2054 *
2055 * @returns VBox status code.
2056 * @retval VINF_SUCCESS on success.
2057 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
2058 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
2059 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2060 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2061 *
2062 * @param pVCpu The cross context virtual CPU structure.
2063 * @param GCPtr The guest virtual address of the page that should be
2064 * mapped.
2065 * @param ppv Where to store the address corresponding to GCPtr.
2066 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
2067 *
2068 * @remark Avoid calling this API from within critical sections (other than
2069 * the PGM one) because of the deadlock risk.
2070 * @thread EMT
2071 */
2072VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
2073{
2074 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
2075 RTGCPHYS GCPhys;
2076 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
2077 if (RT_SUCCESS(rc))
2078 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
2079 return rc;
2080}
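
/*
 * Illustrative usage sketch for the GCPtr variants (assumptions: pVCpu and
 * GCPtr are provided by the calling EMT, and the access does not cross a page
 * boundary): translate and map a guest virtual address in one call.
 *
 * @code
 *      void const    *pv;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint32_t const u32 = *(uint32_t const *)pv; // caller ensures no page crossing
 *          NOREF(u32);
 *          PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *      }
 * @endcode
 */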
2081
2082
2083/**
2084 * Release the mapping of a guest page.
2085 *
2086 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
2087 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
2088 *
2089 * @param pVM The cross context VM structure.
2090 * @param pLock The lock structure initialized by the mapping function.
2091 */
2092VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2093{
2094#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2095 Assert(pLock->pvPage != NULL);
2096 Assert(pLock->pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
2097 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
2098 pLock->pVCpu = NULL;
2099 pLock->pvPage = NULL;
2100
2101#else
2102# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
2103 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
2104# endif
2105 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2106 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2107
2108 pLock->uPageAndType = 0;
2109 pLock->pvMap = NULL;
2110
2111 pgmLock(pVM);
2112 if (fWriteLock)
2113 {
2114 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2115 Assert(cLocks > 0);
2116 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2117 {
2118 if (cLocks == 1)
2119 {
2120 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2121 pVM->pgm.s.cWriteLockedPages--;
2122 }
2123 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2124 }
2125
2126 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2127 { /* probably extremely likely */ }
2128 else
2129 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2130 }
2131 else
2132 {
2133 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2134 Assert(cLocks > 0);
2135 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2136 {
2137 if (cLocks == 1)
2138 {
2139 Assert(pVM->pgm.s.cReadLockedPages > 0);
2140 pVM->pgm.s.cReadLockedPages--;
2141 }
2142 PGM_PAGE_DEC_READ_LOCKS(pPage);
2143 }
2144 }
2145
2146# if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
2147 if (pMap)
2148 {
2149 Assert(pMap->cRefs >= 1);
2150 pMap->cRefs--;
2151 }
2152# endif
2153 pgmUnlock(pVM);
2154#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2155}
2156
2157
2158#ifdef IN_RING3
2159/**
2160 * Release the mapping of multiple guest pages.
2161 *
2162 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
2163 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
2164 *
2165 * @param pVM The cross context VM structure.
2166 * @param cPages Number of pages to unlock.
2167 * @param paLocks Array of lock structures initialized by the mapping
2168 * function.
2169 */
2170VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
2171{
2172 Assert(cPages > 0);
2173 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
2174#ifdef VBOX_STRICT
2175 for (uint32_t i = 1; i < cPages; i++)
2176 {
2177 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
2178 AssertPtr(paLocks[i].uPageAndType);
2179 }
2180#endif
2181
2182 pgmLock(pVM);
2183 if (fWriteLock)
2184 {
2185 /*
2186 * Write locks:
2187 */
2188 for (uint32_t i = 0; i < cPages; i++)
2189 {
2190 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2191 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
2192 Assert(cLocks > 0);
2193 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2194 {
2195 if (cLocks == 1)
2196 {
2197 Assert(pVM->pgm.s.cWriteLockedPages > 0);
2198 pVM->pgm.s.cWriteLockedPages--;
2199 }
2200 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
2201 }
2202
2203 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
2204 { /* probably extremely likely */ }
2205 else
2206 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
2207
2208 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2209 if (pMap)
2210 {
2211 Assert(pMap->cRefs >= 1);
2212 pMap->cRefs--;
2213 }
2214
2215 /* Yield the lock: */
2216 if ((i & 1023) == 1023)
2217 {
2218                pgmUnlock(pVM);
2219                pgmLock(pVM);
2220 }
2221 }
2222 }
2223 else
2224 {
2225 /*
2226 * Read locks:
2227 */
2228 for (uint32_t i = 0; i < cPages; i++)
2229 {
2230 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
2231 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
2232 Assert(cLocks > 0);
2233 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
2234 {
2235 if (cLocks == 1)
2236 {
2237 Assert(pVM->pgm.s.cReadLockedPages > 0);
2238 pVM->pgm.s.cReadLockedPages--;
2239 }
2240 PGM_PAGE_DEC_READ_LOCKS(pPage);
2241 }
2242
2243 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
2244 if (pMap)
2245 {
2246 Assert(pMap->cRefs >= 1);
2247 pMap->cRefs--;
2248 }
2249
2250 /* Yield the lock: */
2251 if ((i & 1023) == 1023)
2252 {
2253                pgmUnlock(pVM);
2254                pgmLock(pVM);
2255 }
2256 }
2257 }
2258 pgmUnlock(pVM);
2259
2260 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
2261}
2262#endif /* IN_RING3 */
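
/*
 * Illustrative sketch (ring-3 only): releasing a batch of page mapping locks
 * that were filled in earlier by PGMR3PhysBulkGCPhys2CCPtrExternal() or its
 * read-only sibling (the acquisition step is not shown here).  cPages and
 * aLocks are assumed to be provided by the caller.
 *
 * @code
 *      PGMPhysBulkReleasePageMappingLocks(pVM, cPages, &aLocks[0]);
 * @endcode
 */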
2263
2264
2265/**
2266 * Release the internal mapping of a guest page.
2267 *
2268 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
2269 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2270 *
2271 * @param pVM The cross context VM structure.
2272 * @param pLock The lock structure initialized by the mapping function.
2273 *
2274 * @remarks Caller must hold the PGM lock.
2275 */
2276void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
2277{
2278 PGM_LOCK_ASSERT_OWNER(pVM);
2279 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2280}
2281
2282
2283/**
2284 * Converts a GC physical address to a HC ring-3 pointer.
2285 *
2286 * @returns VINF_SUCCESS on success.
2287 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2288 * page but has no physical backing.
2289 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2290 * GC physical address.
2291 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2292 * a dynamic ram chunk boundary
2293 *
2294 * @param pVM The cross context VM structure.
2295 * @param GCPhys The GC physical address to convert.
2296 * @param pR3Ptr Where to store the R3 pointer on success.
2297 *
2298 * @deprecated Avoid when possible!
2299 */
2300int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2301{
2302/** @todo this is kind of hacky and needs some more work. */
2303#ifndef DEBUG_sandervl
2304 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2305#endif
2306
2307    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
2308#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2309 NOREF(pVM); NOREF(pR3Ptr); RT_NOREF_PV(GCPhys);
2310 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2311#else
2312 pgmLock(pVM);
2313
2314 PPGMRAMRANGE pRam;
2315 PPGMPAGE pPage;
2316 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2317 if (RT_SUCCESS(rc))
2318 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2319
2320 pgmUnlock(pVM);
2321 Assert(rc <= VINF_SUCCESS);
2322 return rc;
2323#endif
2324}
2325
2326#if 0 /*def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2327
2328/**
2329 * Maps and locks a guest CR3 or PD (PAE) page.
2330 *
2331 * @returns VINF_SUCCESS on success.
2332 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2333 * page but has no physical backing.
2334 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2335 * GC physical address.
2336 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2337 * a dynamic ram chunk boundary
2338 *
2339 * @param pVM The cross context VM structure.
2340 * @param GCPhys The GC physical address to convert.
2341 * @param pR3Ptr Where to store the R3 pointer on success. This may or
2342 * may not be valid in ring-0 depending on the
2343 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
2344 *
2345 * @remarks The caller must own the PGM lock.
2346 */
2347int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2348{
2349
2350 PPGMRAMRANGE pRam;
2351 PPGMPAGE pPage;
2352 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2353 if (RT_SUCCESS(rc))
2354 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2355 Assert(rc <= VINF_SUCCESS);
2356 return rc;
2357}
2358
2359
2360int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2361{
2362
2363}
2364
2365#endif
2366
2367/**
2368 * Converts a guest pointer to a GC physical address.
2369 *
2370 * This uses the current CR3/CR0/CR4 of the guest.
2371 *
2372 * @returns VBox status code.
2373 * @param pVCpu The cross context virtual CPU structure.
2374 * @param GCPtr The guest pointer to convert.
2375 * @param pGCPhys Where to store the GC physical address.
2376 */
2377VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2378{
2379 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2380 if (pGCPhys && RT_SUCCESS(rc))
2381 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2382 return rc;
2383}
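
/*
 * Illustrative sketch (assumptions: pVCpu and GCPtr are provided by the
 * caller): the returned physical address keeps the page offset of the guest
 * pointer, so byte-granular addresses translate directly.
 *
 * @code
 *      RTGCPHYS GCPhys;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPtr %RGv maps to GCPhys %RGp\n", GCPtr, GCPhys));
 * @endcode
 */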
2384
2385
2386/**
2387 * Converts a guest pointer to a HC physical address.
2388 *
2389 * This uses the current CR3/CR0/CR4 of the guest.
2390 *
2391 * @returns VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure.
2393 * @param GCPtr The guest pointer to convert.
2394 * @param pHCPhys Where to store the HC physical address.
2395 */
2396VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2397{
2398 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2399 RTGCPHYS GCPhys;
2400 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2401 if (RT_SUCCESS(rc))
2402 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2403 return rc;
2404}
2405
2406
2407
2408#undef LOG_GROUP
2409#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2410
2411
2412#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2413/**
2414 * Cache PGMPhys memory access
2415 *
2416 * @param pVM The cross context VM structure.
2417 * @param pCache Cache structure pointer
2418 * @param GCPhys GC physical address
2419 * @param pbR3 R3 pointer corresponding to the physical page
2420 *
2421 * @thread EMT.
2422 */
2423static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2424{
2425 uint32_t iCacheIndex;
2426
2427 Assert(VM_IS_EMT(pVM));
2428
2429 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2430 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2431
2432 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2433
2434 ASMBitSet(&pCache->aEntries, iCacheIndex);
2435
2436 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2437 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2438}
2439#endif /* IN_RING3 */
2440
2441
2442/**
2443 * Deals with reading from a page with one or more ALL access handlers.
2444 *
2445 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2446 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2447 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2448 *
2449 * @param pVM The cross context VM structure.
2450 * @param pPage The page descriptor.
2451 * @param GCPhys The physical address to start reading at.
2452 * @param pvBuf Where to put the bits we read.
2453 * @param cb How much to read - less or equal to a page.
2454 * @param enmOrigin The origin of this call.
2455 */
2456static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
2457 PGMACCESSORIGIN enmOrigin)
2458{
2459 /*
2460 * The most frequent access here is MMIO and shadowed ROM.
2461 * The current code ASSUMES all these access handlers cover full pages!
2462 */
2463
2464 /*
2465 * Whatever we do we need the source page, map it first.
2466 */
2467 PGMPAGEMAPLOCK PgMpLck;
2468 const void *pvSrc = NULL;
2469 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2470/** @todo Check how this can work for MMIO pages? */
2471 if (RT_FAILURE(rc))
2472 {
2473 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2474 GCPhys, pPage, rc));
2475 memset(pvBuf, 0xff, cb);
2476 return VINF_SUCCESS;
2477 }
2478
2479 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
2480
2481 /*
2482 * Deal with any physical handlers.
2483 */
2484 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2485 PPGMPHYSHANDLER pPhys = NULL;
2486 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2487 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2488 {
2489 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2490 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2491 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2492 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2493 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2494#ifndef IN_RING3
2495 if (enmOrigin != PGMACCESSORIGIN_IEM)
2496 {
2497 /* Cannot reliably handle informational status codes in this context */
2498 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2499 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2500 }
2501#endif
2502 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2503 void *pvUser = pPhys->CTX_SUFF(pvUser);
2504
2505 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2506 STAM_PROFILE_START(&pPhys->Stat, h);
2507 PGM_LOCK_ASSERT_OWNER(pVM);
2508
2509 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2510 pgmUnlock(pVM);
2511 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2512 pgmLock(pVM);
2513
2514#ifdef VBOX_WITH_STATISTICS
2515 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2516 if (pPhys)
2517 STAM_PROFILE_STOP(&pPhys->Stat, h);
2518#else
2519 pPhys = NULL; /* might not be valid anymore. */
2520#endif
2521 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
2522 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
2523 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
2524 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2525 {
2526 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2527 return rcStrict;
2528 }
2529 }
2530
2531 /*
2532 * Take the default action.
2533 */
2534 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2535 {
2536 memcpy(pvBuf, pvSrc, cb);
2537 rcStrict = VINF_SUCCESS;
2538 }
2539 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2540 return rcStrict;
2541}
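
/*
 * Minimal sketch of a physical access handler callback (hypothetical example,
 * mirroring the pfnHandler invocation above): returning
 * VINF_PGM_HANDLER_DO_DEFAULT tells PGM to perform the plain memcpy itself.
 *
 * @code
 *  static DECLCALLBACK(VBOXSTRICTRC)
 *  exampleAccessHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf,
 *                       size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
 *  {
 *      RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, pvUser);
 *      return VINF_PGM_HANDLER_DO_DEFAULT;     // let PGM do the default copy
 *  }
 * @endcode
 */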
2542
2543
2544/**
2545 * Read physical memory.
2546 *
2547 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2548 * want to ignore those.
2549 *
2550 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2551 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2552 * @retval VINF_SUCCESS in all context - read completed.
2553 *
2554 * @retval VINF_EM_OFF in RC and R0 - read completed.
2555 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
2556 * @retval VINF_EM_RESET in RC and R0 - read completed.
2557 * @retval VINF_EM_HALT in RC and R0 - read completed.
2558 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
2559 *
2560 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
2561 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
2562 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2563 *
2564 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
2565 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2566 *
2567 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2568 *
2569 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2570 * haven't been cleared for strict status codes yet.
2571 *
2572 * @param pVM The cross context VM structure.
2573 * @param GCPhys Physical address start reading from.
2574 * @param pvBuf Where to put the read bits.
2575 * @param cbRead How many bytes to read.
2576 * @param enmOrigin The origin of this call.
2577 */
2578VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2579{
2580 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2581 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2582
2583 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2584 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2585
2586 pgmLock(pVM);
2587
2588 /*
2589 * Copy loop on ram ranges.
2590 */
2591 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2592 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2593 for (;;)
2594 {
2595 /* Inside range or not? */
2596 if (pRam && GCPhys >= pRam->GCPhys)
2597 {
2598 /*
2599 * Must work our way thru this page by page.
2600 */
2601 RTGCPHYS off = GCPhys - pRam->GCPhys;
2602 while (off < pRam->cb)
2603 {
2604 unsigned iPage = off >> PAGE_SHIFT;
2605 PPGMPAGE pPage = &pRam->aPages[iPage];
2606 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2607 if (cb > cbRead)
2608 cb = cbRead;
2609
2610 /*
2611 * Normal page? Get the pointer to it.
2612 */
2613 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2614 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2615 {
2616 /*
2617 * Get the pointer to the page.
2618 */
2619 PGMPAGEMAPLOCK PgMpLck;
2620 const void *pvSrc;
2621 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2622 if (RT_SUCCESS(rc))
2623 {
2624 memcpy(pvBuf, pvSrc, cb);
2625 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2626 }
2627 else
2628 {
2629 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2630 pRam->GCPhys + off, pPage, rc));
2631 memset(pvBuf, 0xff, cb);
2632 }
2633 }
2634 /*
2635 * Have ALL/MMIO access handlers.
2636 */
2637 else
2638 {
2639 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2640 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2641 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2642 else
2643 {
2644 memset(pvBuf, 0xff, cb);
2645 pgmUnlock(pVM);
2646 return rcStrict2;
2647 }
2648 }
2649
2650 /* next page */
2651 if (cb >= cbRead)
2652 {
2653 pgmUnlock(pVM);
2654 return rcStrict;
2655 }
2656 cbRead -= cb;
2657 off += cb;
2658 pvBuf = (char *)pvBuf + cb;
2659 } /* walk pages in ram range. */
2660
2661 GCPhys = pRam->GCPhysLast + 1;
2662 }
2663 else
2664 {
2665 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2666
2667 /*
2668 * Unassigned address space.
2669 */
2670 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2671 if (cb >= cbRead)
2672 {
2673 memset(pvBuf, 0xff, cbRead);
2674 break;
2675 }
2676 memset(pvBuf, 0xff, cb);
2677
2678 cbRead -= cb;
2679 pvBuf = (char *)pvBuf + cb;
2680 GCPhys += cb;
2681 }
2682
2683 /* Advance range if necessary. */
2684 while (pRam && GCPhys > pRam->GCPhysLast)
2685 pRam = pRam->CTX_SUFF(pNext);
2686 } /* Ram range walk */
2687
2688 pgmUnlock(pVM);
2689 return rcStrict;
2690}
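
/*
 * Illustrative usage sketch (assumptions: pVM, GCPhys and enmOrigin are
 * supplied by the caller): read a small buffer of guest physical memory,
 * respecting access handlers; in ring-0 the strict status must be handled.
 *
 * @code
 *      uint8_t abBuf[64];
 *      VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), enmOrigin);
 *      if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *      {
 *          // abBuf is valid; rcStrict may still carry an informational status.
 *      }
 * @endcode
 */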
2691
2692
2693/**
2694 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2695 *
2696 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
2697 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
2698 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
2699 *
2700 * @param pVM The cross context VM structure.
2701 * @param pPage The page descriptor.
2702 * @param GCPhys The physical address to start writing at.
2703 * @param pvBuf What to write.
2704 * @param cbWrite How much to write - less or equal to a page.
2705 * @param enmOrigin The origin of this call.
2706 */
2707static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2708 PGMACCESSORIGIN enmOrigin)
2709{
2710 PGMPAGEMAPLOCK PgMpLck;
2711 void *pvDst = NULL;
2712 VBOXSTRICTRC rcStrict;
2713
2714 /*
2715 * Give priority to physical handlers (like #PF does).
2716 *
2717 * Hope for a lonely physical handler first that covers the whole
2718 * write area. This should be a pretty frequent case with MMIO and
2719 * the heavy usage of full page handlers in the page pool.
2720 */
2721 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2722 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2723 if (pCur)
2724 {
2725 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2726#ifndef IN_RING3
2727 if (enmOrigin != PGMACCESSORIGIN_IEM)
2728 /* Cannot reliably handle informational status codes in this context */
2729 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2730#endif
2731 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2732 if (cbRange > cbWrite)
2733 cbRange = cbWrite;
2734
2735 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2736 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2737 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2738 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2739 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2740 else
2741 rcStrict = VINF_SUCCESS;
2742 if (RT_SUCCESS(rcStrict))
2743 {
2744 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2745 void *pvUser = pCur->CTX_SUFF(pvUser);
2746 STAM_PROFILE_START(&pCur->Stat, h);
2747
2748 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2749 PGM_LOCK_ASSERT_OWNER(pVM);
2750 pgmUnlock(pVM);
2751 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2752 pgmLock(pVM);
2753
2754#ifdef VBOX_WITH_STATISTICS
2755 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2756 if (pCur)
2757 STAM_PROFILE_STOP(&pCur->Stat, h);
2758#else
2759 pCur = NULL; /* might not be valid anymore. */
2760#endif
2761 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
2762 {
2763 if (pvDst)
2764 memcpy(pvDst, pvBuf, cbRange);
2765 rcStrict = VINF_SUCCESS;
2766 }
2767 else
2768 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
2769 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
2770 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
2771 }
2772 else
2773 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2774 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
2775 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2776 {
2777 if (pvDst)
2778 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2779 return rcStrict;
2780 }
2781
2782 /* more fun to be had below */
2783 cbWrite -= cbRange;
2784 GCPhys += cbRange;
2785 pvBuf = (uint8_t *)pvBuf + cbRange;
2786 pvDst = (uint8_t *)pvDst + cbRange;
2787 }
2788 else /* The handler is somewhere else in the page, deal with it below. */
2789 rcStrict = VINF_SUCCESS;
2790 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2791
2792 /*
2793 * Deal with all the odd ends (used to be deal with virt+phys).
2794 */
2795 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
2796
2797 /* We need a writable destination page. */
2798 if (!pvDst)
2799 {
2800 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2801 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
2802 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
2803 rc2);
2804 }
2805
2806 /* The loop state (big + ugly). */
2807 PPGMPHYSHANDLER pPhys = NULL;
2808 uint32_t offPhys = PAGE_SIZE;
2809 uint32_t offPhysLast = PAGE_SIZE;
2810 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2811
2812 /* The loop. */
2813 for (;;)
2814 {
2815 if (fMorePhys && !pPhys)
2816 {
2817 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2818 if (pPhys)
2819 {
2820 offPhys = 0;
2821 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2822 }
2823 else
2824 {
2825 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2826 GCPhys, true /* fAbove */);
2827 if ( pPhys
2828 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2829 {
2830 offPhys = pPhys->Core.Key - GCPhys;
2831 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2832 }
2833 else
2834 {
2835 pPhys = NULL;
2836 fMorePhys = false;
2837 offPhys = offPhysLast = PAGE_SIZE;
2838 }
2839 }
2840 }
2841
2842 /*
2843 * Handle access to space without handlers (that's easy).
2844 */
2845 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
2846 uint32_t cbRange = (uint32_t)cbWrite;
2847
2848 /*
2849 * Physical handler.
2850 */
2851 if (!offPhys)
2852 {
2853#ifndef IN_RING3
2854 if (enmOrigin != PGMACCESSORIGIN_IEM)
2855 /* Cannot reliably handle informational status codes in this context */
2856 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2857#endif
2858 if (cbRange > offPhysLast + 1)
2859 cbRange = offPhysLast + 1;
2860
2861 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2862 void *pvUser = pPhys->CTX_SUFF(pvUser);
2863
2864 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2865 STAM_PROFILE_START(&pPhys->Stat, h);
2866
2867 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2868 PGM_LOCK_ASSERT_OWNER(pVM);
2869 pgmUnlock(pVM);
2870 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2871 pgmLock(pVM);
2872
2873#ifdef VBOX_WITH_STATISTICS
2874 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2875 if (pPhys)
2876 STAM_PROFILE_STOP(&pPhys->Stat, h);
2877#else
2878 pPhys = NULL; /* might not be valid anymore. */
2879#endif
2880 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
2881 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
2882 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
2883 }
2884
2885 /*
2886 * Execute the default action and merge the status codes.
2887 */
2888 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
2889 {
2890 memcpy(pvDst, pvBuf, cbRange);
2891 rcStrict2 = VINF_SUCCESS;
2892 }
2893 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
2894 {
2895 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2896 return rcStrict2;
2897 }
2898 else
2899 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
2900
2901 /*
2902 * Advance if we've got more stuff to do.
2903 */
2904 if (cbRange >= cbWrite)
2905 {
2906 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2907 return rcStrict;
2908 }
2909
2910
2911 cbWrite -= cbRange;
2912 GCPhys += cbRange;
2913 pvBuf = (uint8_t *)pvBuf + cbRange;
2914 pvDst = (uint8_t *)pvDst + cbRange;
2915
2916 offPhys -= cbRange;
2917 offPhysLast -= cbRange;
2918 }
2919}
2920
2921
2922/**
2923 * Write to physical memory.
2924 *
2925 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2926 * want to ignore those.
2927 *
2928 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
2929 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
2930 * @retval VINF_SUCCESS in all context - write completed.
2931 *
2932 * @retval VINF_EM_OFF in RC and R0 - write completed.
2933 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
2934 * @retval VINF_EM_RESET in RC and R0 - write completed.
2935 * @retval VINF_EM_HALT in RC and R0 - write completed.
2936 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
2937 *
2938 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
2939 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
2940 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
2941 *
2942 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
2943 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
2944 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
2945 *
2946 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
2947 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
2948 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
2949 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
2950 * @retval VINF_CSAM_PENDING_ACTION in RC only.
2951 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
2952 *
2953 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
2954 * haven't been cleared for strict status codes yet.
2955 *
2956 *
2957 * @param pVM The cross context VM structure.
2958 * @param GCPhys Physical address to write to.
2959 * @param pvBuf What to write.
2960 * @param cbWrite How many bytes to write.
2961 * @param enmOrigin Who is calling.
2962 */
2963VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2964{
2965 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2966 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2967 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2968
2969 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2970 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2971
2972 pgmLock(pVM);
2973
2974 /*
2975 * Copy loop on ram ranges.
2976 */
2977 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2978 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2979 for (;;)
2980 {
2981 /* Inside range or not? */
2982 if (pRam && GCPhys >= pRam->GCPhys)
2983 {
2984 /*
2985 * Must work our way thru this page by page.
2986 */
2987 RTGCPTR off = GCPhys - pRam->GCPhys;
2988 while (off < pRam->cb)
2989 {
2990 RTGCPTR iPage = off >> PAGE_SHIFT;
2991 PPGMPAGE pPage = &pRam->aPages[iPage];
2992 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2993 if (cb > cbWrite)
2994 cb = cbWrite;
2995
2996 /*
2997 * Normal page? Get the pointer to it.
2998 */
2999 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
3000 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
3001 {
3002 PGMPAGEMAPLOCK PgMpLck;
3003 void *pvDst;
3004 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
3005 if (RT_SUCCESS(rc))
3006 {
3007 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
3008 memcpy(pvDst, pvBuf, cb);
3009 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3010 }
3011 /* Ignore writes to ballooned pages. */
3012 else if (!PGM_PAGE_IS_BALLOONED(pPage))
3013 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
3014 pRam->GCPhys + off, pPage, rc));
3015 }
3016 /*
3017 * Active WRITE or ALL access handlers.
3018 */
3019 else
3020 {
3021 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
3022 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
3023 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
3024 else
3025 {
3026 pgmUnlock(pVM);
3027 return rcStrict2;
3028 }
3029 }
3030
3031 /* next page */
3032 if (cb >= cbWrite)
3033 {
3034 pgmUnlock(pVM);
3035 return rcStrict;
3036 }
3037
3038 cbWrite -= cb;
3039 off += cb;
3040 pvBuf = (const char *)pvBuf + cb;
3041 } /* walk pages in ram range */
3042
3043 GCPhys = pRam->GCPhysLast + 1;
3044 }
3045 else
3046 {
3047 /*
3048 * Unassigned address space, skip it.
3049 */
3050 if (!pRam)
3051 break;
3052 size_t cb = pRam->GCPhys - GCPhys;
3053 if (cb >= cbWrite)
3054 break;
3055 cbWrite -= cb;
3056 pvBuf = (const char *)pvBuf + cb;
3057 GCPhys += cb;
3058 }
3059
3060 /* Advance range if necessary. */
3061 while (pRam && GCPhys > pRam->GCPhysLast)
3062 pRam = pRam->CTX_SUFF(pNext);
3063 } /* Ram range walk */
3064
3065 pgmUnlock(pVM);
3066 return rcStrict;
3067}
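
/*
 * Illustrative usage sketch (assumptions: pVM, GCPhys and enmOrigin are
 * supplied by the caller): write a small value to guest physical memory,
 * respecting access handlers.
 *
 * @code
 *      uint32_t const u32 = UINT32_C(0xdeadbeef);
 *      VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
 *      if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *      {
 *          // In ring-0 / raw-mode this typically means the access must be
 *          // retried or completed in ring-3 (see the status codes above).
 *      }
 * @endcode
 */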
3068
3069
3070/**
3071 * Read from guest physical memory by GC physical address, bypassing
3072 * MMIO and access handlers.
3073 *
3074 * @returns VBox status code.
3075 * @param pVM The cross context VM structure.
3076 * @param pvDst The destination address.
3077 * @param GCPhysSrc The source address (GC physical address).
3078 * @param cb The number of bytes to read.
3079 */
3080VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
3081{
3082 /*
3083 * Treat the first page as a special case.
3084 */
3085 if (!cb)
3086 return VINF_SUCCESS;
3087
3088 /* map the 1st page */
3089 void const *pvSrc;
3090 PGMPAGEMAPLOCK Lock;
3091 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3092 if (RT_FAILURE(rc))
3093 return rc;
3094
3095 /* optimize for the case where access is completely within the first page. */
3096 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
3097 if (RT_LIKELY(cb <= cbPage))
3098 {
3099 memcpy(pvDst, pvSrc, cb);
3100 PGMPhysReleasePageMappingLock(pVM, &Lock);
3101 return VINF_SUCCESS;
3102 }
3103
3104 /* copy to the end of the page. */
3105 memcpy(pvDst, pvSrc, cbPage);
3106 PGMPhysReleasePageMappingLock(pVM, &Lock);
3107 GCPhysSrc += cbPage;
3108 pvDst = (uint8_t *)pvDst + cbPage;
3109 cb -= cbPage;
3110
3111 /*
3112 * Page by page.
3113 */
3114 for (;;)
3115 {
3116 /* map the page */
3117 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3118 if (RT_FAILURE(rc))
3119 return rc;
3120
3121 /* last page? */
3122 if (cb <= PAGE_SIZE)
3123 {
3124 memcpy(pvDst, pvSrc, cb);
3125 PGMPhysReleasePageMappingLock(pVM, &Lock);
3126 return VINF_SUCCESS;
3127 }
3128
3129 /* copy the entire page and advance */
3130 memcpy(pvDst, pvSrc, PAGE_SIZE);
3131 PGMPhysReleasePageMappingLock(pVM, &Lock);
3132 GCPhysSrc += PAGE_SIZE;
3133 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3134 cb -= PAGE_SIZE;
3135 }
3136 /* won't ever get here. */
3137}
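
/*
 * Illustrative usage sketch (assumptions: pVM and GCPhys are supplied by the
 * caller): read a guest value by physical address while bypassing access
 * handlers; the read may cross page boundaries.
 *
 * @code
 *      uint64_t u64Value;
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, &u64Value, GCPhys, sizeof(u64Value));
 *      AssertRCReturn(rc, rc);
 * @endcode
 */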
3138
3139
3140/**
3141 * Write to guest physical memory by GC physical address.
3143 *
3144 * This will bypass MMIO and access handlers.
3145 *
3146 * @returns VBox status code.
3147 * @param pVM The cross context VM structure.
3148 * @param GCPhysDst The GC physical address of the destination.
3149 * @param pvSrc The source buffer.
3150 * @param cb The number of bytes to write.
3151 */
3152VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3153{
3154 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3155
3156 /*
3157 * Treat the first page as a special case.
3158 */
3159 if (!cb)
3160 return VINF_SUCCESS;
3161
3162 /* map the 1st page */
3163 void *pvDst;
3164 PGMPAGEMAPLOCK Lock;
3165 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3166 if (RT_FAILURE(rc))
3167 return rc;
3168
3169 /* optimize for the case where access is completely within the first page. */
3170 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
3171 if (RT_LIKELY(cb <= cbPage))
3172 {
3173 memcpy(pvDst, pvSrc, cb);
3174 PGMPhysReleasePageMappingLock(pVM, &Lock);
3175 return VINF_SUCCESS;
3176 }
3177
3178 /* copy to the end of the page. */
3179 memcpy(pvDst, pvSrc, cbPage);
3180 PGMPhysReleasePageMappingLock(pVM, &Lock);
3181 GCPhysDst += cbPage;
3182 pvSrc = (const uint8_t *)pvSrc + cbPage;
3183 cb -= cbPage;
3184
3185 /*
3186 * Page by page.
3187 */
3188 for (;;)
3189 {
3190 /* map the page */
3191 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3192 if (RT_FAILURE(rc))
3193 return rc;
3194
3195 /* last page? */
3196 if (cb <= PAGE_SIZE)
3197 {
3198 memcpy(pvDst, pvSrc, cb);
3199 PGMPhysReleasePageMappingLock(pVM, &Lock);
3200 return VINF_SUCCESS;
3201 }
3202
3203 /* copy the entire page and advance */
3204 memcpy(pvDst, pvSrc, PAGE_SIZE);
3205 PGMPhysReleasePageMappingLock(pVM, &Lock);
3206 GCPhysDst += PAGE_SIZE;
3207 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3208 cb -= PAGE_SIZE;
3209 }
3210 /* won't ever get here. */
3211}
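
/*
 * Illustrative usage sketch (assumptions: pVM and GCPhysDst are supplied by
 * the caller): write a value to a guest physical address while bypassing
 * MMIO and access handlers.
 *
 * @code
 *      uint32_t const u32Magic = UINT32_C(0x19590508);
 *      int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, &u32Magic, sizeof(u32Magic));
 *      AssertRC(rc);
 * @endcode
 */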
3212
3213
3214/**
3215 * Read from guest physical memory referenced by GC pointer.
3216 *
3217 * This function uses the current CR3/CR0/CR4 of the guest and will
3218 * bypass access handlers and not set any accessed bits.
3219 *
3220 * @returns VBox status code.
3221 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3222 * @param pvDst The destination address.
3223 * @param GCPtrSrc The source address (GC pointer).
3224 * @param cb The number of bytes to read.
3225 */
3226VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3227{
3228 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3229/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3230
3231 /*
3232 * Treat the first page as a special case.
3233 */
3234 if (!cb)
3235 return VINF_SUCCESS;
3236
3237 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3238 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3239
3240 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3241 * when many VCPUs are fighting for the lock.
3242 */
3243 pgmLock(pVM);
3244
3245 /* map the 1st page */
3246 void const *pvSrc;
3247 PGMPAGEMAPLOCK Lock;
3248 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3249 if (RT_FAILURE(rc))
3250 {
3251 pgmUnlock(pVM);
3252 return rc;
3253 }
3254
3255 /* optimize for the case where access is completely within the first page. */
3256 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3257 if (RT_LIKELY(cb <= cbPage))
3258 {
3259 memcpy(pvDst, pvSrc, cb);
3260 PGMPhysReleasePageMappingLock(pVM, &Lock);
3261 pgmUnlock(pVM);
3262 return VINF_SUCCESS;
3263 }
3264
3265 /* copy to the end of the page. */
3266 memcpy(pvDst, pvSrc, cbPage);
3267 PGMPhysReleasePageMappingLock(pVM, &Lock);
3268 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3269 pvDst = (uint8_t *)pvDst + cbPage;
3270 cb -= cbPage;
3271
3272 /*
3273 * Page by page.
3274 */
3275 for (;;)
3276 {
3277 /* map the page */
3278 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3279 if (RT_FAILURE(rc))
3280 {
3281 pgmUnlock(pVM);
3282 return rc;
3283 }
3284
3285 /* last page? */
3286 if (cb <= PAGE_SIZE)
3287 {
3288 memcpy(pvDst, pvSrc, cb);
3289 PGMPhysReleasePageMappingLock(pVM, &Lock);
3290 pgmUnlock(pVM);
3291 return VINF_SUCCESS;
3292 }
3293
3294 /* copy the entire page and advance */
3295 memcpy(pvDst, pvSrc, PAGE_SIZE);
3296 PGMPhysReleasePageMappingLock(pVM, &Lock);
3297 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3298 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3299 cb -= PAGE_SIZE;
3300 }
3301 /* won't ever get here. */
3302}
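
/*
 * Illustrative usage sketch (assumptions: pVCpu and GCPtrSrc are supplied by
 * the calling EMT): read guest memory by virtual address using the current
 * paging mode, bypassing access handlers.
 *
 * @code
 *      char achLine[80];
 *      int rc = PGMPhysSimpleReadGCPtr(pVCpu, achLine, GCPtrSrc, sizeof(achLine));
 *      if (RT_FAILURE(rc))
 *          Log(("read at %RGv failed: %Rrc\n", GCPtrSrc, rc));
 * @endcode
 */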
3303
3304
3305/**
3306 * Write to guest physical memory referenced by GC pointer.
3307 *
3308 * This function uses the current CR3/CR0/CR4 of the guest and will
3309 * bypass access handlers and not set dirty or accessed bits.
3310 *
3311 * @returns VBox status code.
3312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3313 * @param GCPtrDst The destination address (GC pointer).
3314 * @param pvSrc The source address.
3315 * @param cb The number of bytes to write.
3316 */
3317VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3318{
3319 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3320 VMCPU_ASSERT_EMT(pVCpu);
3321
3322 /*
3323 * Treat the first page as a special case.
3324 */
3325 if (!cb)
3326 return VINF_SUCCESS;
3327
3328 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3329 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3330
3331 /* map the 1st page */
3332 void *pvDst;
3333 PGMPAGEMAPLOCK Lock;
3334 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3335 if (RT_FAILURE(rc))
3336 return rc;
3337
3338 /* optimize for the case where access is completely within the first page. */
3339 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3340 if (RT_LIKELY(cb <= cbPage))
3341 {
3342 memcpy(pvDst, pvSrc, cb);
3343 PGMPhysReleasePageMappingLock(pVM, &Lock);
3344 return VINF_SUCCESS;
3345 }
3346
3347 /* copy to the end of the page. */
3348 memcpy(pvDst, pvSrc, cbPage);
3349 PGMPhysReleasePageMappingLock(pVM, &Lock);
3350 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3351 pvSrc = (const uint8_t *)pvSrc + cbPage;
3352 cb -= cbPage;
3353
3354 /*
3355 * Page by page.
3356 */
3357 for (;;)
3358 {
3359 /* map the page */
3360 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3361 if (RT_FAILURE(rc))
3362 return rc;
3363
3364 /* last page? */
3365 if (cb <= PAGE_SIZE)
3366 {
3367 memcpy(pvDst, pvSrc, cb);
3368 PGMPhysReleasePageMappingLock(pVM, &Lock);
3369 return VINF_SUCCESS;
3370 }
3371
3372 /* copy the entire page and advance */
3373 memcpy(pvDst, pvSrc, PAGE_SIZE);
3374 PGMPhysReleasePageMappingLock(pVM, &Lock);
3375 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3376 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3377 cb -= PAGE_SIZE;
3378 }
3379 /* won't ever get here. */
3380}
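
/*
 * Illustrative usage sketch, kept under #if 0 so it is never built: patching
 * a few guest bytes from the EMT without triggering access handlers or
 * touching the accessed/dirty bits.  The guest address and payload are
 * invented for the example.
 */
#if 0
static int pgmExampleSimpleWritePatch(PVMCPUCC pVCpu)
{
    static const uint8_t s_abNops[] = { 0x90, 0x90, 0x90 };  /* three NOPs */
    RTGCPTR const        GCPtrPatch = 0x00401000;            /* hypothetical guest linear address */
    int rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrPatch, s_abNops, sizeof(s_abNops));
    /* Fails with e.g. VERR_PAGE_TABLE_NOT_PRESENT when the range isn't mapped. */
    return rc;
}
#endif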
3381
3382
3383/**
3384 * Write to guest physical memory referenced by GC pointer and update the PTE.
3385 *
3386 * This function uses the current CR3/CR0/CR4 of the guest and will
3387 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3388 *
3389 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3390 *
3391 * @returns VBox status code.
3392 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3393 * @param GCPtrDst The destination address (GC pointer).
3394 * @param pvSrc The source address.
3395 * @param cb The number of bytes to write.
3396 */
3397VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3398{
3399 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3400 VMCPU_ASSERT_EMT(pVCpu);
3401
3402 /*
3403 * Treat the first page as a special case.
3404 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3405 */
3406 if (!cb)
3407 return VINF_SUCCESS;
3408
3409 /* map the 1st page */
3410 void *pvDst;
3411 PGMPAGEMAPLOCK Lock;
3412 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3413 if (RT_FAILURE(rc))
3414 return rc;
3415
3416 /* optimize for the case where access is completely within the first page. */
3417 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3418 if (RT_LIKELY(cb <= cbPage))
3419 {
3420 memcpy(pvDst, pvSrc, cb);
3421 PGMPhysReleasePageMappingLock(pVM, &Lock);
3422 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3423 return VINF_SUCCESS;
3424 }
3425
3426 /* copy to the end of the page. */
3427 memcpy(pvDst, pvSrc, cbPage);
3428 PGMPhysReleasePageMappingLock(pVM, &Lock);
3429 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3430 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3431 pvSrc = (const uint8_t *)pvSrc + cbPage;
3432 cb -= cbPage;
3433
3434 /*
3435 * Page by page.
3436 */
3437 for (;;)
3438 {
3439 /* map the page */
3440 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3441 if (RT_FAILURE(rc))
3442 return rc;
3443
3444 /* last page? */
3445 if (cb <= PAGE_SIZE)
3446 {
3447 memcpy(pvDst, pvSrc, cb);
3448 PGMPhysReleasePageMappingLock(pVM, &Lock);
3449 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3450 return VINF_SUCCESS;
3451 }
3452
3453 /* copy the entire page and advance */
3454 memcpy(pvDst, pvSrc, PAGE_SIZE);
3455 PGMPhysReleasePageMappingLock(pVM, &Lock);
3456 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3457 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3458 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3459 cb -= PAGE_SIZE;
3460 }
3461 /* won't ever get here. */
3462}
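
/*
 * Usage sketch (compile-excluded): same call shape as PGMPhysSimpleWriteGCPtr,
 * chosen when the emulated store should leave the guest PTE accessed and
 * dirty bits set afterwards.
 */
#if 0
static int pgmExampleDirtyWriteU32(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, uint32_t uValue)
{
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue));
}
#endif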
3463
3464
3465/**
3466 * Read from guest physical memory referenced by GC pointer.
3467 *
3468 * This function uses the current CR3/CR0/CR4 of the guest and will
3469 * respect access handlers and set accessed bits.
3470 *
3471 * @returns Strict VBox status, see PGMPhysRead for details.
3472 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3473 * specified virtual address.
3474 *
3475 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3476 * @param pvDst The destination address.
3477 * @param GCPtrSrc The source address (GC pointer).
3478 * @param cb The number of bytes to read.
3479 * @param enmOrigin Who is calling.
3480 * @thread EMT(pVCpu)
3481 */
3482VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3483{
3484 RTGCPHYS GCPhys;
3485 uint64_t fFlags;
3486 int rc;
3487 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3488 VMCPU_ASSERT_EMT(pVCpu);
3489
3490 /*
3491 * Anything to do?
3492 */
3493 if (!cb)
3494 return VINF_SUCCESS;
3495
3496 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3497
3498 /*
3499 * Optimize reads within a single page.
3500 */
3501 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3502 {
3503 /* Convert virtual to physical address + flags */
3504 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3505 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3506 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3507
3508 /* mark the guest page as accessed. */
3509 if (!(fFlags & X86_PTE_A))
3510 {
3511 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3512 AssertRC(rc);
3513 }
3514
3515 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3516 }
3517
3518 /*
3519 * Page by page.
3520 */
3521 for (;;)
3522 {
3523 /* Convert virtual to physical address + flags */
3524 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3525 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3526 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3527
3528 /* mark the guest page as accessed. */
3529 if (!(fFlags & X86_PTE_A))
3530 {
3531 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3532 AssertRC(rc);
3533 }
3534
3535 /* copy */
3536 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3537 if (cbRead < cb)
3538 {
3539 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3540 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3541 { /* likely */ }
3542 else
3543 return rcStrict;
3544 }
3545 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3546 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3547
3548 /* next */
3549 Assert(cb > cbRead);
3550 cb -= cbRead;
3551 pvDst = (uint8_t *)pvDst + cbRead;
3552 GCPtrSrc += cbRead;
3553 }
3554}
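
/*
 * Compile-excluded sketch of a handler-respecting read.  The use of
 * PGMACCESSORIGIN_DEBUGGER is only an assumption about a suitable origin
 * value; strict statuses other than VINF_SUCCESS are passed straight back.
 */
#if 0
static VBOXSTRICTRC pgmExampleReadGuestBuffer(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
{
    VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, PGMACCESSORIGIN_DEBUGGER);
    if (rcStrict != VINF_SUCCESS)
        LogFlow(("pgmExampleReadGuestBuffer: %RGv/%zu -> %Rrc\n", GCPtrSrc, cb, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif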
3555
3556
3557/**
3558 * Write to guest physical memory referenced by GC pointer.
3559 *
3560 * This function uses the current CR3/CR0/CR4 of the guest and will
3561 * respect access handlers and set dirty and accessed bits.
3562 *
3563 * @returns Strict VBox status, see PGMPhysWrite for details.
3564 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
3565 * specified virtual address.
3566 *
3567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3568 * @param GCPtrDst The destination address (GC pointer).
3569 * @param pvSrc The source address.
3570 * @param cb The number of bytes to write.
3571 * @param enmOrigin Who is calling.
3572 */
3573VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3574{
3575 RTGCPHYS GCPhys;
3576 uint64_t fFlags;
3577 int rc;
3578 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3579 VMCPU_ASSERT_EMT(pVCpu);
3580
3581 /*
3582 * Anything to do?
3583 */
3584 if (!cb)
3585 return VINF_SUCCESS;
3586
3587 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3588
3589 /*
3590 * Optimize writes within a single page.
3591 */
3592 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3593 {
3594 /* Convert virtual to physical address + flags */
3595 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3596 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3597 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3598
3599 /* Mention when we ignore X86_PTE_RW... */
3600 if (!(fFlags & X86_PTE_RW))
3601 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3602
3603 /* Mark the guest page as accessed and dirty if necessary. */
3604 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3605 {
3606 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3607 AssertRC(rc);
3608 }
3609
3610 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3611 }
3612
3613 /*
3614 * Page by page.
3615 */
3616 for (;;)
3617 {
3618 /* Convert virtual to physical address + flags */
3619 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3620 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3621 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3622
3623 /* Mention when we ignore X86_PTE_RW... */
3624 if (!(fFlags & X86_PTE_RW))
3625 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3626
3627 /* Mark the guest page as accessed and dirty if necessary. */
3628 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3629 {
3630 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3631 AssertRC(rc);
3632 }
3633
3634 /* copy */
3635 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3636 if (cbWrite < cb)
3637 {
3638 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3639 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3640 { /* likely */ }
3641 else
3642 return rcStrict;
3643 }
3644 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3645 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3646
3647 /* next */
3648 Assert(cb > cbWrite);
3649 cb -= cbWrite;
3650 pvSrc = (uint8_t *)pvSrc + cbWrite;
3651 GCPtrDst += cbWrite;
3652 }
3653}
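
/*
 * The write counterpart of the read sketch above (compile-excluded).  Again,
 * PGMACCESSORIGIN_DEBUGGER is an assumed origin; real callers pass whatever
 * origin matches their subsystem.
 */
#if 0
static VBOXSTRICTRC pgmExampleWriteGuestBuffer(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_DEBUGGER);
}
#endif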
3654
3655
3656/**
3657 * Performs a read of guest virtual memory for instruction emulation.
3658 *
3659 * This will check permissions, raise exceptions and update the access bits.
3660 *
3661 * The current implementation will bypass all access handlers. It may later be
3662 * changed to at least respect MMIO.
3663 *
3664 *
3665 * @returns VBox status code suitable to scheduling.
3666 * @retval VINF_SUCCESS if the read was performed successfully.
3667 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3668 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3669 *
3670 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3671 * @param pCtxCore The context core.
3672 * @param pvDst Where to put the bytes we've read.
3673 * @param GCPtrSrc The source address.
3674 * @param cb The number of bytes to read. Not more than a page.
3675 *
3676 * @remark This function will dynamically map physical pages in GC. This may unmap
3677 * mappings done by the caller. Be careful!
3678 */
3679VMMDECL(int) PGMPhysInterpretedRead(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3680{
3681 NOREF(pCtxCore);
3682 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3683 Assert(cb <= PAGE_SIZE);
3684 VMCPU_ASSERT_EMT(pVCpu);
3685
3686/** @todo r=bird: This isn't perfect!
3687 * -# It's not checking for reserved bits being 1.
3688 * -# It's not correctly dealing with the access bit.
3689 * -# It's not respecting MMIO memory or any other access handlers.
3690 */
3691 /*
3692 * 1. Translate virtual to physical. This may fault.
3693 * 2. Map the physical address.
3694 * 3. Do the read operation.
3695 * 4. Set access bits if required.
3696 */
3697 int rc;
3698 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3699 if (cb <= cb1)
3700 {
3701 /*
3702 * Not crossing pages.
3703 */
3704 RTGCPHYS GCPhys;
3705 uint64_t fFlags;
3706 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3707 if (RT_SUCCESS(rc))
3708 {
3709 /** @todo we should check reserved bits ... */
3710 PGMPAGEMAPLOCK PgMpLck;
3711 void const *pvSrc;
3712 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3713 switch (rc)
3714 {
3715 case VINF_SUCCESS:
3716 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3717 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3718 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3719 break;
3720 case VERR_PGM_PHYS_PAGE_RESERVED:
3721 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3722 memset(pvDst, 0xff, cb);
3723 break;
3724 default:
3725 Assert(RT_FAILURE_NP(rc));
3726 return rc;
3727 }
3728
3729 /** @todo access bit emulation isn't 100% correct. */
3730 if (!(fFlags & X86_PTE_A))
3731 {
3732 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3733 AssertRC(rc);
3734 }
3735 return VINF_SUCCESS;
3736 }
3737 }
3738 else
3739 {
3740 /*
3741 * Crosses pages.
3742 */
3743 size_t cb2 = cb - cb1;
3744 uint64_t fFlags1;
3745 RTGCPHYS GCPhys1;
3746 uint64_t fFlags2;
3747 RTGCPHYS GCPhys2;
3748 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3749 if (RT_SUCCESS(rc))
3750 {
3751 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3752 if (RT_SUCCESS(rc))
3753 {
3754 /** @todo we should check reserved bits ... */
3755 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3756 PGMPAGEMAPLOCK PgMpLck;
3757 void const *pvSrc1;
3758 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3759 switch (rc)
3760 {
3761 case VINF_SUCCESS:
3762 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3763 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3764 break;
3765 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3766 memset(pvDst, 0xff, cb1);
3767 break;
3768 default:
3769 Assert(RT_FAILURE_NP(rc));
3770 return rc;
3771 }
3772
3773 void const *pvSrc2;
3774 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3775 switch (rc)
3776 {
3777 case VINF_SUCCESS:
3778 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3779 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3780 break;
3781 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3782 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3783 break;
3784 default:
3785 Assert(RT_FAILURE_NP(rc));
3786 return rc;
3787 }
3788
3789 if (!(fFlags1 & X86_PTE_A))
3790 {
3791 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3792 AssertRC(rc);
3793 }
3794 if (!(fFlags2 & X86_PTE_A))
3795 {
3796 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3797 AssertRC(rc);
3798 }
3799 return VINF_SUCCESS;
3800 }
3801 }
3802 }
3803
3804 /*
3805 * Raise a #PF.
3806 */
3807 uint32_t uErr;
3808
3809 /* Get the current privilege level. */
3810 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3811 switch (rc)
3812 {
3813 case VINF_SUCCESS:
3814 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3815 break;
3816
3817 case VERR_PAGE_NOT_PRESENT:
3818 case VERR_PAGE_TABLE_NOT_PRESENT:
3819 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3820 break;
3821
3822 default:
3823 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3824 return rc;
3825 }
3826 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3827 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
3828 if (RT_SUCCESS(rc))
3829 return VINF_EM_RAW_GUEST_TRAP;
3830 return rc;
3831}
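
/*
 * Compile-excluded sketch: reading up to one page of guest memory on behalf
 * of an instruction emulator.  The context core argument is unused by the
 * implementation above, so NULL is passed; VINF_EM_RAW_GUEST_TRAP means a
 * #PF has been asserted for the guest to handle.
 */
#if 0
static int pgmExampleInterpretedRead(PVMCPUCC pVCpu, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
{
    Assert(cb <= PAGE_SIZE);
    int rc = PGMPhysInterpretedRead(pVCpu, NULL /*pCtxCore*/, pvDst, GCPtrSrc, cb);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        LogFlow(("pgmExampleInterpretedRead: #PF asserted for %RGv\n", GCPtrSrc));
    return rc;
}
#endif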
3832
3833
3834/**
3835 * Performs a read of guest virtual memory for instruction emulation.
3836 *
3837 * This will check permissions, raise exceptions and update the access bits.
3838 *
3839 * The current implementation will bypass all access handlers. It may later be
3840 * changed to at least respect MMIO.
3841 *
3842 *
3843 * @returns VBox status code suitable to scheduling.
3844 * @retval VINF_SUCCESS if the read was performed successfully.
3845 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3846 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3847 *
3848 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3849 * @param pCtxCore The context core.
3850 * @param pvDst Where to put the bytes we've read.
3851 * @param GCPtrSrc The source address.
3852 * @param cb The number of bytes to read. Not more than a page.
3853 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3854 * an appropriate error status will be returned (no
3855 * informational statuses at all).
3856 *
3857 *
3858 * @remarks Takes the PGM lock.
3859 * @remarks A page fault on the 2nd page of the access will be raised without
3860 * writing the bits on the first page since we're ASSUMING that the
3861 * caller is emulating an instruction access.
3862 * @remarks This function will dynamically map physical pages in GC. This may
3863 * unmap mappings done by the caller. Be careful!
3864 */
3865VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3866 bool fRaiseTrap)
3867{
3868 NOREF(pCtxCore);
3869 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3870 Assert(cb <= PAGE_SIZE);
3871 VMCPU_ASSERT_EMT(pVCpu);
3872
3873 /*
3874 * 1. Translate virtual to physical. This may fault.
3875 * 2. Map the physical address.
3876 * 3. Do the read operation.
3877 * 4. Set access bits if required.
3878 */
3879 int rc;
3880 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3881 if (cb <= cb1)
3882 {
3883 /*
3884 * Not crossing pages.
3885 */
3886 RTGCPHYS GCPhys;
3887 uint64_t fFlags;
3888 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3889 if (RT_SUCCESS(rc))
3890 {
3891 if (1) /** @todo we should check reserved bits ... */
3892 {
3893 const void *pvSrc;
3894 PGMPAGEMAPLOCK Lock;
3895 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3896 switch (rc)
3897 {
3898 case VINF_SUCCESS:
3899 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3900 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3901 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3902 PGMPhysReleasePageMappingLock(pVM, &Lock);
3903 break;
3904 case VERR_PGM_PHYS_PAGE_RESERVED:
3905 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3906 memset(pvDst, 0xff, cb);
3907 break;
3908 default:
3909 AssertMsgFailed(("%Rrc\n", rc));
3910 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3911 return rc;
3912 }
3913
3914 if (!(fFlags & X86_PTE_A))
3915 {
3916 /** @todo access bit emulation isn't 100% correct. */
3917 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3918 AssertRC(rc);
3919 }
3920 return VINF_SUCCESS;
3921 }
3922 }
3923 }
3924 else
3925 {
3926 /*
3927 * Crosses pages.
3928 */
3929 size_t cb2 = cb - cb1;
3930 uint64_t fFlags1;
3931 RTGCPHYS GCPhys1;
3932 uint64_t fFlags2;
3933 RTGCPHYS GCPhys2;
3934 rc = PGMGstGetPage(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3935 if (RT_SUCCESS(rc))
3936 {
3937 rc = PGMGstGetPage(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3938 if (RT_SUCCESS(rc))
3939 {
3940 if (1) /** @todo we should check reserved bits ... */
3941 {
3942 const void *pvSrc;
3943 PGMPAGEMAPLOCK Lock;
3944 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3945 switch (rc)
3946 {
3947 case VINF_SUCCESS:
3948 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3949 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3950 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3951 PGMPhysReleasePageMappingLock(pVM, &Lock);
3952 break;
3953 case VERR_PGM_PHYS_PAGE_RESERVED:
3954 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3955 memset(pvDst, 0xff, cb1);
3956 break;
3957 default:
3958 AssertMsgFailed(("%Rrc\n", rc));
3959 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3960 return rc;
3961 }
3962
3963 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3964 switch (rc)
3965 {
3966 case VINF_SUCCESS:
3967 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3968 PGMPhysReleasePageMappingLock(pVM, &Lock);
3969 break;
3970 case VERR_PGM_PHYS_PAGE_RESERVED:
3971 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3972 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3973 break;
3974 default:
3975 AssertMsgFailed(("%Rrc\n", rc));
3976 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3977 return rc;
3978 }
3979
3980 if (!(fFlags1 & X86_PTE_A))
3981 {
3982 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3983 AssertRC(rc);
3984 }
3985 if (!(fFlags2 & X86_PTE_A))
3986 {
3987 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3988 AssertRC(rc);
3989 }
3990 return VINF_SUCCESS;
3991 }
3992 /* sort out which page */
3993 }
3994 else
3995 GCPtrSrc += cb1; /* fault on 2nd page */
3996 }
3997 }
3998
3999 /*
4000 * Raise a #PF if we're allowed to do that.
4001 */
4002 /* Calc the error bits. */
4003 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4004 uint32_t uErr;
4005 switch (rc)
4006 {
4007 case VINF_SUCCESS:
4008 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4009 rc = VERR_ACCESS_DENIED;
4010 break;
4011
4012 case VERR_PAGE_NOT_PRESENT:
4013 case VERR_PAGE_TABLE_NOT_PRESENT:
4014 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4015 break;
4016
4017 default:
4018 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
4019 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4020 return rc;
4021 }
4022 if (fRaiseTrap)
4023 {
4024 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
4025 rc = TRPMAssertXcptPF(pVCpu, GCPtrSrc, uErr);
4026 if (RT_SUCCESS(rc))
4027 return VINF_EM_RAW_GUEST_TRAP;
4028 return rc;
4029 }
4030 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
4031 return rc;
4032}
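
/*
 * Sketch (compile-excluded): like the previous example but with
 * fRaiseTrap=false, so a failed translation comes back as a plain error
 * status instead of an asserted #PF.
 */
#if 0
static int pgmExampleInterpretedReadNoTrap(PVMCPUCC pVCpu, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
{
    return PGMPhysInterpretedReadNoHandlers(pVCpu, NULL /*pCtxCore*/, pvDst, GCPtrSrc, cb, false /*fRaiseTrap*/);
}
#endif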
4033
4034
4035/**
4036 * Performs a write to guest virtual memory for instruction emulation.
4037 *
4038 * This will check permissions, raise exceptions and update the dirty and access
4039 * bits.
4040 *
4041 * @returns VBox status code suitable to scheduling.
4042 * @retval VINF_SUCCESS if the write was performed successfully.
4043 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
4044 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
4045 *
4046 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4047 * @param pCtxCore The context core.
4048 * @param GCPtrDst The destination address.
4049 * @param pvSrc What to write.
4050 * @param cb The number of bytes to write. Not more than a page.
4051 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
4052 * an appropriate error status will be returned (no
4053 * informational statuses at all).
4054 *
4055 * @remarks Takes the PGM lock.
4056 * @remarks A page fault on the 2nd page of the access will be raised without
4057 * writing the bits on the first page since we're ASSUMING that the
4058 * caller is emulating an instruction access.
4059 * @remarks This function will dynamically map physical pages in GC. This may
4060 * unmap mappings done by the caller. Be careful!
4061 */
4062VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
4063 size_t cb, bool fRaiseTrap)
4064{
4065 NOREF(pCtxCore);
4066 Assert(cb <= PAGE_SIZE);
4067 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4068 VMCPU_ASSERT_EMT(pVCpu);
4069
4070 /*
4071 * 1. Translate virtual to physical. This may fault.
4072 * 2. Map the physical address.
4073 * 3. Do the write operation.
4074 * 4. Set access bits if required.
4075 */
4076 /** @todo Since this method is frequently used by EMInterpret or IOM
4077 * upon a write fault to a write access monitored page, we can
4078 * reuse the guest page table walking from the \#PF code. */
4079 int rc;
4080 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
4081 if (cb <= cb1)
4082 {
4083 /*
4084 * Not crossing pages.
4085 */
4086 RTGCPHYS GCPhys;
4087 uint64_t fFlags;
4088 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags, &GCPhys);
4089 if (RT_SUCCESS(rc))
4090 {
4091 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
4092 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4093 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
4094 {
4095 void *pvDst;
4096 PGMPAGEMAPLOCK Lock;
4097 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
4098 switch (rc)
4099 {
4100 case VINF_SUCCESS:
4101 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4102 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
4103 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
4104 PGMPhysReleasePageMappingLock(pVM, &Lock);
4105 break;
4106 case VERR_PGM_PHYS_PAGE_RESERVED:
4107 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4108 /* bit bucket */
4109 break;
4110 default:
4111 AssertMsgFailed(("%Rrc\n", rc));
4112 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4113 return rc;
4114 }
4115
4116 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
4117 {
4118 /** @todo dirty & access bit emulation isn't 100% correct. */
4119 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
4120 AssertRC(rc);
4121 }
4122 return VINF_SUCCESS;
4123 }
4124 rc = VERR_ACCESS_DENIED;
4125 }
4126 }
4127 else
4128 {
4129 /*
4130 * Crosses pages.
4131 */
4132 size_t cb2 = cb - cb1;
4133 uint64_t fFlags1;
4134 RTGCPHYS GCPhys1;
4135 uint64_t fFlags2;
4136 RTGCPHYS GCPhys2;
4137 rc = PGMGstGetPage(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
4138 if (RT_SUCCESS(rc))
4139 {
4140 rc = PGMGstGetPage(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
4141 if (RT_SUCCESS(rc))
4142 {
4143 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
4144 && (fFlags2 & X86_PTE_RW))
4145 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4146 && CPUMGetGuestCPL(pVCpu) <= 2) )
4147 {
4148 void *pvDst;
4149 PGMPAGEMAPLOCK Lock;
4150 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
4151 switch (rc)
4152 {
4153 case VINF_SUCCESS:
4154 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4155 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
4156 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
4157 PGMPhysReleasePageMappingLock(pVM, &Lock);
4158 break;
4159 case VERR_PGM_PHYS_PAGE_RESERVED:
4160 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4161 /* bit bucket */
4162 break;
4163 default:
4164 AssertMsgFailed(("%Rrc\n", rc));
4165 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4166 return rc;
4167 }
4168
4169 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
4170 switch (rc)
4171 {
4172 case VINF_SUCCESS:
4173 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
4174 PGMPhysReleasePageMappingLock(pVM, &Lock);
4175 break;
4176 case VERR_PGM_PHYS_PAGE_RESERVED:
4177 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4178 /* bit bucket */
4179 break;
4180 default:
4181 AssertMsgFailed(("%Rrc\n", rc));
4182 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4183 return rc;
4184 }
4185
4186 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
4187 {
4188 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4189 AssertRC(rc);
4190 }
4191 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
4192 {
4193 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4194 AssertRC(rc);
4195 }
4196 return VINF_SUCCESS;
4197 }
4198 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
4199 GCPtrDst += cb1; /* fault on the 2nd page. */
4200 rc = VERR_ACCESS_DENIED;
4201 }
4202 else
4203 GCPtrDst += cb1; /* fault on the 2nd page. */
4204 }
4205 }
4206
4207 /*
4208 * Raise a #PF if we're allowed to do that.
4209 */
4210 /* Calc the error bits. */
4211 uint32_t uErr;
4212 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4213 switch (rc)
4214 {
4215 case VINF_SUCCESS:
4216 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4217 rc = VERR_ACCESS_DENIED;
4218 break;
4219
4220 case VERR_ACCESS_DENIED:
4221 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4222 break;
4223
4224 case VERR_PAGE_NOT_PRESENT:
4225 case VERR_PAGE_TABLE_NOT_PRESENT:
4226 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4227 break;
4228
4229 default:
4230 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4231 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4232 return rc;
4233 }
4234 if (fRaiseTrap)
4235 {
4236 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4237 rc = TRPMAssertXcptPF(pVCpu, GCPtrDst, uErr);
4238 if (RT_SUCCESS(rc))
4239 return VINF_EM_RAW_GUEST_TRAP;
4240 return rc;
4241 }
4242 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4243 return rc;
4244}
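
/*
 * Sketch (compile-excluded): the write counterpart, with fRaiseTrap=true so
 * permission and translation failures are turned into a guest #PF
 * (VINF_EM_RAW_GUEST_TRAP) rather than just an error code.
 */
#if 0
static int pgmExampleInterpretedWrite(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    Assert(cb <= PAGE_SIZE);
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, NULL /*pCtxCore*/, GCPtrDst, pvSrc, cb, true /*fRaiseTrap*/);
}
#endif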
4245
4246
4247/**
4248 * Return the page type of the specified physical address.
4249 *
4250 * @returns The page type.
4251 * @param pVM The cross context VM structure.
4252 * @param GCPhys Guest physical address
4253 */
4254VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
4255{
4256 pgmLock(pVM);
4257 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4258 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4259 pgmUnlock(pVM);
4260
4261 return enmPgType;
4262}
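
/*
 * Sketch (compile-excluded): a page exists at the address exactly when the
 * type is something other than PGMPAGETYPE_INVALID (RAM, ROM, MMIO, ...).
 */
#if 0
static bool pgmExampleHasPageAt(PVMCC pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif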
4263
4264
4265/**
4266 * Converts a GC physical address to a HC ring-3 pointer, with some
4267 * additional checks.
4268 *
4269 * @returns VBox status code (no informational statuses).
4270 *
4271 * @param pVM The cross context VM structure.
4272 * @param pVCpu The cross context virtual CPU structure of the
4273 * calling EMT.
4274 * @param GCPhys The GC physical address to convert. This API masks
4275 * the A20 line when necessary.
4276 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
4277 * be done while holding the PGM lock.
4278 * @param ppb Where to store the pointer corresponding to GCPhys
4279 * on success.
4280 * @param pfTlb The TLB flags and revision. We only add stuff.
4281 *
4282 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
4283 * PGMPhysIemGCPhys2Ptr.
4284 *
4285 * @thread EMT(pVCpu).
4286 */
4287VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
4288#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4289 R3PTRTYPE(uint8_t *) *ppb,
4290#else
4291 R3R0PTRTYPE(uint8_t *) *ppb,
4292#endif
4293 uint64_t *pfTlb)
4294{
4295 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4296 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
4297
4298 pgmLock(pVM);
4299
4300 PPGMRAMRANGE pRam;
4301 PPGMPAGE pPage;
4302 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4303 if (RT_SUCCESS(rc))
4304 {
4305 if (!PGM_PAGE_IS_BALLOONED(pPage))
4306 {
4307 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4308 {
4309 if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
4310 {
4311 /*
4312 * No access handler.
4313 */
4314 switch (PGM_PAGE_GET_STATE(pPage))
4315 {
4316 case PGM_PAGE_STATE_ALLOCATED:
4317 *pfTlb |= *puTlbPhysRev;
4318 break;
4319 case PGM_PAGE_STATE_BALLOONED:
4320 AssertFailed();
4321 RT_FALL_THRU();
4322 case PGM_PAGE_STATE_ZERO:
4323 case PGM_PAGE_STATE_SHARED:
4324 case PGM_PAGE_STATE_WRITE_MONITORED:
4325 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4326 break;
4327 }
4328#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4329 *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4330 *ppb = NULL;
4331#else
4332 PPGMPAGEMAPTLBE pTlbe;
4333 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4334 AssertLogRelRCReturn(rc, rc);
4335 *ppb = (uint8_t *)pTlbe->pv;
4336#endif
4337 }
4338 else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
4339 {
4340 /*
4341 * MMIO or similar all access handler: Catch all access.
4342 */
4343 *pfTlb |= *puTlbPhysRev
4344 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4345 *ppb = NULL;
4346 }
4347 else
4348 {
4349 /*
4350 * Write access handler: Catch write accesses if active.
4351 */
4352 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
4353 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4354 else
4355 switch (PGM_PAGE_GET_STATE(pPage))
4356 {
4357 case PGM_PAGE_STATE_ALLOCATED:
4358 *pfTlb |= *puTlbPhysRev;
4359 break;
4360 case PGM_PAGE_STATE_BALLOONED:
4361 AssertFailed();
4362 RT_FALL_THRU();
4363 case PGM_PAGE_STATE_ZERO:
4364 case PGM_PAGE_STATE_SHARED:
4365 case PGM_PAGE_STATE_WRITE_MONITORED:
4366 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
4367 break;
4368 }
4369#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4370 *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4371 *ppb = NULL;
4372#else
4373 PPGMPAGEMAPTLBE pTlbe;
4374 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4375 AssertLogRelRCReturn(rc, rc);
4376 *ppb = (uint8_t *)pTlbe->pv;
4377#endif
4378 }
4379 }
4380 else
4381 {
4382 /* Alias MMIO: For now, we catch all access. */
4383 *pfTlb |= *puTlbPhysRev
4384 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4385 *ppb = NULL;
4386 }
4387 }
4388 else
4389 {
4390 /* Ballooned: Shouldn't get here, but we read the zero page via PGMPhysRead and writes go to /dev/null. */
4391 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4392 *ppb = NULL;
4393 }
4394 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 pPage=%R[pgmpage]\n", GCPhys, *ppb, *pfTlb, pPage));
4395 }
4396 else
4397 {
4398 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
4399 *ppb = NULL;
4400 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
4401 }
4402
4403 pgmUnlock(pVM);
4404 return VINF_SUCCESS;
4405}
4406
4407
4408/**
4409 * Converts a GC physical address to a HC ring-3 pointer, with some
4410 * additional checks.
4411 *
4412 * @returns VBox status code (no informational statuses).
4413 * @retval VINF_SUCCESS on success.
4414 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4415 * access handler of some kind.
4416 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4417 * accesses or is odd in any way.
4418 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4419 *
4420 * @param pVM The cross context VM structure.
4421 * @param pVCpu The cross context virtual CPU structure of the
4422 * calling EMT.
4423 * @param GCPhys The GC physical address to convert. This API masks
4424 * the A20 line when necessary.
4425 * @param fWritable Whether write access is required.
4426 * @param fByPassHandlers Whether to bypass access handlers.
4427 * @param ppv Where to store the pointer corresponding to GCPhys
4428 * on success.
4429 * @param pLock Where to store the page mapping lock; release it with PGMPhysReleasePageMappingLock().
4430 *
4431 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4432 * @thread EMT(pVCpu).
4433 */
4434VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4435 void **ppv, PPGMPAGEMAPLOCK pLock)
4436{
4437 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4438
4439 pgmLock(pVM);
4440
4441 PPGMRAMRANGE pRam;
4442 PPGMPAGE pPage;
4443 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4444 if (RT_SUCCESS(rc))
4445 {
4446 if (PGM_PAGE_IS_BALLOONED(pPage))
4447 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4448 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4449 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4450 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4451 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4452 rc = VINF_SUCCESS;
4453 else
4454 {
4455 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4456 {
4457 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4458 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4459 }
4460 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4461 {
4462 Assert(!fByPassHandlers);
4463 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4464 }
4465 }
4466 if (RT_SUCCESS(rc))
4467 {
4468 int rc2;
4469
4470 /* Make sure what we return is writable. */
4471 if (fWritable)
4472 switch (PGM_PAGE_GET_STATE(pPage))
4473 {
4474 case PGM_PAGE_STATE_ALLOCATED:
4475 break;
4476 case PGM_PAGE_STATE_BALLOONED:
4477 AssertFailed();
4478 break;
4479 case PGM_PAGE_STATE_ZERO:
4480 case PGM_PAGE_STATE_SHARED:
4481 case PGM_PAGE_STATE_WRITE_MONITORED:
4482 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4483 AssertLogRelRCReturn(rc2, rc2);
4484 break;
4485 }
4486
4487#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4488 void *pv;
4489 rc = pgmRZDynMapHCPageInlined(pVCpu,
4490 PGM_PAGE_GET_HCPHYS(pPage),
4491 &pv
4492 RTLOG_COMMA_SRC_POS);
4493 if (RT_FAILURE(rc))
4494 return rc;
4495 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4496 pLock->pvPage = pv;
4497 pLock->pVCpu = pVCpu;
4498
4499#else
4500 /* Get a ring-3 mapping of the address. */
4501 PPGMPAGEMAPTLBE pTlbe;
4502 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4503 AssertLogRelRCReturn(rc2, rc2);
4504
4505 /* Lock it and calculate the address. */
4506 if (fWritable)
4507 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4508 else
4509 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4510 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4511#endif
4512
4513 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4514 }
4515 else
4516 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4517
4518 /* else: handler catching all access, no pointer returned. */
4519 }
4520 else
4521 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4522
4523 pgmUnlock(pVM);
4524 return rc;
4525}
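
/*
 * Sketch (compile-excluded): mapping a guest physical page for a direct read
 * and releasing the lock again.  Pages with handlers come back as
 * VERR_PGM_PHYS_TLB_CATCH_WRITE / VERR_PGM_PHYS_TLB_CATCH_ALL and must then
 * be accessed via PGMPhysRead/PGMPhysWrite instead.
 */
#if 0
static int pgmExampleIemPeekByte(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint8_t *pbByte)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, false /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *pbByte = *(uint8_t const *)pv;     /* pv already includes the page offset of GCPhys */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    return rc;
}
#endif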
4526
4527
4528/**
4529 * Checks if the given GCPhys page requires special handling for the given access
4530 * because it's MMIO or otherwise monitored.
4531 *
4532 * @returns VBox status code (no informational statuses).
4533 * @retval VINF_SUCCESS on success.
4534 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4535 * access handler of some kind.
4536 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4537 * accesses or is odd in any way.
4538 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4539 *
4540 * @param pVM The cross context VM structure.
4541 * @param GCPhys The GC physical address to convert. Since this is
4542 * only used for filling the REM TLB, the A20 mask must
4543 * be applied before calling this API.
4544 * @param fWritable Whether write access is required.
4545 * @param fByPassHandlers Whether to bypass access handlers.
4546 *
4547 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4548 * a stop gap thing that should be removed once there is a better TLB
4549 * for virtual address accesses.
4550 */
4551VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4552{
4553 pgmLock(pVM);
4554 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4555
4556 PPGMRAMRANGE pRam;
4557 PPGMPAGE pPage;
4558 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4559 if (RT_SUCCESS(rc))
4560 {
4561 if (PGM_PAGE_IS_BALLOONED(pPage))
4562 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4563 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4564 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4565 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4566 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4567 rc = VINF_SUCCESS;
4568 else
4569 {
4570 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4571 {
4572 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4573 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4574 }
4575 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4576 {
4577 Assert(!fByPassHandlers);
4578 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4579 }
4580 }
4581 }
4582
4583 pgmUnlock(pVM);
4584 return rc;
4585}
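
/*
 * Sketch (compile-excluded): probing whether a write to a guest physical page
 * can go straight to memory or needs the handler-aware PGMPhysWrite path.
 * The A20 mask is assumed to have been applied by the caller, as required.
 */
#if 0
static bool pgmExampleCanWriteDirectly(PVMCC pVM, RTGCPHYS GCPhysA20Masked)
{
    return PGMPhysIemQueryAccess(pVM, GCPhysA20Masked, true /*fWritable*/, false /*fByPassHandlers*/) == VINF_SUCCESS;
}
#endif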
4586
4587
4588/**
4589 * Interface used by NEM to check what to do on a memory access exit.
4590 *
4591 * @returns VBox status code.
4592 * @param pVM The cross context VM structure.
4593 * @param pVCpu The cross context per virtual CPU structure.
4594 * Optional.
4595 * @param GCPhys The guest physical address.
4596 * @param fMakeWritable Whether to try to make the page writable or not. If it
4597 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
4598 * be returned and the return code will be unaffected.
4599 * @param pInfo Where to return the page information. This is
4600 * initialized even on failure.
4601 * @param pfnChecker Page in-sync checker callback. Optional.
4602 * @param pvUser User argument to pass to pfnChecker.
4603 */
4604VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable, PPGMPHYSNEMPAGEINFO pInfo,
4605 PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
4606{
4607 pgmLock(pVM);
4608
4609 PPGMPAGE pPage;
4610 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
4611 if (RT_SUCCESS(rc))
4612 {
4613 /* Try make it writable if requested. */
4614 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
4615 if (fMakeWritable)
4616 switch (PGM_PAGE_GET_STATE(pPage))
4617 {
4618 case PGM_PAGE_STATE_SHARED:
4619 case PGM_PAGE_STATE_WRITE_MONITORED:
4620 case PGM_PAGE_STATE_ZERO:
4621 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
4622 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
4623 rc = VINF_SUCCESS;
4624 break;
4625 }
4626
4627 /* Fill in the info. */
4628 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
4629 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
4630 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
4631 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
4632 pInfo->enmType = enmType;
4633 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
4634 switch (PGM_PAGE_GET_STATE(pPage))
4635 {
4636 case PGM_PAGE_STATE_ALLOCATED:
4637 pInfo->fZeroPage = 0;
4638 break;
4639
4640 case PGM_PAGE_STATE_ZERO:
4641 pInfo->fZeroPage = 1;
4642 break;
4643
4644 case PGM_PAGE_STATE_WRITE_MONITORED:
4645 pInfo->fZeroPage = 0;
4646 break;
4647
4648 case PGM_PAGE_STATE_SHARED:
4649 pInfo->fZeroPage = 0;
4650 break;
4651
4652 case PGM_PAGE_STATE_BALLOONED:
4653 pInfo->fZeroPage = 1;
4654 break;
4655
4656 default:
4657 pInfo->fZeroPage = 1;
4658 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
4659 }
4660
4661 /* Call the checker and update NEM state. */
4662 if (pfnChecker)
4663 {
4664 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
4665 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
4666 }
4667
4668 /* Done. */
4669 pgmUnlock(pVM);
4670 }
4671 else
4672 {
4673 pgmUnlock(pVM);
4674
4675 pInfo->HCPhys = NIL_RTHCPHYS;
4676 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
4677 pInfo->u2NemState = 0;
4678 pInfo->fHasHandlers = 0;
4679 pInfo->fZeroPage = 0;
4680 pInfo->enmType = PGMPAGETYPE_INVALID;
4681 }
4682
4683 return rc;
4684}
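
/*
 * Sketch (compile-excluded): querying page info for a NEM memory exit without
 * a checker callback.  Only fields the function is seen filling in above are
 * consumed; the format specifiers assume the usual field types.
 */
#if 0
static int pgmExampleNemQueryPage(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys)
{
    PGMPHYSNEMPAGEINFO Info;
    int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, true /*fMakeWritable*/, &Info,
                                       NULL /*pfnChecker*/, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        LogFlow(("pgmExampleNemQueryPage: %RGp HCPhys=%RHp prot=%#x zero=%u\n",
                 GCPhys, Info.HCPhys, Info.fNemProt, Info.fZeroPage));
    return rc;
}
#endif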
4685
4686
4687/**
4688 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
4689 * or higher.
4690 *
4691 * @returns VBox status code from callback.
4692 * @param pVM The cross context VM structure.
4693 * @param pVCpu The cross context per CPU structure. This is
4694 * optional as it's only for passing to the callback.
4695 * @param uMinState The minimum NEM state value to call on.
4696 * @param pfnCallback The callback function.
4697 * @param pvUser User argument for the callback.
4698 */
4699VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
4700 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
4701{
4702 /*
4703 * Just brute force this problem.
4704 */
4705 pgmLock(pVM);
4706 int rc = VINF_SUCCESS;
4707 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
4708 {
4709 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
4710 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4711 {
4712 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
4713 if (u2State < uMinState)
4714 { /* likely */ }
4715 else
4716 {
4717 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
4718 if (RT_SUCCESS(rc))
4719 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
4720 else
4721 break;
4722 }
4723 }
4724 }
4725 pgmUnlock(pVM);
4726
4727 return rc;
4728}
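
/*
 * Sketch (compile-excluded): a do-nothing enumeration callback plus the call
 * that drives it.  The callback parameter list is inferred from the
 * invocation in the loop above; check the FNPGMPHYSNEMENUMCALLBACK typedef
 * before copying this, and the uMinState threshold of 1 is arbitrary.
 */
#if 0
static DECLCALLBACK(int) pgmExampleNemEnumCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
                                                   uint8_t *pu2State, void *pvUser)
{
    RT_NOREF(pVM, pVCpu, pvUser);
    LogFlow(("pgmExampleNemEnumCallback: %RGp state=%u\n", GCPhys, *pu2State));
    return VINF_SUCCESS;                    /* a failure status stops the enumeration */
}

static int pgmExampleNemEnumPages(PVMCC pVM, PVMCPUCC pVCpu)
{
    return PGMPhysNemEnumPagesByState(pVM, pVCpu, 1 /*uMinState*/, pgmExampleNemEnumCallback, NULL /*pvUser*/);
}
#endif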
4729