VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@47786

Last change on this file since 47786 was 47786, checked in by vboxsync, 11 years ago

PGM: Added a new page type for the VT-x APIC access page MMIO alias instead of abusing the MMIO2 aliasing. There are important differences: we can safely access the MMIO2 page when aliased and save time doing so, while the alias created by IOMMMIOMapMMIOHCPage must not be accessed outside VT-x execution, AFAIK.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 148.5 KB
1/* $Id: PGMAllPhys.cpp 47786 2013-08-16 08:59:32Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without an RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param uErrorCode CPU Error code.
61 * @param pRegFrame Trap register frame.
62 * @param pvFault The fault address (cr2).
63 * @param GCPhysFault The GC physical address corresponding to pvFault.
64 * @param pvUser User argument.
65 */
66VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
67{
68 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
69 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
70}
71
72
73/**
74 * \#PF Handler callback for Guest ROM range write access.
75 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
76 *
77 * @returns VBox status code (appropriate for trap handling and GC return).
78 * @param pVM Pointer to the VM.
79 * @param uErrorCode CPU Error code.
80 * @param pRegFrame Trap register frame.
81 * @param pvFault The fault address (cr2).
82 * @param GCPhysFault The GC physical address corresponding to pvFault.
83 * @param pvUser User argument. Pointer to the ROM range structure.
84 */
85VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
86{
87 int rc;
88 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
89 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
90 PVMCPU pVCpu = VMMGetCpu(pVM);
91 NOREF(uErrorCode); NOREF(pvFault);
92
93 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
94
95 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
96 switch (pRom->aPages[iPage].enmProt)
97 {
98 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
99 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
100 {
101 /*
102 * If it's a simple instruction which doesn't change the cpu state
103 * we will simply skip it. Otherwise we'll have to defer it to REM.
104 */
105 uint32_t cbOp;
106 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
107 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
108 if ( RT_SUCCESS(rc)
109 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
110 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
111 {
112 switch (pDis->bOpCode)
113 {
114 /** @todo Find other instructions we can safely skip, possibly
115 * adding this kind of detection to DIS or EM. */
116 case OP_MOV:
117 pRegFrame->rip += cbOp;
118 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
119 return VINF_SUCCESS;
120 }
121 }
122 break;
123 }
124
125 case PGMROMPROT_READ_RAM_WRITE_RAM:
126 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
127 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
128 AssertRC(rc);
129 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
130
131 case PGMROMPROT_READ_ROM_WRITE_RAM:
132 /* Handle it in ring-3 because it's *way* easier there. */
133 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
134 break;
135
136 default:
137 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
138 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
139 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
140 }
141
142 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
143 return VINF_EM_RAW_EMULATE_INSTR;
144}
145
146#endif /* IN_RING3 */
147
148/**
149 * Invalidates the RAM range TLBs.
150 *
151 * @param pVM Pointer to the VM.
152 */
153void pgmPhysInvalidRamRangeTlbs(PVM pVM)
154{
155 pgmLock(pVM);
156 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
157 {
158 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
159 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
160 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
161 }
162 pgmUnlock(pVM);
163}
164
165
166/**
167 * Tests if a value of type RTGCPHYS would be negative if the type had been signed
168 * instead of unsigned.
169 *
170 * @returns @c true if negative, @c false if positive or zero.
171 * @param a_GCPhys The value to test.
172 * @todo Move me to iprt/types.h.
173 */
174#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
175
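/* Illustrative note (editorial addition, not part of the original source): the macro
 * above turns the unsigned subtraction used by the range lookups below into a sign
 * test.  For example, with GCPhys = 0x0000f000 and pRam->GCPhys = 0x00100000, the
 * value off = GCPhys - pRam->GCPhys wraps around and gets its most significant bit
 * set, so RTGCPHYS_IS_NEGATIVE(off) is true and the AVL traversal descends into the
 * left subtree; otherwise it goes right, or hits the range when off < pRam->cb. */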
176
177/**
178 * Slow worker for pgmPhysGetRange.
179 *
180 * @copydoc pgmPhysGetRange
181 */
182PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
183{
184 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
185
186 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
187 while (pRam)
188 {
189 RTGCPHYS off = GCPhys - pRam->GCPhys;
190 if (off < pRam->cb)
191 {
192 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
193 return pRam;
194 }
195 if (RTGCPHYS_IS_NEGATIVE(off))
196 pRam = pRam->CTX_SUFF(pLeft);
197 else
198 pRam = pRam->CTX_SUFF(pRight);
199 }
200 return NULL;
201}
202
203
204/**
205 * Slow worker for pgmPhysGetRangeAtOrAbove.
206 *
207 * @copydoc pgmPhysGetRangeAtOrAbove
208 */
209PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
210{
211 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
212
213 PPGMRAMRANGE pLastLeft = NULL;
214 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
215 while (pRam)
216 {
217 RTGCPHYS off = GCPhys - pRam->GCPhys;
218 if (off < pRam->cb)
219 {
220 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
221 return pRam;
222 }
223 if (RTGCPHYS_IS_NEGATIVE(off))
224 {
225 pLastLeft = pRam;
226 pRam = pRam->CTX_SUFF(pLeft);
227 }
228 else
229 pRam = pRam->CTX_SUFF(pRight);
230 }
231 return pLastLeft;
232}
233
234
235/**
236 * Slow worker for pgmPhysGetPage.
237 *
238 * @copydoc pgmPhysGetPage
239 */
240PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
241{
242 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
243
244 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
245 while (pRam)
246 {
247 RTGCPHYS off = GCPhys - pRam->GCPhys;
248 if (off < pRam->cb)
249 {
250 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
251 return &pRam->aPages[off >> PAGE_SHIFT];
252 }
253
254 if (RTGCPHYS_IS_NEGATIVE(off))
255 pRam = pRam->CTX_SUFF(pLeft);
256 else
257 pRam = pRam->CTX_SUFF(pRight);
258 }
259 return NULL;
260}
261
262
263/**
264 * Slow worker for pgmPhysGetPageEx.
265 *
266 * @copydoc pgmPhysGetPageEx
267 */
268int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
269{
270 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
271
272 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
273 while (pRam)
274 {
275 RTGCPHYS off = GCPhys - pRam->GCPhys;
276 if (off < pRam->cb)
277 {
278 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
279 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
280 return VINF_SUCCESS;
281 }
282
283 if (RTGCPHYS_IS_NEGATIVE(off))
284 pRam = pRam->CTX_SUFF(pLeft);
285 else
286 pRam = pRam->CTX_SUFF(pRight);
287 }
288
289 *ppPage = NULL;
290 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
291}
292
293
294/**
295 * Slow worker for pgmPhysGetPageAndRangeEx.
296 *
297 * @copydoc pgmPhysGetPageAndRangeEx
298 */
299int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
300{
301 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
302
303 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
304 while (pRam)
305 {
306 RTGCPHYS off = GCPhys - pRam->GCPhys;
307 if (off < pRam->cb)
308 {
309 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
310 *ppRam = pRam;
311 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
312 return VINF_SUCCESS;
313 }
314
315 if (RTGCPHYS_IS_NEGATIVE(off))
316 pRam = pRam->CTX_SUFF(pLeft);
317 else
318 pRam = pRam->CTX_SUFF(pRight);
319 }
320
321 *ppRam = NULL;
322 *ppPage = NULL;
323 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
324}
325
326
327/**
328 * Checks if Address Gate 20 is enabled or not.
329 *
330 * @returns true if enabled.
331 * @returns false if disabled.
332 * @param pVCpu Pointer to the VMCPU.
333 */
334VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
335{
336 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
337 return pVCpu->pgm.s.fA20Enabled;
338}
339
340
341/**
342 * Validates a GC physical address.
343 *
344 * @returns true if valid.
345 * @returns false if invalid.
346 * @param pVM Pointer to the VM.
347 * @param GCPhys The physical address to validate.
348 */
349VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
350{
351 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
352 return pPage != NULL;
353}
354
355
356/**
357 * Checks if a GC physical address is a normal page,
358 * i.e. not ROM, MMIO or reserved.
359 *
360 * @returns true if normal.
361 * @returns false if invalid, ROM, MMIO or reserved page.
362 * @param pVM Pointer to the VM.
363 * @param GCPhys The physical address to check.
364 */
365VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
366{
367 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
368 return pPage
369 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
370}
371
372
373/**
374 * Converts a GC physical address to a HC physical address.
375 *
376 * @returns VINF_SUCCESS on success.
377 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
378 * page but has no physical backing.
379 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
380 * GC physical address.
381 *
382 * @param pVM Pointer to the VM.
383 * @param GCPhys The GC physical address to convert.
384 * @param pHCPhys Where to store the HC physical address on success.
385 */
386VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
387{
388 pgmLock(pVM);
389 PPGMPAGE pPage;
390 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
391 if (RT_SUCCESS(rc))
392 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
393 pgmUnlock(pVM);
394 return rc;
395}
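#if 0 /* Illustrative usage sketch added for exposition; not part of the original source. */
/** Hypothetical example: resolve the host physical backing of a guest physical address. */
static void pgmPhysExampleLogHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);   /* fails for unbacked/invalid GCPhys */
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp is backed by HCPhys %RHp\n", GCPhys, HCPhys));
    else
        Log(("GCPhys %RGp has no backing (rc=%Rrc)\n", GCPhys, rc));
}
#endif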
396
397
398/**
399 * Invalidates all page mapping TLBs.
400 *
401 * @param pVM Pointer to the VM.
402 */
403void pgmPhysInvalidatePageMapTLB(PVM pVM)
404{
405 pgmLock(pVM);
406 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
407
408 /* Clear the shared R0/R3 TLB completely. */
409 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
410 {
411 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
412 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
413 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
414 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
415 }
416
417 /** @todo clear the RC TLB whenever we add it. */
418
419 pgmUnlock(pVM);
420}
421
422
423/**
424 * Invalidates a page mapping TLB entry
425 *
426 * @param pVM Pointer to the VM.
427 * @param GCPhys GCPhys entry to flush
428 */
429void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
430{
431 PGM_LOCK_ASSERT_OWNER(pVM);
432
433 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
434
435#ifdef IN_RC
436 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
437 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
438 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
439 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
440 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
441#else
442 /* Clear the shared R0/R3 TLB entry. */
443 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
444 pTlbe->GCPhys = NIL_RTGCPHYS;
445 pTlbe->pPage = 0;
446 pTlbe->pMap = 0;
447 pTlbe->pv = 0;
448#endif
449
450 /** @todo clear the RC TLB whenever we add it. */
451}
452
453/**
454 * Makes sure that there is at least one handy page ready for use.
455 *
456 * This will also take the appropriate actions when reaching water-marks.
457 *
458 * @returns VBox status code.
459 * @retval VINF_SUCCESS on success.
460 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
461 *
462 * @param pVM Pointer to the VM.
463 *
464 * @remarks Must be called from within the PGM critical section. It may
465 * nip back to ring-3/0 in some cases.
466 */
467static int pgmPhysEnsureHandyPage(PVM pVM)
468{
469 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
470
471 /*
472 * Do we need to do anything special?
473 */
474#ifdef IN_RING3
475 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
476#else
477 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
478#endif
479 {
480 /*
481 * Allocate pages only if we're out of them, or in ring-3, almost out.
482 */
483#ifdef IN_RING3
484 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
485#else
486 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
487#endif
488 {
489 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
490 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
491#ifdef IN_RING3
492 int rc = PGMR3PhysAllocateHandyPages(pVM);
493#else
494 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
495#endif
496 if (RT_UNLIKELY(rc != VINF_SUCCESS))
497 {
498 if (RT_FAILURE(rc))
499 return rc;
500 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
501 if (!pVM->pgm.s.cHandyPages)
502 {
503 LogRel(("PGM: no more handy pages!\n"));
504 return VERR_EM_NO_MEMORY;
505 }
506 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
507 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
508#ifdef IN_RING3
509# ifdef VBOX_WITH_REM
510 REMR3NotifyFF(pVM);
511# endif
512#else
513 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
514#endif
515 }
516 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
517 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
518 ("%u\n", pVM->pgm.s.cHandyPages),
519 VERR_PGM_HANDY_PAGE_IPE);
520 }
521 else
522 {
523 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
524 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
525#ifndef IN_RING3
526 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
527 {
528 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
529 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
530 }
531#endif
532 }
533 }
534
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Replace a zero or shared page with new page that we can write to.
541 *
542 * @returns The following VBox status codes.
543 * @retval VINF_SUCCESS on success, pPage is modified.
544 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
545 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
546 *
547 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
548 *
549 * @param pVM Pointer to the VM.
550 * @param pPage The physical page tracking structure. This will
551 * be modified on success.
552 * @param GCPhys The address of the page.
553 *
554 * @remarks Must be called from within the PGM critical section. It may
555 * nip back to ring-3/0 in some cases.
556 *
557 * @remarks This function shouldn't really fail, however if it does
558 * it probably means we've screwed up the size of handy pages and/or
559 * the low-water mark. Or, that some device I/O is causing a lot of
560 * pages to be allocated while the host is in a low-memory
561 * condition. This latter should be handled elsewhere and in a more
562 * controlled manner, it's on the @bugref{3170} todo list...
563 */
564int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
565{
566 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
567
568 /*
569 * Prereqs.
570 */
571 PGM_LOCK_ASSERT_OWNER(pVM);
572 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
573 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
574
575# ifdef PGM_WITH_LARGE_PAGES
576 /*
577 * Try allocate a large page if applicable.
578 */
579 if ( PGMIsUsingLargePages(pVM)
580 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
581 {
582 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
583 PPGMPAGE pBasePage;
584
585 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
586 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
587 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
588 {
589 rc = pgmPhysAllocLargePage(pVM, GCPhys);
590 if (rc == VINF_SUCCESS)
591 return rc;
592 }
593 /* Mark the base as type page table, so we don't check over and over again. */
594 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
595
596 /* fall back to 4KB pages. */
597 }
598# endif
599
600 /*
601 * Flush any shadow page table mappings of the page.
602 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
603 */
604 bool fFlushTLBs = false;
605 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
606 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
607
608 /*
609 * Ensure that we've got a page handy, take it and use it.
610 */
611 int rc2 = pgmPhysEnsureHandyPage(pVM);
612 if (RT_FAILURE(rc2))
613 {
614 if (fFlushTLBs)
615 PGM_INVL_ALL_VCPU_TLBS(pVM);
616 Assert(rc2 == VERR_EM_NO_MEMORY);
617 return rc2;
618 }
619 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
620 PGM_LOCK_ASSERT_OWNER(pVM);
621 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
622 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
623
624 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
625 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
626 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
627 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
628 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
629 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
630
631 /*
632 * There are one or two actions to be taken the next time we allocate handy pages:
633 * - Tell the GMM (global memory manager) what the page is being used for.
634 * (Speeds up replacement operations - sharing and defragmenting.)
635 * - If the current backing is shared, it must be freed.
636 */
637 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
638 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
639
640 void const *pvSharedPage = NULL;
641 if (PGM_PAGE_IS_SHARED(pPage))
642 {
643 /* Mark this shared page for freeing/dereferencing. */
644 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
645 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
646
647 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
648 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
649 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
650 pVM->pgm.s.cSharedPages--;
651
652 /* Grab the address of the page so we can make a copy later on. (safe) */
653 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
654 AssertRC(rc);
655 }
656 else
657 {
658 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
659 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
660 pVM->pgm.s.cZeroPages--;
661 }
662
663 /*
664 * Do the PGMPAGE modifications.
665 */
666 pVM->pgm.s.cPrivatePages++;
667 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
668 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
669 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
670 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
671 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
672
673 /* Copy the shared page contents to the replacement page. */
674 if (pvSharedPage)
675 {
676 /* Get the virtual address of the new page. */
677 PGMPAGEMAPLOCK PgMpLck;
678 void *pvNewPage;
679 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
680 if (RT_SUCCESS(rc))
681 {
682 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
683 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
684 }
685 }
686
687 if ( fFlushTLBs
688 && rc != VINF_PGM_GCPHYS_ALIASED)
689 PGM_INVL_ALL_VCPU_TLBS(pVM);
690 return rc;
691}
692
693#ifdef PGM_WITH_LARGE_PAGES
694
695/**
696 * Replace a 2 MB range of zero pages with new pages that we can write to.
697 *
698 * @returns The following VBox status codes.
699 * @retval VINF_SUCCESS on success, pPage is modified.
700 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
701 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
702 *
703 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
704 *
705 * @param pVM Pointer to the VM.
706 * @param GCPhys The address of the page.
707 *
708 * @remarks Must be called from within the PGM critical section. It may
709 * nip back to ring-3/0 in some cases.
710 */
711int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
712{
713 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
714 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
715
716 /*
717 * Prereqs.
718 */
719 PGM_LOCK_ASSERT_OWNER(pVM);
720 Assert(PGMIsUsingLargePages(pVM));
721
722 PPGMPAGE pFirstPage;
723 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
724 if ( RT_SUCCESS(rc)
725 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
726 {
727 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
728
729 /* Don't call this function for already allocated pages. */
730 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
731
732 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
733 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
734 {
735 /* Lazy approach: check all pages in the 2 MB range.
736 * The whole range must be ram and unallocated. */
737 GCPhys = GCPhysBase;
738 unsigned iPage;
739 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
740 {
741 PPGMPAGE pSubPage;
742 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
743 if ( RT_FAILURE(rc)
744 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
745 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
746 {
747 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
748 break;
749 }
750 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
751 GCPhys += PAGE_SIZE;
752 }
753 if (iPage != _2M/PAGE_SIZE)
754 {
755 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
756 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
757 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
758 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
759 }
760
761 /*
762 * Do the allocation.
763 */
764# ifdef IN_RING3
765 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
766# else
767 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
768# endif
769 if (RT_SUCCESS(rc))
770 {
771 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
772 pVM->pgm.s.cLargePages++;
773 return VINF_SUCCESS;
774 }
775
776 /* If we fail once, it most likely means the host's memory is too
777 fragmented; don't bother trying again. */
778 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
779 PGMSetLargePageUsage(pVM, false);
780 return rc;
781 }
782 }
783 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
784}
785
786
787/**
788 * Recheck the entire 2 MB range to see if we can use it again as a large page.
789 *
790 * @returns The following VBox status codes.
791 * @retval VINF_SUCCESS on success, the large page can be used again
792 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
793 *
794 * @param pVM Pointer to the VM.
795 * @param GCPhys The address of the page.
796 * @param pLargePage Page structure of the base page
797 */
798int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
799{
800 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
801
802 GCPhys &= X86_PDE2M_PAE_PG_MASK;
803
804 /* Check the base page. */
805 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
806 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
807 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
808 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
809 {
810 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
811 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
812 }
813
814 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
815 /* Check all remaining pages in the 2 MB range. */
816 unsigned i;
817 GCPhys += PAGE_SIZE;
818 for (i = 1; i < _2M/PAGE_SIZE; i++)
819 {
820 PPGMPAGE pPage;
821 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
822 AssertRCBreak(rc);
823
824 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
825 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
826 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
827 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
828 {
829 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
830 break;
831 }
832
833 GCPhys += PAGE_SIZE;
834 }
835 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
836
837 if (i == _2M/PAGE_SIZE)
838 {
839 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
840 pVM->pgm.s.cLargePagesDisabled--;
841 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
842 return VINF_SUCCESS;
843 }
844
845 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
846}
847
848#endif /* PGM_WITH_LARGE_PAGES */
849
850/**
851 * Deal with a write monitored page.
852 *
853 * @returns VBox strict status code.
854 *
855 * @param pVM Pointer to the VM.
856 * @param pPage The physical page tracking structure.
857 *
858 * @remarks Called from within the PGM critical section.
859 */
860void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
861{
862 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
863 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
864 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
865 Assert(pVM->pgm.s.cMonitoredPages > 0);
866 pVM->pgm.s.cMonitoredPages--;
867 pVM->pgm.s.cWrittenToPages++;
868}
869
870
871/**
872 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
873 *
874 * @returns VBox strict status code.
875 * @retval VINF_SUCCESS on success.
876 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
877 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
878 *
879 * @param pVM Pointer to the VM.
880 * @param pPage The physical page tracking structure.
881 * @param GCPhys The address of the page.
882 *
883 * @remarks Called from within the PGM critical section.
884 */
885int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
886{
887 PGM_LOCK_ASSERT_OWNER(pVM);
888 switch (PGM_PAGE_GET_STATE(pPage))
889 {
890 case PGM_PAGE_STATE_WRITE_MONITORED:
891 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
892 /* fall thru */
893 default: /* to shut up GCC */
894 case PGM_PAGE_STATE_ALLOCATED:
895 return VINF_SUCCESS;
896
897 /*
898 * Zero pages can be dummy pages for MMIO or reserved memory,
899 * so we need to check the flags before joining cause with
900 * shared page replacement.
901 */
902 case PGM_PAGE_STATE_ZERO:
903 if (PGM_PAGE_IS_MMIO(pPage))
904 return VERR_PGM_PHYS_PAGE_RESERVED;
905 /* fall thru */
906 case PGM_PAGE_STATE_SHARED:
907 return pgmPhysAllocPage(pVM, pPage, GCPhys);
908
909 /* Not allowed to write to ballooned pages. */
910 case PGM_PAGE_STATE_BALLOONED:
911 return VERR_PGM_PHYS_PAGE_BALLOONED;
912 }
913}
914
915
916/**
917 * Internal usage: Map the page specified by its GMM ID.
918 *
919 * This is similar to pgmPhysPageMap.
920 *
921 * @returns VBox status code.
922 *
923 * @param pVM Pointer to the VM.
924 * @param idPage The Page ID.
925 * @param HCPhys The physical address (for RC).
926 * @param ppv Where to store the mapping address.
927 *
928 * @remarks Called from within the PGM critical section. The mapping is only
929 * valid while you are inside this section.
930 */
931int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
932{
933 /*
934 * Validation.
935 */
936 PGM_LOCK_ASSERT_OWNER(pVM);
937 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
938 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
939 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
940
941#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
942 /*
943 * Map it by HCPhys.
944 */
945 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
946
947#else
948 /*
949 * Find/make Chunk TLB entry for the mapping chunk.
950 */
951 PPGMCHUNKR3MAP pMap;
952 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
953 if (pTlbe->idChunk == idChunk)
954 {
955 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
956 pMap = pTlbe->pChunk;
957 }
958 else
959 {
960 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
961
962 /*
963 * Find the chunk, map it if necessary.
964 */
965 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
966 if (pMap)
967 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
968 else
969 {
970# ifdef IN_RING0
971 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
972 AssertRCReturn(rc, rc);
973 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
974 Assert(pMap);
975# else
976 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
977 if (RT_FAILURE(rc))
978 return rc;
979# endif
980 }
981
982 /*
983 * Enter it into the Chunk TLB.
984 */
985 pTlbe->idChunk = idChunk;
986 pTlbe->pChunk = pMap;
987 }
988
989 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
990 return VINF_SUCCESS;
991#endif
992}
993
994
995/**
996 * Maps a page into the current virtual address space so it can be accessed.
997 *
998 * @returns VBox status code.
999 * @retval VINF_SUCCESS on success.
1000 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1001 *
1002 * @param pVM Pointer to the VM.
1003 * @param pPage The physical page tracking structure.
1004 * @param GCPhys The address of the page.
1005 * @param ppMap Where to store the address of the mapping tracking structure.
1006 * @param ppv Where to store the mapping address of the page. The page
1007 * offset is masked off!
1008 *
1009 * @remarks Called from within the PGM critical section.
1010 */
1011static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1012{
1013 PGM_LOCK_ASSERT_OWNER(pVM);
1014
1015#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1016 /*
1017 * Just some sketchy GC/R0-darwin code.
1018 */
1019 *ppMap = NULL;
1020 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1021 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1022 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1023 NOREF(GCPhys);
1024 return VINF_SUCCESS;
1025
1026#else /* IN_RING3 || IN_RING0 */
1027
1028
1029 /*
1030 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1031 */
1032 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1033 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1034 {
1035 /* Fend off the VT-x APIC access page hack. */
1036 AssertLogRelReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, VERR_PGM_MAP_MMIO2_ALIAS_MMIO);
1037
1038 /* Decode the page id to a page in a MMIO2 ram range. */
1039 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1040 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1041 AssertLogRelReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1042 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1043 PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1044 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1045 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1046 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1047 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1048 *ppMap = NULL;
1049 return VINF_SUCCESS;
1050 }
1051
1052 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1053 if (idChunk == NIL_GMM_CHUNKID)
1054 {
1055 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1056 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1057 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1058 {
1059 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1060 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1061 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1062 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1063 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1064 }
1065 else
1066 {
1067 static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */
1068 *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff);
1069 }
1070 *ppMap = NULL;
1071 return VINF_SUCCESS;
1072 }
1073
1074 /*
1075 * Find/make Chunk TLB entry for the mapping chunk.
1076 */
1077 PPGMCHUNKR3MAP pMap;
1078 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1079 if (pTlbe->idChunk == idChunk)
1080 {
1081 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1082 pMap = pTlbe->pChunk;
1083 AssertPtr(pMap->pv);
1084 }
1085 else
1086 {
1087 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1088
1089 /*
1090 * Find the chunk, map it if necessary.
1091 */
1092 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1093 if (pMap)
1094 {
1095 AssertPtr(pMap->pv);
1096 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1097 }
1098 else
1099 {
1100#ifdef IN_RING0
1101 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1102 AssertRCReturn(rc, rc);
1103 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1104 Assert(pMap);
1105#else
1106 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1107 if (RT_FAILURE(rc))
1108 return rc;
1109#endif
1110 AssertPtr(pMap->pv);
1111 }
1112
1113 /*
1114 * Enter it into the Chunk TLB.
1115 */
1116 pTlbe->idChunk = idChunk;
1117 pTlbe->pChunk = pMap;
1118 }
1119
1120 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1121 *ppMap = pMap;
1122 return VINF_SUCCESS;
1123#endif /* IN_RING3 */
1124}
1125
1126
1127/**
1128 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1129 *
1130 * This is typically used in paths where we cannot use the TLB methods (like ROM
1131 * pages) or where there is no point in using them since we won't get many hits.
1132 *
1133 * @returns VBox strict status code.
1134 * @retval VINF_SUCCESS on success.
1135 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1136 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1137 *
1138 * @param pVM Pointer to the VM.
1139 * @param pPage The physical page tracking structure.
1140 * @param GCPhys The address of the page.
1141 * @param ppv Where to store the mapping address of the page. The page
1142 * offset is masked off!
1143 *
1144 * @remarks Called from within the PGM critical section. The mapping is only
1145 * valid while you are inside this section.
1146 */
1147int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1148{
1149 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1150 if (RT_SUCCESS(rc))
1151 {
1152 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1153 PPGMPAGEMAP pMapIgnore;
1154 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1155 if (RT_FAILURE(rc2)) /* preserve rc */
1156 rc = rc2;
1157 }
1158 return rc;
1159}
1160
1161
1162/**
1163 * Maps a page into the current virtual address space so it can be accessed for
1164 * both writing and reading.
1165 *
1166 * This is typically used in paths where we cannot use the TLB methods (like ROM
1167 * pages) or where there is no point in using them since we won't get many hits.
1168 *
1169 * @returns VBox status code.
1170 * @retval VINF_SUCCESS on success.
1171 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1172 *
1173 * @param pVM Pointer to the VM.
1174 * @param pPage The physical page tracking structure. Must be in the
1175 * allocated state.
1176 * @param GCPhys The address of the page.
1177 * @param ppv Where to store the mapping address of the page. The page
1178 * offset is masked off!
1179 *
1180 * @remarks Called from within the PGM critical section. The mapping is only
1181 * valid while you are inside this section.
1182 */
1183int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1184{
1185 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1186 PPGMPAGEMAP pMapIgnore;
1187 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1188}
1189
1190
1191/**
1192 * Maps a page into the current virtual address space so it can be accessed for
1193 * reading.
1194 *
1195 * This is typically used in paths where we cannot use the TLB methods (like ROM
1196 * pages) or where there is no point in using them since we won't get many hits.
1197 *
1198 * @returns VBox status code.
1199 * @retval VINF_SUCCESS on success.
1200 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1201 *
1202 * @param pVM Pointer to the VM.
1203 * @param pPage The physical page tracking structure.
1204 * @param GCPhys The address of the page.
1205 * @param ppv Where to store the mapping address of the page. The page
1206 * offset is masked off!
1207 *
1208 * @remarks Called from within the PGM critical section. The mapping is only
1209 * valid while you are inside this section.
1210 */
1211int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1212{
1213 PPGMPAGEMAP pMapIgnore;
1214 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1215}
1216
1217#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1218
1219/**
1220 * Load a guest page into the ring-3 physical TLB.
1221 *
1222 * @returns VBox status code.
1223 * @retval VINF_SUCCESS on success
1224 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1225 * @param pVM Pointer to the VM.
1226 * @param GCPhys The guest physical address in question.
1227 */
1228int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1229{
1230 PGM_LOCK_ASSERT_OWNER(pVM);
1231
1232 /*
1233 * Find the ram range and page and hand it over to the with-page function.
1234 * 99.8% of requests are expected to be in the first range.
1235 */
1236 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1237 if (!pPage)
1238 {
1239 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1240 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1241 }
1242
1243 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1244}
1245
1246
1247/**
1248 * Load a guest page into the ring-3 physical TLB.
1249 *
1250 * @returns VBox status code.
1251 * @retval VINF_SUCCESS on success
1252 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1253 *
1254 * @param pVM Pointer to the VM.
1255 * @param pPage Pointer to the PGMPAGE structure corresponding to
1256 * GCPhys.
1257 * @param GCPhys The guest physical address in question.
1258 */
1259int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1260{
1261 PGM_LOCK_ASSERT_OWNER(pVM);
1262 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1263
1264 /*
1265 * Map the page.
1266 * Make a special case for the zero page as it is kind of special.
1267 */
1268 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1269 if ( !PGM_PAGE_IS_ZERO(pPage)
1270 && !PGM_PAGE_IS_BALLOONED(pPage))
1271 {
1272 void *pv;
1273 PPGMPAGEMAP pMap;
1274 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1275 if (RT_FAILURE(rc))
1276 return rc;
1277 pTlbe->pMap = pMap;
1278 pTlbe->pv = pv;
1279 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1280 }
1281 else
1282 {
1283 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1284 pTlbe->pMap = NULL;
1285 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1286 }
1287#ifdef PGM_WITH_PHYS_TLB
1288 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1289 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1290 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1291 else
1292 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1293#else
1294 pTlbe->GCPhys = NIL_RTGCPHYS;
1295#endif
1296 pTlbe->pPage = pPage;
1297 return VINF_SUCCESS;
1298}
1299
1300#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1301
1302/**
1303 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1304 * own the PGM lock and therefore not need to lock the mapped page.
1305 *
1306 * @returns VBox status code.
1307 * @retval VINF_SUCCESS on success.
1308 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1309 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1310 *
1311 * @param pVM Pointer to the VM.
1312 * @param GCPhys The guest physical address of the page that should be mapped.
1313 * @param pPage Pointer to the PGMPAGE structure for the page.
1314 * @param ppv Where to store the address corresponding to GCPhys.
1315 *
1316 * @internal
1317 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1318 */
1319int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1320{
1321 int rc;
1322 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1323 PGM_LOCK_ASSERT_OWNER(pVM);
1324 pVM->pgm.s.cDeprecatedPageLocks++;
1325
1326 /*
1327 * Make sure the page is writable.
1328 */
1329 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1330 {
1331 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1332 if (RT_FAILURE(rc))
1333 return rc;
1334 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1335 }
1336 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1337
1338 /*
1339 * Get the mapping address.
1340 */
1341#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1342 void *pv;
1343 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1344 PGM_PAGE_GET_HCPHYS(pPage),
1345 &pv
1346 RTLOG_COMMA_SRC_POS);
1347 if (RT_FAILURE(rc))
1348 return rc;
1349 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1350#else
1351 PPGMPAGEMAPTLBE pTlbe;
1352 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1353 if (RT_FAILURE(rc))
1354 return rc;
1355 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1356#endif
1357 return VINF_SUCCESS;
1358}
1359
1360#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1361
1362/**
1363 * Locks a page mapping for writing.
1364 *
1365 * @param pVM Pointer to the VM.
1366 * @param pPage The page.
1367 * @param pTlbe The mapping TLB entry for the page.
1368 * @param pLock The lock structure (output).
1369 */
1370DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1371{
1372 PPGMPAGEMAP pMap = pTlbe->pMap;
1373 if (pMap)
1374 pMap->cRefs++;
1375
1376 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1377 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1378 {
1379 if (cLocks == 0)
1380 pVM->pgm.s.cWriteLockedPages++;
1381 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1382 }
1383 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1384 {
1385 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1386 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1387 if (pMap)
1388 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1389 }
1390
1391 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1392 pLock->pvMap = pMap;
1393}
1394
1395/**
1396 * Locks a page mapping for reading.
1397 *
1398 * @param pVM Pointer to the VM.
1399 * @param pPage The page.
1400 * @param pTlbe The mapping TLB entry for the page.
1401 * @param pLock The lock structure (output).
1402 */
1403DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1404{
1405 PPGMPAGEMAP pMap = pTlbe->pMap;
1406 if (pMap)
1407 pMap->cRefs++;
1408
1409 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1410 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1411 {
1412 if (cLocks == 0)
1413 pVM->pgm.s.cReadLockedPages++;
1414 PGM_PAGE_INC_READ_LOCKS(pPage);
1415 }
1416 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1417 {
1418 PGM_PAGE_INC_READ_LOCKS(pPage);
1419 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1420 if (pMap)
1421 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1422 }
1423
1424 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1425 pLock->pvMap = pMap;
1426}
1427
1428#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1429
1430
1431/**
1432 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1433 * own the PGM lock and have access to the page structure.
1434 *
1435 * @returns VBox status code.
1436 * @retval VINF_SUCCESS on success.
1437 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1438 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1439 *
1440 * @param pVM Pointer to the VM.
1441 * @param GCPhys The guest physical address of the page that should be mapped.
1442 * @param pPage Pointer to the PGMPAGE structure for the page.
1443 * @param ppv Where to store the address corresponding to GCPhys.
1444 * @param pLock Where to store the lock information that
1445 * pgmPhysReleaseInternalPageMappingLock needs.
1446 *
1447 * @internal
1448 */
1449int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1450{
1451 int rc;
1452 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1453 PGM_LOCK_ASSERT_OWNER(pVM);
1454
1455 /*
1456 * Make sure the page is writable.
1457 */
1458 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1459 {
1460 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1461 if (RT_FAILURE(rc))
1462 return rc;
1463 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1464 }
1465 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1466
1467 /*
1468 * Do the job.
1469 */
1470#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1471 void *pv;
1472 PVMCPU pVCpu = VMMGetCpu(pVM);
1473 rc = pgmRZDynMapHCPageInlined(pVCpu,
1474 PGM_PAGE_GET_HCPHYS(pPage),
1475 &pv
1476 RTLOG_COMMA_SRC_POS);
1477 if (RT_FAILURE(rc))
1478 return rc;
1479 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1480 pLock->pvPage = pv;
1481 pLock->pVCpu = pVCpu;
1482
1483#else
1484 PPGMPAGEMAPTLBE pTlbe;
1485 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1486 if (RT_FAILURE(rc))
1487 return rc;
1488 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1489 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1490#endif
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1497 * own the PGM lock and have access to the page structure.
1498 *
1499 * @returns VBox status code.
1500 * @retval VINF_SUCCESS on success.
1501 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1502 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1503 *
1504 * @param pVM Pointer to the VM.
1505 * @param GCPhys The guest physical address of the page that should be mapped.
1506 * @param pPage Pointer to the PGMPAGE structure for the page.
1507 * @param ppv Where to store the address corresponding to GCPhys.
1508 * @param pLock Where to store the lock information that
1509 * pgmPhysReleaseInternalPageMappingLock needs.
1510 *
1511 * @internal
1512 */
1513int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1514{
1515 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1516 PGM_LOCK_ASSERT_OWNER(pVM);
1517 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1518
1519 /*
1520 * Do the job.
1521 */
1522#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1523 void *pv;
1524 PVMCPU pVCpu = VMMGetCpu(pVM);
1525 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1526 PGM_PAGE_GET_HCPHYS(pPage),
1527 &pv
1528 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1529 if (RT_FAILURE(rc))
1530 return rc;
1531 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1532 pLock->pvPage = pv;
1533 pLock->pVCpu = pVCpu;
1534
1535#else
1536 PPGMPAGEMAPTLBE pTlbe;
1537 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1538 if (RT_FAILURE(rc))
1539 return rc;
1540 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1541 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1542#endif
1543 return VINF_SUCCESS;
1544}
1545
1546
1547/**
1548 * Requests the mapping of a guest page into the current context.
1549 *
1550 * This API should only be used for a very short term, as it will consume scarce
1551 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1552 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1553 *
1554 * This API will assume your intention is to write to the page, and will
1555 * therefore replace shared and zero pages. If you do not intend to modify
1556 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1557 *
1558 * @returns VBox status code.
1559 * @retval VINF_SUCCESS on success.
1560 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1561 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1562 *
1563 * @param pVM Pointer to the VM.
1564 * @param GCPhys The guest physical address of the page that should be
1565 * mapped.
1566 * @param ppv Where to store the address corresponding to GCPhys.
1567 * @param pLock Where to store the lock information that
1568 * PGMPhysReleasePageMappingLock needs.
1569 *
1570 * @remarks The caller is responsible for dealing with access handlers.
1571 * @todo Add an informational return code for pages with access handlers?
1572 *
1573 * @remark Avoid calling this API from within critical sections (other than
1574 * the PGM one) because of the deadlock risk. External threads may
1575 * need to delegate jobs to the EMTs.
1576 * @remarks Only one page is mapped! Make no assumption about what's after or
1577 * before the returned page!
1578 * @thread Any thread.
1579 */
1580VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1581{
1582 int rc = pgmLock(pVM);
1583 AssertRCReturn(rc, rc);
1584
1585#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1586 /*
1587 * Find the page and make sure it's writable.
1588 */
1589 PPGMPAGE pPage;
1590 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1591 if (RT_SUCCESS(rc))
1592 {
1593 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1594 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1595 if (RT_SUCCESS(rc))
1596 {
1597 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1598
1599 PVMCPU pVCpu = VMMGetCpu(pVM);
1600 void *pv;
1601 rc = pgmRZDynMapHCPageInlined(pVCpu,
1602 PGM_PAGE_GET_HCPHYS(pPage),
1603 &pv
1604 RTLOG_COMMA_SRC_POS);
1605 if (RT_SUCCESS(rc))
1606 {
1607 AssertRCSuccess(rc);
1608
1609 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1610 *ppv = pv;
1611 pLock->pvPage = pv;
1612 pLock->pVCpu = pVCpu;
1613 }
1614 }
1615 }
1616
1617#else /* IN_RING3 || IN_RING0 */
1618 /*
1619 * Query the Physical TLB entry for the page (may fail).
1620 */
1621 PPGMPAGEMAPTLBE pTlbe;
1622 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1623 if (RT_SUCCESS(rc))
1624 {
1625 /*
1626 * If the page is shared, the zero page, or being write monitored
1627 * it must be converted to a page that's writable if possible.
1628 */
1629 PPGMPAGE pPage = pTlbe->pPage;
1630 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1631 {
1632 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1633 if (RT_SUCCESS(rc))
1634 {
1635 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1636 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1637 }
1638 }
1639 if (RT_SUCCESS(rc))
1640 {
1641 /*
1642 * Now, just perform the locking and calculate the return address.
1643 */
1644 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1645 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1646 }
1647 }
1648
1649#endif /* IN_RING3 || IN_RING0 */
1650 pgmUnlock(pVM);
1651 return rc;
1652}
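#if 0 /* Illustrative usage sketch added for exposition; not part of the original source. */
/** Hypothetical example: write a buffer into a guest page via a short-lived mapping,
 *  pairing PGMPhysGCPhys2CCPtr with PGMPhysReleasePageMappingLock as described above. */
static int pgmPhysExampleWriteGuestPage(PVM pVM, RTGCPHYS GCPhys, const void *pvSrc, size_t cb)
{
    PGMPAGEMAPLOCK Lock;
    void          *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);   /* maps and write locks the page */
    if (RT_SUCCESS(rc))
    {
        memcpy(pv, pvSrc, RT_MIN(cb, (size_t)PAGE_SIZE));     /* only one page is mapped */
        PGMPhysReleasePageMappingLock(pVM, &Lock);            /* release the lock ASAP */
    }
    return rc;
}
#endif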
1653
1654
1655/**
1656 * Requests the mapping of a guest page into the current context.
1657 *
1658 * This API should only be used for a very short term, as it will consume scarce
1659 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1660 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1661 *
1662 * @returns VBox status code.
1663 * @retval VINF_SUCCESS on success.
1664 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1665 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1666 *
1667 * @param pVM Pointer to the VM.
1668 * @param GCPhys The guest physical address of the page that should be
1669 * mapped.
1670 * @param ppv Where to store the address corresponding to GCPhys.
1671 * @param pLock Where to store the lock information that
1672 * PGMPhysReleasePageMappingLock needs.
1673 *
1674 * @remarks The caller is responsible for dealing with access handlers.
1675 * @todo Add an informational return code for pages with access handlers?
1676 *
1677 * @remarks Avoid calling this API from within critical sections (other than
1678 * the PGM one) because of the deadlock risk.
1679 * @remarks Only one page is mapped! Make no assumption about what's after or
1680 * before the returned page!
1681 * @thread Any thread.
1682 */
1683VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1684{
1685 int rc = pgmLock(pVM);
1686 AssertRCReturn(rc, rc);
1687
1688#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1689 /*
1690 * Find the page and make sure it's readable.
1691 */
1692 PPGMPAGE pPage;
1693 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1694 if (RT_SUCCESS(rc))
1695 {
1696 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1697 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1698 else
1699 {
1700 PVMCPU pVCpu = VMMGetCpu(pVM);
1701 void *pv;
1702 rc = pgmRZDynMapHCPageInlined(pVCpu,
1703 PGM_PAGE_GET_HCPHYS(pPage),
1704 &pv
1705 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1706 if (RT_SUCCESS(rc))
1707 {
1708 AssertRCSuccess(rc);
1709
1710 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1711 *ppv = pv;
1712 pLock->pvPage = pv;
1713 pLock->pVCpu = pVCpu;
1714 }
1715 }
1716 }
1717
1718#else /* IN_RING3 || IN_RING0 */
1719 /*
1720 * Query the Physical TLB entry for the page (may fail).
1721 */
1722 PPGMPAGEMAPTLBE pTlbe;
1723 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1724 if (RT_SUCCESS(rc))
1725 {
1726 /* MMIO pages don't have any readable backing. */
1727 PPGMPAGE pPage = pTlbe->pPage;
1728 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1729 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1730 else
1731 {
1732 /*
1733 * Now, just perform the locking and calculate the return address.
1734 */
1735 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1736 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1737 }
1738 }
1739
1740#endif /* IN_RING3 || IN_RING0 */
1741 pgmUnlock(pVM);
1742 return rc;
1743}
1744
1745
1746/**
1747 * Requests the mapping of a guest page given by virtual address into the current context.
1748 *
1749 * This API should only be used for very short-term access, as it will consume
1750 * scarce resources (R0 and GC) in the mapping cache. When you're done
1751 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1752 *
1753 * This API will assume your intention is to write to the page, and will
1754 * therefore replace shared and zero pages. If you do not intend to modify
1755 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1756 *
1757 * @returns VBox status code.
1758 * @retval VINF_SUCCESS on success.
1759 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1760 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1761 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1762 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1763 *
1764 * @param pVCpu Pointer to the VMCPU.
1765 * @param GCPtr The guest virtual address of the page that should be mapped.
1766 * @param ppv Where to store the address corresponding to GCPtr.
1767 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1768 *
1769 * @remark Avoid calling this API from within critical sections (other than
1770 * the PGM one) because of the deadlock risk.
1771 * @thread EMT
1772 */
1773VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1774{
1775 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1776 RTGCPHYS GCPhys;
1777 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1778 if (RT_SUCCESS(rc))
1779 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1780 return rc;
1781}
1782
1783
1784/**
1785 * Requests the mapping of a guest page given by virtual address into the current context.
1786 *
1787 * This API should only be used for very short-term access, as it will consume
1788 * scarce resources (R0 and GC) in the mapping cache. When you're done
1789 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1790 *
1791 * @returns VBox status code.
1792 * @retval VINF_SUCCESS on success.
1793 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1794 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1795 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1796 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1797 *
1798 * @param pVCpu Pointer to the VMCPU.
1799 * @param GCPtr The guest virtual address of the page that should be mapped.
1800 * @param ppv Where to store the address corresponding to GCPtr.
1801 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1802 *
1803 * @remark Avoid calling this API from within critical sections (other than
1804 * the PGM one) because of the deadlock risk.
1805 * @thread EMT
1806 */
1807VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1808{
1809 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1810 RTGCPHYS GCPhys;
1811 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1812 if (RT_SUCCESS(rc))
1813 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1814 return rc;
1815}
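/*
 * Usage sketch (editorial example, not compiled here; GCPtrGdt and the
 * 8 byte read size are assumptions): an EMT peeking at guest memory through
 * a guest virtual address. The read-only variant avoids breaking up shared
 * or zero pages since nothing is modified, and the read must stay within
 * the single mapped page.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void const    *pv;
 *     int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrGdt, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         uint64_t u64Entry;
 *         memcpy(&u64Entry, pv, sizeof(u64Entry));
 *         PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *     }
 */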
1816
1817
1818/**
1819 * Release the mapping of a guest page.
1820 *
1821 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1822 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1823 *
1824 * @param pVM Pointer to the VM.
1825 * @param pLock The lock structure initialized by the mapping function.
1826 */
1827VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1828{
1829#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1830 Assert(pLock->pvPage != NULL);
1831 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1832 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1833 pLock->pVCpu = NULL;
1834 pLock->pvPage = NULL;
1835
1836#else
1837 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1838 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1839 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1840
1841 pLock->uPageAndType = 0;
1842 pLock->pvMap = NULL;
1843
1844 pgmLock(pVM);
1845 if (fWriteLock)
1846 {
1847 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1848 Assert(cLocks > 0);
1849 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1850 {
1851 if (cLocks == 1)
1852 {
1853 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1854 pVM->pgm.s.cWriteLockedPages--;
1855 }
1856 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1857 }
1858
1859 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1860 {
1861 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1862 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1863 Assert(pVM->pgm.s.cMonitoredPages > 0);
1864 pVM->pgm.s.cMonitoredPages--;
1865 pVM->pgm.s.cWrittenToPages++;
1866 }
1867 }
1868 else
1869 {
1870 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1871 Assert(cLocks > 0);
1872 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1873 {
1874 if (cLocks == 1)
1875 {
1876 Assert(pVM->pgm.s.cReadLockedPages > 0);
1877 pVM->pgm.s.cReadLockedPages--;
1878 }
1879 PGM_PAGE_DEC_READ_LOCKS(pPage);
1880 }
1881 }
1882
1883 if (pMap)
1884 {
1885 Assert(pMap->cRefs >= 1);
1886 pMap->cRefs--;
1887 }
1888 pgmUnlock(pVM);
1889#endif /* IN_RING3 */
1890}
1891
1892
1893/**
1894 * Release the internal mapping of a guest page.
1895 *
1896 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1897 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1898 *
1899 * @param pVM Pointer to the VM.
1900 * @param pLock The lock structure initialized by the mapping function.
1901 *
1902 * @remarks Caller must hold the PGM lock.
1903 */
1904void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1905{
1906 PGM_LOCK_ASSERT_OWNER(pVM);
1907 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1908}
1909
1910
1911/**
1912 * Converts a GC physical address to a HC ring-3 pointer.
1913 *
1914 * @returns VINF_SUCCESS on success.
1915 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1916 * page but has no physical backing.
1917 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1918 * GC physical address.
1919 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1920 * a dynamic ram chunk boundary.
1921 *
1922 * @param pVM Pointer to the VM.
1923 * @param GCPhys The GC physical address to convert.
1924 * @param pR3Ptr Where to store the R3 pointer on success.
1925 *
1926 * @deprecated Avoid when possible!
1927 */
1928int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1929{
1930/** @todo this is kind of hacky and needs some more work. */
1931#ifndef DEBUG_sandervl
1932 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1933#endif
1934
1935 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
1936#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1937 NOREF(pVM); NOREF(pR3Ptr);
1938 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1939#else
1940 pgmLock(pVM);
1941
1942 PPGMRAMRANGE pRam;
1943 PPGMPAGE pPage;
1944 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1945 if (RT_SUCCESS(rc))
1946 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1947
1948 pgmUnlock(pVM);
1949 Assert(rc <= VINF_SUCCESS);
1950 return rc;
1951#endif
1952}
1953
1954#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1955
1956/**
1957 * Maps and locks a guest CR3 or PD (PAE) page.
1958 *
1959 * @returns VINF_SUCCESS on success.
1960 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1961 * page but has no physical backing.
1962 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1963 * GC physical address.
1964 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1965 * a dynamic ram chunk boundary.
1966 *
1967 * @param pVM Pointer to the VM.
1968 * @param GCPhys The GC physical address to convert.
1969 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1970 * may not be valid in ring-0 depending on the
1971 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1972 *
1973 * @remarks The caller must own the PGM lock.
1974 */
1975int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1976{
1977
1978 PPGMRAMRANGE pRam;
1979 PPGMPAGE pPage;
1980 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1981 if (RT_SUCCESS(rc))
1982 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1983 Assert(rc <= VINF_SUCCESS);
1984 return rc;
1985}
1986
1987
1992
1993#endif
1994
1995/**
1996 * Converts a guest pointer to a GC physical address.
1997 *
1998 * This uses the current CR3/CR0/CR4 of the guest.
1999 *
2000 * @returns VBox status code.
2001 * @param pVCpu Pointer to the VMCPU.
2002 * @param GCPtr The guest pointer to convert.
2003 * @param pGCPhys Where to store the GC physical address.
2004 */
2005VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2006{
2007 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2008 if (pGCPhys && RT_SUCCESS(rc))
2009 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2010 return rc;
2011}
2012
2013
2014/**
2015 * Converts a guest pointer to a HC physical address.
2016 *
2017 * This uses the current CR3/CR0/CR4 of the guest.
2018 *
2019 * @returns VBox status code.
2020 * @param pVCpu Pointer to the VMCPU.
2021 * @param GCPtr The guest pointer to convert.
2022 * @param pHCPhys Where to store the HC physical address.
2023 */
2024VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2025{
2026 PVM pVM = pVCpu->CTX_SUFF(pVM);
2027 RTGCPHYS GCPhys;
2028 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2029 if (RT_SUCCESS(rc))
2030 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2031 return rc;
2032}
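/*
 * Usage sketch (editorial example; GCPtrStack is an assumption): translating
 * a guest virtual address using the current guest paging mode. Both
 * converters preserve the page offset, so the result addresses the exact
 * byte and not just the page.
 *
 *     RTGCPHYS GCPhys;
 *     int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrStack, &GCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("guest %RGv -> guest physical %RGp\n", GCPtrStack, GCPhys));
 */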
2033
2034
2035
2036#undef LOG_GROUP
2037#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2038
2039
2040#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2041/**
2042 * Cache PGMPhys memory access
2043 *
2044 * @param pVM Pointer to the VM.
2045 * @param pCache Cache structure pointer
2046 * @param GCPhys GC physical address
2047 * @param pbR3 R3 pointer corresponding to the physical page
2048 *
2049 * @thread EMT.
2050 */
2051static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2052{
2053 uint32_t iCacheIndex;
2054
2055 Assert(VM_IS_EMT(pVM));
2056
2057 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2058 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2059
2060 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2061
2062 ASMBitSet(&pCache->aEntries, iCacheIndex);
2063
2064 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2065 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2066}
2067#endif /* IN_RING3 */
2068
2069
2070/**
2071 * Deals with reading from a page with one or more ALL access handlers.
2072 *
2073 * @returns VBox status code. Can be ignored in ring-3.
2074 * @retval VINF_SUCCESS.
2075 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2076 *
2077 * @param pVM Pointer to the VM.
2078 * @param pPage The page descriptor.
2079 * @param GCPhys The physical address to start reading at.
2080 * @param pvBuf Where to put the bits we read.
2081 * @param cb How much to read - less than or equal to a page.
2082 */
2083static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
2084{
2085 /*
2086 * The most frequent accesses here are MMIO and shadowed ROM.
2087 * The current code ASSUMES that all these access handlers cover full pages!
2088 */
2089
2090 /*
2091 * Whatever we do we need the source page, map it first.
2092 */
2093 PGMPAGEMAPLOCK PgMpLck;
2094 const void *pvSrc = NULL;
2095 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2096 if (RT_FAILURE(rc))
2097 {
2098 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2099 GCPhys, pPage, rc));
2100 memset(pvBuf, 0xff, cb);
2101 return VINF_SUCCESS;
2102 }
2103 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2104
2105 /*
2106 * Deal with any physical handlers.
2107 */
2108#ifdef IN_RING3
2109 PPGMPHYSHANDLER pPhys = NULL;
2110#endif
2111 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2112 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2113 {
2114#ifdef IN_RING3
2115 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2116 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2117 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2118 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2119 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2120 Assert(pPhys->CTX_SUFF(pfnHandler));
2121
2122 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2123 void *pvUser = pPhys->CTX_SUFF(pvUser);
2124
2125 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2126 STAM_PROFILE_START(&pPhys->Stat, h);
2127 PGM_LOCK_ASSERT_OWNER(pVM);
2128 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2129 pgmUnlock(pVM);
2130 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2131 pgmLock(pVM);
2132# ifdef VBOX_WITH_STATISTICS
2133 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2134 if (pPhys)
2135 STAM_PROFILE_STOP(&pPhys->Stat, h);
2136# else
2137 pPhys = NULL; /* might not be valid anymore. */
2138# endif
2139 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2140#else
2141 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2142 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2143 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2144 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2145#endif
2146 }
2147
2148 /*
2149 * Deal with any virtual handlers.
2150 */
2151 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2152 {
2153 unsigned iPage;
2154 PPGMVIRTHANDLER pVirt;
2155
2156 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2157 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2158 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2159 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2160 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2161
2162#ifdef IN_RING3
2163 if (pVirt->pfnHandlerR3)
2164 {
2165 if (!pPhys)
2166 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2167 else
2168 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2169 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2170 + (iPage << PAGE_SHIFT)
2171 + (GCPhys & PAGE_OFFSET_MASK);
2172
2173 STAM_PROFILE_START(&pVirt->Stat, h);
2174 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2175 STAM_PROFILE_STOP(&pVirt->Stat, h);
2176 if (rc2 == VINF_SUCCESS)
2177 rc = VINF_SUCCESS;
2178 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2179 }
2180 else
2181 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2182#else
2183 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2184 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2185 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2186 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2187#endif
2188 }
2189
2190 /*
2191 * Take the default action.
2192 */
2193 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2194 memcpy(pvBuf, pvSrc, cb);
2195 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2196 return rc;
2197}
2198
2199
2200/**
2201 * Read physical memory.
2202 *
2203 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2204 * want to ignore those.
2205 *
2206 * @returns VBox status code. Can be ignored in ring-3.
2207 * @retval VINF_SUCCESS.
2208 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2209 *
2210 * @param pVM Pointer to the VM.
2211 * @param GCPhys Physical address start reading from.
2212 * @param pvBuf Where to put the read bits.
2213 * @param cbRead How many bytes to read.
2214 */
2215VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
2216{
2217 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2218 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2219
2220 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2221 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2222
2223 pgmLock(pVM);
2224
2225 /*
2226 * Copy loop on ram ranges.
2227 */
2228 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2229 for (;;)
2230 {
2231 /* Inside range or not? */
2232 if (pRam && GCPhys >= pRam->GCPhys)
2233 {
2234 /*
2235 * Must work our way thru this page by page.
2236 */
2237 RTGCPHYS off = GCPhys - pRam->GCPhys;
2238 while (off < pRam->cb)
2239 {
2240 unsigned iPage = off >> PAGE_SHIFT;
2241 PPGMPAGE pPage = &pRam->aPages[iPage];
2242 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2243 if (cb > cbRead)
2244 cb = cbRead;
2245
2246 /*
2247 * Any ALL access handlers?
2248 */
2249 if (RT_UNLIKELY( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2250 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)))
2251 {
2252 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2253 if (RT_FAILURE(rc))
2254 {
2255 pgmUnlock(pVM);
2256 return rc;
2257 }
2258 }
2259 else
2260 {
2261 /*
2262 * Get the pointer to the page.
2263 */
2264 PGMPAGEMAPLOCK PgMpLck;
2265 const void *pvSrc;
2266 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2267 if (RT_SUCCESS(rc))
2268 {
2269 memcpy(pvBuf, pvSrc, cb);
2270 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2271 }
2272 else
2273 {
2274 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2275 pRam->GCPhys + off, pPage, rc));
2276 memset(pvBuf, 0xff, cb);
2277 }
2278 }
2279
2280 /* next page */
2281 if (cb >= cbRead)
2282 {
2283 pgmUnlock(pVM);
2284 return VINF_SUCCESS;
2285 }
2286 cbRead -= cb;
2287 off += cb;
2288 pvBuf = (char *)pvBuf + cb;
2289 } /* walk pages in ram range. */
2290
2291 GCPhys = pRam->GCPhysLast + 1;
2292 }
2293 else
2294 {
2295 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2296
2297 /*
2298 * Unassigned address space.
2299 */
2300 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2301 if (cb >= cbRead)
2302 {
2303 memset(pvBuf, 0xff, cbRead);
2304 break;
2305 }
2306 memset(pvBuf, 0xff, cb);
2307
2308 cbRead -= cb;
2309 pvBuf = (char *)pvBuf + cb;
2310 GCPhys += cb;
2311 }
2312
2313 /* Advance range if necessary. */
2314 while (pRam && GCPhys > pRam->GCPhysLast)
2315 pRam = pRam->CTX_SUFF(pNext);
2316 } /* Ram range walk */
2317
2318 pgmUnlock(pVM);
2319 return VINF_SUCCESS;
2320}
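/*
 * Usage sketch (editorial example; the buffer and GCPhysTss are assumptions,
 * and the ring-0 policy shown is just one possibility): reading guest RAM
 * with MMIO and ALL access handlers respected. In ring-3 the status may be
 * ignored; in R0/RC a VERR_PGM_PHYS_WR_HIT_HANDLER return means the access
 * has to be retried in ring-3.
 *
 *     uint8_t abTss[104];
 *     int rc = PGMPhysRead(pVM, GCPhysTss, abTss, sizeof(abTss));
 * #ifdef IN_RING3
 *     AssertRC(rc);
 * #else
 *     if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *         return VINF_EM_RAW_TO_R3;
 * #endif
 */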
2321
2322
2323/**
2324 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2325 *
2326 * @returns VBox status code. Can be ignored in ring-3.
2327 * @retval VINF_SUCCESS.
2328 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2329 *
2330 * @param pVM Pointer to the VM.
2331 * @param pPage The page descriptor.
2332 * @param GCPhys The physical address to start writing at.
2333 * @param pvBuf What to write.
2334 * @param cbWrite How much to write - less or equal to a page.
2335 */
2336static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2337{
2338 PGMPAGEMAPLOCK PgMpLck;
2339 void *pvDst = NULL;
2340 int rc;
2341
2342 /*
2343 * Give priority to physical handlers (like #PF does).
2344 *
2345 * Hope for a lonely physical handler first that covers the whole
2346 * write area. This should be a pretty frequent case with MMIO and
2347 * the heavy usage of full page handlers in the page pool.
2348 */
2349 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2350 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */)
2351 {
2352 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2353 if (pCur)
2354 {
2355 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2356 Assert(pCur->CTX_SUFF(pfnHandler));
2357
2358 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2359 if (cbRange > cbWrite)
2360 cbRange = cbWrite;
2361
2362#ifndef IN_RING3
2363 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2364 NOREF(cbRange);
2365 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2366 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2367
2368#else /* IN_RING3 */
2369 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2370 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2371 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2372 else
2373 rc = VINF_SUCCESS;
2374 if (RT_SUCCESS(rc))
2375 {
2376 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2377 void *pvUser = pCur->CTX_SUFF(pvUser);
2378
2379 STAM_PROFILE_START(&pCur->Stat, h);
2380 PGM_LOCK_ASSERT_OWNER(pVM);
2381 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2382 pgmUnlock(pVM);
2383 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2384 pgmLock(pVM);
2385# ifdef VBOX_WITH_STATISTICS
2386 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2387 if (pCur)
2388 STAM_PROFILE_STOP(&pCur->Stat, h);
2389# else
2390 pCur = NULL; /* might not be valid anymore. */
2391# endif
2392 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2393 memcpy(pvDst, pvBuf, cbRange);
2397 else
2398 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2399 }
2400 else
2401 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2402 GCPhys, pPage, rc), rc);
2403 if (RT_LIKELY(cbRange == cbWrite))
2404 {
2405 if (pvDst)
2406 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2407 return VINF_SUCCESS;
2408 }
2409
2410 /* more fun to be had below */
2411 cbWrite -= cbRange;
2412 GCPhys += cbRange;
2413 pvBuf = (uint8_t *)pvBuf + cbRange;
2414 pvDst = (uint8_t *)pvDst + cbRange;
2415#endif /* IN_RING3 */
2416 }
2417 /* else: the handler is somewhere else in the page, deal with it below. */
2418 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2419 }
2420 /*
2421 * A virtual handler without any interfering physical handlers.
2422 * Hopefully it'll cover the whole write.
2423 */
2424 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2425 {
2426 unsigned iPage;
2427 PPGMVIRTHANDLER pCur;
2428 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2429 if (RT_SUCCESS(rc))
2430 {
2431 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2432 if (cbRange > cbWrite)
2433 cbRange = cbWrite;
2434
2435#ifndef IN_RING3
2436 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2437 NOREF(cbRange);
2438 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2439 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2440
2441#else /* IN_RING3 */
2442
2443 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2444 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2445 if (RT_SUCCESS(rc))
2446 {
2447 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2448 if (pCur->pfnHandlerR3)
2449 {
2450 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2451 + (iPage << PAGE_SHIFT)
2452 + (GCPhys & PAGE_OFFSET_MASK);
2453
2454 STAM_PROFILE_START(&pCur->Stat, h);
2455 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2456 STAM_PROFILE_STOP(&pCur->Stat, h);
2457 }
2458 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2459 memcpy(pvDst, pvBuf, cbRange);
2460 else
2461 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2462 }
2463 else
2464 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2465 GCPhys, pPage, rc), rc);
2466 if (RT_LIKELY(cbRange == cbWrite))
2467 {
2468 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2469 return VINF_SUCCESS;
2470 }
2471
2472 /* more fun to be had below */
2473 cbWrite -= cbRange;
2474 GCPhys += cbRange;
2475 pvBuf = (uint8_t *)pvBuf + cbRange;
2476 pvDst = (uint8_t *)pvDst + cbRange;
2477#endif
2478 }
2479 /* else: the handler is somewhere else in the page, deal with it below. */
2480 }
2481
2482 /*
2483 * Deal with all the odd ends.
2484 */
2485
2486 /* We need a writable destination page. */
2487 if (!pvDst)
2488 {
2489 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2490 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2491 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2492 GCPhys, pPage, rc), rc);
2493 }
2494
2495 /* The loop state (big + ugly). */
2496 unsigned iVirtPage = 0;
2497 PPGMVIRTHANDLER pVirt = NULL;
2498 uint32_t offVirt = PAGE_SIZE;
2499 uint32_t offVirtLast = PAGE_SIZE;
2500 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2501
2502 PPGMPHYSHANDLER pPhys = NULL;
2503 uint32_t offPhys = PAGE_SIZE;
2504 uint32_t offPhysLast = PAGE_SIZE;
2505 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2506
2507 /* The loop. */
2508 for (;;)
2509 {
2510 /*
2511 * Find the closest handler at or above GCPhys.
2512 */
2513 if (fMoreVirt && !pVirt)
2514 {
2515 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2516 if (RT_SUCCESS(rc))
2517 {
2518 offVirt = 0;
2519 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2520 }
2521 else
2522 {
2523 PPGMPHYS2VIRTHANDLER pVirtPhys;
2524 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2525 GCPhys, true /* fAbove */);
2526 if ( pVirtPhys
2527 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2528 {
2529 /* ASSUME that pVirtPhys only covers one page. */
2530 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2531 Assert(pVirtPhys->Core.Key > GCPhys);
2532
2533 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2534 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2535 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2536 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2537 }
2538 else
2539 {
2540 pVirt = NULL;
2541 fMoreVirt = false;
2542 offVirt = offVirtLast = PAGE_SIZE;
2543 }
2544 }
2545 }
2546
2547 if (fMorePhys && !pPhys)
2548 {
2549 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2550 if (pPhys)
2551 {
2552 offPhys = 0;
2553 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2554 }
2555 else
2556 {
2557 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2558 GCPhys, true /* fAbove */);
2559 if ( pPhys
2560 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2561 {
2562 offPhys = pPhys->Core.Key - GCPhys;
2563 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2564 }
2565 else
2566 {
2567 pPhys = NULL;
2568 fMorePhys = false;
2569 offPhys = offPhysLast = PAGE_SIZE;
2570 }
2571 }
2572 }
2573
2574 /*
2575 * Handle access to space without handlers (that's easy).
2576 */
2577 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2578 uint32_t cbRange = (uint32_t)cbWrite;
2579 if (offPhys && offVirt)
2580 {
2581 if (cbRange > offPhys)
2582 cbRange = offPhys;
2583 if (cbRange > offVirt)
2584 cbRange = offVirt;
2585 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2586 }
2587 /*
2588 * Physical handler.
2589 */
2590 else if (!offPhys && offVirt)
2591 {
2592 if (cbRange > offPhysLast + 1)
2593 cbRange = offPhysLast + 1;
2594 if (cbRange > offVirt)
2595 cbRange = offVirt;
2596#ifdef IN_RING3
2597 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2598 void *pvUser = pPhys->CTX_SUFF(pvUser);
2599
2600 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2601 STAM_PROFILE_START(&pPhys->Stat, h);
2602 PGM_LOCK_ASSERT_OWNER(pVM);
2603 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2604 pgmUnlock(pVM);
2605 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2606 pgmLock(pVM);
2607# ifdef VBOX_WITH_STATISTICS
2608 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2609 if (pPhys)
2610 STAM_PROFILE_STOP(&pPhys->Stat, h);
2611# else
2612 pPhys = NULL; /* might not be valid anymore. */
2613# endif
2614 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2615#else
2616 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2617 NOREF(cbRange);
2618 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2619 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2620 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2621#endif
2622 }
2623 /*
2624 * Virtual handler.
2625 */
2626 else if (offPhys && !offVirt)
2627 {
2628 if (cbRange > offVirtLast + 1)
2629 cbRange = offVirtLast + 1;
2630 if (cbRange > offPhys)
2631 cbRange = offPhys;
2632#ifdef IN_RING3
2633 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2634 if (pVirt->pfnHandlerR3)
2635 {
2636 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2637 + (iVirtPage << PAGE_SHIFT)
2638 + (GCPhys & PAGE_OFFSET_MASK);
2639 STAM_PROFILE_START(&pVirt->Stat, h);
2640 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2641 STAM_PROFILE_STOP(&pVirt->Stat, h);
2642 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2643 }
2644 pVirt = NULL;
2645#else
2646 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2647 NOREF(cbRange);
2648 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2649 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2650 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2651#endif
2652 }
2653 /*
2654 * Both... give the physical one priority.
2655 */
2656 else
2657 {
2658 Assert(!offPhys && !offVirt);
2659 if (cbRange > offVirtLast + 1)
2660 cbRange = offVirtLast + 1;
2661 if (cbRange > offPhysLast + 1)
2662 cbRange = offPhysLast + 1;
2663
2664#ifdef IN_RING3
2665 if (pVirt->pfnHandlerR3)
2666 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2667 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2668
2669 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2670 void *pvUser = pPhys->CTX_SUFF(pvUser);
2671
2672 STAM_PROFILE_START(&pPhys->Stat, h);
2673 PGM_LOCK_ASSERT_OWNER(pVM);
2674 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2675 pgmUnlock(pVM);
2676 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2677 pgmLock(pVM);
2678# ifdef VBOX_WITH_STATISTICS
2679 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2680 if (pPhys)
2681 STAM_PROFILE_STOP(&pPhys->Stat, h);
2682# else
2683 pPhys = NULL; /* might not be valid anymore. */
2684# endif
2685 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2686 if (pVirt->pfnHandlerR3)
2687 {
2688
2689 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2690 + (iVirtPage << PAGE_SHIFT)
2691 + (GCPhys & PAGE_OFFSET_MASK);
2692 STAM_PROFILE_START(&pVirt->Stat, h2);
2693 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2694 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2695 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2696 rc = VINF_SUCCESS;
2697 else
2698 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2699 }
2700 pPhys = NULL;
2701 pVirt = NULL;
2702#else
2703 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2704 NOREF(cbRange);
2705 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2706 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2707 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2708#endif
2709 }
2710 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2711 memcpy(pvDst, pvBuf, cbRange);
2712
2713 /*
2714 * Advance if we've got more stuff to do.
2715 */
2716 if (cbRange >= cbWrite)
2717 {
2718 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2719 return VINF_SUCCESS;
2720 }
2721
2722 cbWrite -= cbRange;
2723 GCPhys += cbRange;
2724 pvBuf = (uint8_t *)pvBuf + cbRange;
2725 pvDst = (uint8_t *)pvDst + cbRange;
2726
2727 offPhys -= cbRange;
2728 offPhysLast -= cbRange;
2729 offVirt -= cbRange;
2730 offVirtLast -= cbRange;
2731 }
2732}
2733
2734
2735/**
2736 * Write to physical memory.
2737 *
2738 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2739 * want to ignore those.
2740 *
2741 * @returns VBox status code. Can be ignored in ring-3.
2742 * @retval VINF_SUCCESS.
2743 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2744 *
2745 * @param pVM Pointer to the VM.
2746 * @param GCPhys Physical address to write to.
2747 * @param pvBuf What to write.
2748 * @param cbWrite How many bytes to write.
2749 */
2750VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2751{
2752 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2753 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2754 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2755
2756 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2757 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2758
2759 pgmLock(pVM);
2760
2761 /*
2762 * Copy loop on ram ranges.
2763 */
2764 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2765 for (;;)
2766 {
2767 /* Inside range or not? */
2768 if (pRam && GCPhys >= pRam->GCPhys)
2769 {
2770 /*
2771 * Must work our way thru this page by page.
2772 */
2773 RTGCPTR off = GCPhys - pRam->GCPhys;
2774 while (off < pRam->cb)
2775 {
2776 RTGCPTR iPage = off >> PAGE_SHIFT;
2777 PPGMPAGE pPage = &pRam->aPages[iPage];
2778 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2779 if (cb > cbWrite)
2780 cb = cbWrite;
2781
2782 /*
2783 * Any active WRITE or ALL access handlers?
2784 */
2785 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2786 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2787 {
2788 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2789 if (RT_FAILURE(rc))
2790 {
2791 pgmUnlock(pVM);
2792 return rc;
2793 }
2794 }
2795 else
2796 {
2797 /*
2798 * Get the pointer to the page.
2799 */
2800 PGMPAGEMAPLOCK PgMpLck;
2801 void *pvDst;
2802 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2803 if (RT_SUCCESS(rc))
2804 {
2805 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2806 memcpy(pvDst, pvBuf, cb);
2807 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2808 }
2809 /* Ignore writes to ballooned pages. */
2810 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2811 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2812 pRam->GCPhys + off, pPage, rc));
2813 }
2814
2815 /* next page */
2816 if (cb >= cbWrite)
2817 {
2818 pgmUnlock(pVM);
2819 return VINF_SUCCESS;
2820 }
2821
2822 cbWrite -= cb;
2823 off += cb;
2824 pvBuf = (const char *)pvBuf + cb;
2825 } /* walk pages in ram range */
2826
2827 GCPhys = pRam->GCPhysLast + 1;
2828 }
2829 else
2830 {
2831 /*
2832 * Unassigned address space, skip it.
2833 */
2834 if (!pRam)
2835 break;
2836 size_t cb = pRam->GCPhys - GCPhys;
2837 if (cb >= cbWrite)
2838 break;
2839 cbWrite -= cb;
2840 pvBuf = (const char *)pvBuf + cb;
2841 GCPhys += cb;
2842 }
2843
2844 /* Advance range if necessary. */
2845 while (pRam && GCPhys > pRam->GCPhysLast)
2846 pRam = pRam->CTX_SUFF(pNext);
2847 } /* Ram range walk */
2848
2849 pgmUnlock(pVM);
2850 return VINF_SUCCESS;
2851}
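/*
 * Usage sketch (editorial example; GCPhysVram and the magic value are
 * assumptions): a handler-aware write. The request may span RAM ranges and
 * handler regions; the function splits it page by page and invokes any
 * WRITE/ALL handlers as it goes, so the caller only checks the final status.
 *
 *     uint32_t const u32Magic = UINT32_C(0xdeadbeef);
 *     int rc = PGMPhysWrite(pVM, GCPhysVram, &u32Magic, sizeof(u32Magic));
 * #ifndef IN_RING3
 *     if (RT_FAILURE(rc))     // VERR_PGM_PHYS_WR_HIT_HANDLER: retry in ring-3
 *         return rc;
 * #endif
 */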
2852
2853
2854/**
2855 * Read from guest physical memory by GC physical address, bypassing
2856 * MMIO and access handlers.
2857 *
2858 * @returns VBox status.
2859 * @param pVM Pointer to the VM.
2860 * @param pvDst The destination address.
2861 * @param GCPhysSrc The source address (GC physical address).
2862 * @param cb The number of bytes to read.
2863 */
2864VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2865{
2866 /*
2867 * Treat the first page as a special case.
2868 */
2869 if (!cb)
2870 return VINF_SUCCESS;
2871
2872 /* map the 1st page */
2873 void const *pvSrc;
2874 PGMPAGEMAPLOCK Lock;
2875 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2876 if (RT_FAILURE(rc))
2877 return rc;
2878
2879 /* optimize for the case where access is completely within the first page. */
2880 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2881 if (RT_LIKELY(cb <= cbPage))
2882 {
2883 memcpy(pvDst, pvSrc, cb);
2884 PGMPhysReleasePageMappingLock(pVM, &Lock);
2885 return VINF_SUCCESS;
2886 }
2887
2888 /* copy to the end of the page. */
2889 memcpy(pvDst, pvSrc, cbPage);
2890 PGMPhysReleasePageMappingLock(pVM, &Lock);
2891 GCPhysSrc += cbPage;
2892 pvDst = (uint8_t *)pvDst + cbPage;
2893 cb -= cbPage;
2894
2895 /*
2896 * Page by page.
2897 */
2898 for (;;)
2899 {
2900 /* map the page */
2901 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2902 if (RT_FAILURE(rc))
2903 return rc;
2904
2905 /* last page? */
2906 if (cb <= PAGE_SIZE)
2907 {
2908 memcpy(pvDst, pvSrc, cb);
2909 PGMPhysReleasePageMappingLock(pVM, &Lock);
2910 return VINF_SUCCESS;
2911 }
2912
2913 /* copy the entire page and advance */
2914 memcpy(pvDst, pvSrc, PAGE_SIZE);
2915 PGMPhysReleasePageMappingLock(pVM, &Lock);
2916 GCPhysSrc += PAGE_SIZE;
2917 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2918 cb -= PAGE_SIZE;
2919 }
2920 /* won't ever get here. */
2921}
2922
2923
2924/**
2925 * Write memory to a GC physical address in guest physical memory.
2927 *
2928 * This will bypass MMIO and access handlers.
2929 *
2930 * @returns VBox status.
2931 * @param pVM Pointer to the VM.
2932 * @param GCPhysDst The GC physical address of the destination.
2933 * @param pvSrc The source buffer.
2934 * @param cb The number of bytes to write.
2935 */
2936VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2937{
2938 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2939
2940 /*
2941 * Treat the first page as a special case.
2942 */
2943 if (!cb)
2944 return VINF_SUCCESS;
2945
2946 /* map the 1st page */
2947 void *pvDst;
2948 PGMPAGEMAPLOCK Lock;
2949 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2950 if (RT_FAILURE(rc))
2951 return rc;
2952
2953 /* optimize for the case where access is completely within the first page. */
2954 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2955 if (RT_LIKELY(cb <= cbPage))
2956 {
2957 memcpy(pvDst, pvSrc, cb);
2958 PGMPhysReleasePageMappingLock(pVM, &Lock);
2959 return VINF_SUCCESS;
2960 }
2961
2962 /* copy to the end of the page. */
2963 memcpy(pvDst, pvSrc, cbPage);
2964 PGMPhysReleasePageMappingLock(pVM, &Lock);
2965 GCPhysDst += cbPage;
2966 pvSrc = (const uint8_t *)pvSrc + cbPage;
2967 cb -= cbPage;
2968
2969 /*
2970 * Page by page.
2971 */
2972 for (;;)
2973 {
2974 /* map the page */
2975 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2976 if (RT_FAILURE(rc))
2977 return rc;
2978
2979 /* last page? */
2980 if (cb <= PAGE_SIZE)
2981 {
2982 memcpy(pvDst, pvSrc, cb);
2983 PGMPhysReleasePageMappingLock(pVM, &Lock);
2984 return VINF_SUCCESS;
2985 }
2986
2987 /* copy the entire page and advance */
2988 memcpy(pvDst, pvSrc, PAGE_SIZE);
2989 PGMPhysReleasePageMappingLock(pVM, &Lock);
2990 GCPhysDst += PAGE_SIZE;
2991 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2992 cb -= PAGE_SIZE;
2993 }
2994 /* won't ever get here. */
2995}
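/*
 * Usage sketch (editorial example; the page copy scenario is an assumption):
 * the "simple" GCPhys APIs are for internal callers that deliberately bypass
 * MMIO and access handlers, e.g. moving a block of plain guest RAM around.
 *
 *     uint8_t abPage[PAGE_SIZE];
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, abPage, GCPhysSrc, sizeof(abPage));
 *     if (RT_SUCCESS(rc))
 *         rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, abPage, sizeof(abPage));
 */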
2996
2997
2998/**
2999 * Read from guest physical memory referenced by GC pointer.
3000 *
3001 * This function uses the current CR3/CR0/CR4 of the guest and will
3002 * bypass access handlers and not set any accessed bits.
3003 *
3004 * @returns VBox status.
3005 * @param pVCpu Handle to the current virtual CPU.
3006 * @param pvDst The destination address.
3007 * @param GCPtrSrc The source address (GC pointer).
3008 * @param cb The number of bytes to read.
3009 */
3010VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3011{
3012 PVM pVM = pVCpu->CTX_SUFF(pVM);
3013/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3014
3015 /*
3016 * Treat the first page as a special case.
3017 */
3018 if (!cb)
3019 return VINF_SUCCESS;
3020
3021 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3022 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3023
3024 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3025 * when many VCPUs are fighting for the lock.
3026 */
3027 pgmLock(pVM);
3028
3029 /* map the 1st page */
3030 void const *pvSrc;
3031 PGMPAGEMAPLOCK Lock;
3032 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3033 if (RT_FAILURE(rc))
3034 {
3035 pgmUnlock(pVM);
3036 return rc;
3037 }
3038
3039 /* optimize for the case where access is completely within the first page. */
3040 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3041 if (RT_LIKELY(cb <= cbPage))
3042 {
3043 memcpy(pvDst, pvSrc, cb);
3044 PGMPhysReleasePageMappingLock(pVM, &Lock);
3045 pgmUnlock(pVM);
3046 return VINF_SUCCESS;
3047 }
3048
3049 /* copy to the end of the page. */
3050 memcpy(pvDst, pvSrc, cbPage);
3051 PGMPhysReleasePageMappingLock(pVM, &Lock);
3052 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3053 pvDst = (uint8_t *)pvDst + cbPage;
3054 cb -= cbPage;
3055
3056 /*
3057 * Page by page.
3058 */
3059 for (;;)
3060 {
3061 /* map the page */
3062 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3063 if (RT_FAILURE(rc))
3064 {
3065 pgmUnlock(pVM);
3066 return rc;
3067 }
3068
3069 /* last page? */
3070 if (cb <= PAGE_SIZE)
3071 {
3072 memcpy(pvDst, pvSrc, cb);
3073 PGMPhysReleasePageMappingLock(pVM, &Lock);
3074 pgmUnlock(pVM);
3075 return VINF_SUCCESS;
3076 }
3077
3078 /* copy the entire page and advance */
3079 memcpy(pvDst, pvSrc, PAGE_SIZE);
3080 PGMPhysReleasePageMappingLock(pVM, &Lock);
3081 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3082 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3083 cb -= PAGE_SIZE;
3084 }
3085 /* won't ever get here. */
3086}
3087
3088
3089/**
3090 * Write to guest physical memory referenced by GC pointer.
3091 *
3092 * This function uses the current CR3/CR0/CR4 of the guest and will
3093 * bypass access handlers and not set dirty or accessed bits.
3094 *
3095 * @returns VBox status.
3096 * @param pVCpu Handle to the current virtual CPU.
3097 * @param GCPtrDst The destination address (GC pointer).
3098 * @param pvSrc The source address.
3099 * @param cb The number of bytes to write.
3100 */
3101VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3102{
3103 PVM pVM = pVCpu->CTX_SUFF(pVM);
3104 VMCPU_ASSERT_EMT(pVCpu);
3105
3106 /*
3107 * Treat the first page as a special case.
3108 */
3109 if (!cb)
3110 return VINF_SUCCESS;
3111
3112 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3113 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3114
3115 /* map the 1st page */
3116 void *pvDst;
3117 PGMPAGEMAPLOCK Lock;
3118 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3119 if (RT_FAILURE(rc))
3120 return rc;
3121
3122 /* optimize for the case where access is completely within the first page. */
3123 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3124 if (RT_LIKELY(cb <= cbPage))
3125 {
3126 memcpy(pvDst, pvSrc, cb);
3127 PGMPhysReleasePageMappingLock(pVM, &Lock);
3128 return VINF_SUCCESS;
3129 }
3130
3131 /* copy to the end of the page. */
3132 memcpy(pvDst, pvSrc, cbPage);
3133 PGMPhysReleasePageMappingLock(pVM, &Lock);
3134 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3135 pvSrc = (const uint8_t *)pvSrc + cbPage;
3136 cb -= cbPage;
3137
3138 /*
3139 * Page by page.
3140 */
3141 for (;;)
3142 {
3143 /* map the page */
3144 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3145 if (RT_FAILURE(rc))
3146 return rc;
3147
3148 /* last page? */
3149 if (cb <= PAGE_SIZE)
3150 {
3151 memcpy(pvDst, pvSrc, cb);
3152 PGMPhysReleasePageMappingLock(pVM, &Lock);
3153 return VINF_SUCCESS;
3154 }
3155
3156 /* copy the entire page and advance */
3157 memcpy(pvDst, pvSrc, PAGE_SIZE);
3158 PGMPhysReleasePageMappingLock(pVM, &Lock);
3159 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3160 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3161 cb -= PAGE_SIZE;
3162 }
3163 /* won't ever get here. */
3164}
3165
3166
3167/**
3168 * Write to guest physical memory referenced by GC pointer and update the PTE.
3169 *
3170 * This function uses the current CR3/CR0/CR4 of the guest and will
3171 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3172 *
3173 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3174 *
3175 * @returns VBox status.
3176 * @param pVCpu Handle to the current virtual CPU.
3177 * @param GCPtrDst The destination address (GC pointer).
3178 * @param pvSrc The source address.
3179 * @param cb The number of bytes to write.
3180 */
3181VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3182{
3183 PVM pVM = pVCpu->CTX_SUFF(pVM);
3184 VMCPU_ASSERT_EMT(pVCpu);
3185
3186 /*
3187 * Treat the first page as a special case.
3188 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3189 */
3190 if (!cb)
3191 return VINF_SUCCESS;
3192
3193 /* map the 1st page */
3194 void *pvDst;
3195 PGMPAGEMAPLOCK Lock;
3196 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3197 if (RT_FAILURE(rc))
3198 return rc;
3199
3200 /* optimize for the case where access is completely within the first page. */
3201 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3202 if (RT_LIKELY(cb <= cbPage))
3203 {
3204 memcpy(pvDst, pvSrc, cb);
3205 PGMPhysReleasePageMappingLock(pVM, &Lock);
3206 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3207 return VINF_SUCCESS;
3208 }
3209
3210 /* copy to the end of the page. */
3211 memcpy(pvDst, pvSrc, cbPage);
3212 PGMPhysReleasePageMappingLock(pVM, &Lock);
3213 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3214 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3215 pvSrc = (const uint8_t *)pvSrc + cbPage;
3216 cb -= cbPage;
3217
3218 /*
3219 * Page by page.
3220 */
3221 for (;;)
3222 {
3223 /* map the page */
3224 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3225 if (RT_FAILURE(rc))
3226 return rc;
3227
3228 /* last page? */
3229 if (cb <= PAGE_SIZE)
3230 {
3231 memcpy(pvDst, pvSrc, cb);
3232 PGMPhysReleasePageMappingLock(pVM, &Lock);
3233 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3234 return VINF_SUCCESS;
3235 }
3236
3237 /* copy the entire page and advance */
3238 memcpy(pvDst, pvSrc, PAGE_SIZE);
3239 PGMPhysReleasePageMappingLock(pVM, &Lock);
3240 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3241 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3242 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3243 cb -= PAGE_SIZE;
3244 }
3245 /* won't ever get here. */
3246}
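/*
 * Usage sketch (editorial example; GCPtrFrame and uErrCode are assumptions):
 * pushing data onto the guest stack during emulation. The dirty variant is
 * used because the guest itself would have set the accessed/dirty PTE bits
 * had it performed the write.
 *
 *     int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrFrame, &uErrCode, sizeof(uErrCode));
 *     AssertRCReturn(rc, rc);
 */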
3247
3248
3249/**
3250 * Read from guest physical memory referenced by GC pointer.
3251 *
3252 * This function uses the current CR3/CR0/CR4 of the guest and will
3253 * respect access handlers and set accessed bits.
3254 *
3255 * @returns VBox status.
3256 * @param pVCpu Handle to the current virtual CPU.
3257 * @param pvDst The destination address.
3258 * @param GCPtrSrc The source address (GC pointer).
3259 * @param cb The number of bytes to read.
3260 * @thread The vCPU EMT.
3261 */
3262VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3263{
3264 RTGCPHYS GCPhys;
3265 uint64_t fFlags;
3266 int rc;
3267 PVM pVM = pVCpu->CTX_SUFF(pVM);
3268 VMCPU_ASSERT_EMT(pVCpu);
3269
3270 /*
3271 * Anything to do?
3272 */
3273 if (!cb)
3274 return VINF_SUCCESS;
3275
3276 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3277
3278 /*
3279 * Optimize reads within a single page.
3280 */
3281 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3282 {
3283 /* Convert virtual to physical address + flags */
3284 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3285 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3286 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3287
3288 /* mark the guest page as accessed. */
3289 if (!(fFlags & X86_PTE_A))
3290 {
3291 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3292 AssertRC(rc);
3293 }
3294
3295 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3296 }
3297
3298 /*
3299 * Page by page.
3300 */
3301 for (;;)
3302 {
3303 /* Convert virtual to physical address + flags */
3304 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3305 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3306 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3307
3308 /* mark the guest page as accessed. */
3309 if (!(fFlags & X86_PTE_A))
3310 {
3311 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3312 AssertRC(rc);
3313 }
3314
3315 /* copy */
3316 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3317 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3318 if (cbRead >= cb || RT_FAILURE(rc))
3319 return rc;
3320
3321 /* next */
3322 cb -= cbRead;
3323 pvDst = (uint8_t *)pvDst + cbRead;
3324 GCPtrSrc += cbRead;
3325 }
3326}
3327
3328
3329/**
3330 * Write to guest physical memory referenced by GC pointer.
3331 *
3332 * This function uses the current CR3/CR0/CR4 of the guest and will
3333 * respect access handlers and set dirty and accessed bits.
3334 *
3335 * @returns VBox status.
3336 * @retval VINF_SUCCESS.
3337 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3338 *
3339 * @param pVCpu Handle to the current virtual CPU.
3340 * @param GCPtrDst The destination address (GC pointer).
3341 * @param pvSrc The source address.
3342 * @param cb The number of bytes to write.
3343 */
3344VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3345{
3346 RTGCPHYS GCPhys;
3347 uint64_t fFlags;
3348 int rc;
3349 PVM pVM = pVCpu->CTX_SUFF(pVM);
3350 VMCPU_ASSERT_EMT(pVCpu);
3351
3352 /*
3353 * Anything to do?
3354 */
3355 if (!cb)
3356 return VINF_SUCCESS;
3357
3358 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3359
3360 /*
3361 * Optimize writes within a single page.
3362 */
3363 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3364 {
3365 /* Convert virtual to physical address + flags */
3366 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3367 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3368 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3369
3370 /* Mention when we ignore X86_PTE_RW... */
3371 if (!(fFlags & X86_PTE_RW))
3372            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3373
3374 /* Mark the guest page as accessed and dirty if necessary. */
3375 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3376 {
3377 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3378 AssertRC(rc);
3379 }
3380
3381 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3382 }
3383
3384 /*
3385 * Page by page.
3386 */
3387 for (;;)
3388 {
3389 /* Convert virtual to physical address + flags */
3390 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3391 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3392 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3393
3394 /* Mention when we ignore X86_PTE_RW... */
3395 if (!(fFlags & X86_PTE_RW))
3396        Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3397
3398 /* Mark the guest page as accessed and dirty if necessary. */
3399 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3400 {
3401 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3402 AssertRC(rc);
3403 }
3404
3405 /* copy */
3406 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3407 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3408 if (cbWrite >= cb || RT_FAILURE(rc))
3409 return rc;
3410
3411 /* next */
3412 cb -= cbWrite;
3413 pvSrc = (uint8_t *)pvSrc + cbWrite;
3414 GCPtrDst += cbWrite;
3415 }
3416}
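
/*
 * Usage sketch: writing a value to a guest virtual address.  The helper below
 * is hypothetical; only the PGMPhysWriteGCPtr call uses the API defined above.
 * In R0/RC the call may return VERR_PGM_PHYS_WR_HIT_HANDLER, in which case the
 * caller typically defers the access to ring-3.
 */
#if 0 /* illustrative snippet, not compiled */
static int pgmSampleWriteGuestU32(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    /* Sets the accessed and dirty bits as a side effect; EMT only. */
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u32Value, sizeof(u32Value));
}
#endif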
3417
3418
3419/**
3420 * Performs a read of guest virtual memory for instruction emulation.
3421 *
3422 * This will check permissions, raise exceptions and update the access bits.
3423 *
3424 * The current implementation will bypass all access handlers. It may later be
3425 * changed to at least respect MMIO.
3426 *
3427 *
3428 * @returns VBox status code suitable to scheduling.
3429 * @retval VINF_SUCCESS if the read was performed successfully.
3430 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3431 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3432 *
3433 * @param pVCpu Handle to the current virtual CPU.
3434 * @param pCtxCore The context core.
3435 * @param pvDst Where to put the bytes we've read.
3436 * @param GCPtrSrc The source address.
3437 * @param cb The number of bytes to read. Not more than a page.
3438 *
3439 * @remark This function will dynamically map physical pages in GC. This may unmap
3440 * mappings done by the caller. Be careful!
3441 */
3442VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3443{
3444 PVM pVM = pVCpu->CTX_SUFF(pVM);
3445 Assert(cb <= PAGE_SIZE);
3446 VMCPU_ASSERT_EMT(pVCpu);
3447
3448/** @todo r=bird: This isn't perfect!
3449 * -# It's not checking for reserved bits being 1.
3450 * -# It's not correctly dealing with the access bit.
3451 * -# It's not respecting MMIO memory or any other access handlers.
3452 */
3453 /*
3454 * 1. Translate virtual to physical. This may fault.
3455 * 2. Map the physical address.
3456 * 3. Do the read operation.
3457 * 4. Set access bits if required.
3458 */
3459 int rc;
3460 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3461 if (cb <= cb1)
3462 {
3463 /*
3464 * Not crossing pages.
3465 */
3466 RTGCPHYS GCPhys;
3467 uint64_t fFlags;
3468 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3469 if (RT_SUCCESS(rc))
3470 {
3471 /** @todo we should check reserved bits ... */
3472 PGMPAGEMAPLOCK PgMpLck;
3473 void const *pvSrc;
3474 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3475 switch (rc)
3476 {
3477 case VINF_SUCCESS:
3478 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3479 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3480 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3481 break;
3482 case VERR_PGM_PHYS_PAGE_RESERVED:
3483 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3484 memset(pvDst, 0xff, cb);
3485 break;
3486 default:
3487 Assert(RT_FAILURE_NP(rc));
3488 return rc;
3489 }
3490
3491 /** @todo access bit emulation isn't 100% correct. */
3492 if (!(fFlags & X86_PTE_A))
3493 {
3494 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3495 AssertRC(rc);
3496 }
3497 return VINF_SUCCESS;
3498 }
3499 }
3500 else
3501 {
3502 /*
3503 * Crosses pages.
3504 */
3505 size_t cb2 = cb - cb1;
3506 uint64_t fFlags1;
3507 RTGCPHYS GCPhys1;
3508 uint64_t fFlags2;
3509 RTGCPHYS GCPhys2;
3510 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3511 if (RT_SUCCESS(rc))
3512 {
3513 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3514 if (RT_SUCCESS(rc))
3515 {
3516 /** @todo we should check reserved bits ... */
3517 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3518 PGMPAGEMAPLOCK PgMpLck;
3519 void const *pvSrc1;
3520 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3521 switch (rc)
3522 {
3523 case VINF_SUCCESS:
3524 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3525 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3526 break;
3527 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3528 memset(pvDst, 0xff, cb1);
3529 break;
3530 default:
3531 Assert(RT_FAILURE_NP(rc));
3532 return rc;
3533 }
3534
3535 void const *pvSrc2;
3536 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3537 switch (rc)
3538 {
3539 case VINF_SUCCESS:
3540 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3541 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3542 break;
3543 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3544 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3545 break;
3546 default:
3547 Assert(RT_FAILURE_NP(rc));
3548 return rc;
3549 }
3550
3551 if (!(fFlags1 & X86_PTE_A))
3552 {
3553 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3554 AssertRC(rc);
3555 }
3556 if (!(fFlags2 & X86_PTE_A))
3557 {
3558 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3559 AssertRC(rc);
3560 }
3561 return VINF_SUCCESS;
3562 }
3563 }
3564 }
3565
3566 /*
3567 * Raise a #PF.
3568 */
3569 uint32_t uErr;
3570
3571 /* Get the current privilege level. */
3572 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3573 switch (rc)
3574 {
3575 case VINF_SUCCESS:
3576 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3577 break;
3578
3579 case VERR_PAGE_NOT_PRESENT:
3580 case VERR_PAGE_TABLE_NOT_PRESENT:
3581 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3582 break;
3583
3584 default:
3585 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3586 return rc;
3587 }
3588 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3589 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3590}
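
/*
 * Usage sketch: fetching instruction bytes during emulation.  The helper below
 * is hypothetical; only the PGMPhysInterpretedRead call and its documented
 * status codes come from the API above.
 */
#if 0 /* illustrative snippet, not compiled */
static int pgmSampleFetchOpcodeBytes(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrPC, uint8_t *pabInstr, size_t cbInstr)
{
    int rc = PGMPhysInterpretedRead(pVCpu, pCtxCore, pabInstr, GCPtrPC, cbInstr);
    /* VINF_EM_RAW_GUEST_TRAP and VINF_TRPM_XCPT_DISPATCHED indicate a #PF was
       raised for the guest and must be propagated, not treated as a read. */
    return rc;
}
#endif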
3591
3592
3593/**
3594 * Performs a read of guest virtual memory for instruction emulation.
3595 *
3596 * This will check permissions, raise exceptions and update the access bits.
3597 *
3598 * The current implementation will bypass all access handlers. It may later be
3599 * changed to at least respect MMIO.
3600 *
3601 *
3602 * @returns VBox status code suitable to scheduling.
3603 * @retval VINF_SUCCESS if the read was performed successfully.
3604 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3605 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3606 *
3607 * @param pVCpu Handle to the current virtual CPU.
3608 * @param pCtxCore The context core.
3609 * @param pvDst Where to put the bytes we've read.
3610 * @param GCPtrSrc The source address.
3611 * @param cb The number of bytes to read. Not more than a page.
3612 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3613 *                      an appropriate error status will be returned instead
3614 *                      (no informational status at all).
3615 *
3616 *
3617 * @remarks Takes the PGM lock.
3618 * @remarks A page fault on the 2nd page of the access will be raised without
3619 * writing the bits on the first page since we're ASSUMING that the
3620 * caller is emulating an instruction access.
3621 * @remarks This function will dynamically map physical pages in GC. This may
3622 * unmap mappings done by the caller. Be careful!
3623 */
3624VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3625 bool fRaiseTrap)
3626{
3627 PVM pVM = pVCpu->CTX_SUFF(pVM);
3628 Assert(cb <= PAGE_SIZE);
3629 VMCPU_ASSERT_EMT(pVCpu);
3630
3631 /*
3632 * 1. Translate virtual to physical. This may fault.
3633 * 2. Map the physical address.
3634 * 3. Do the read operation.
3635 * 4. Set access bits if required.
3636 */
3637 int rc;
3638 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3639 if (cb <= cb1)
3640 {
3641 /*
3642 * Not crossing pages.
3643 */
3644 RTGCPHYS GCPhys;
3645 uint64_t fFlags;
3646 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3647 if (RT_SUCCESS(rc))
3648 {
3649 if (1) /** @todo we should check reserved bits ... */
3650 {
3651 const void *pvSrc;
3652 PGMPAGEMAPLOCK Lock;
3653 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3654 switch (rc)
3655 {
3656 case VINF_SUCCESS:
3657 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3658 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3659 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3660 PGMPhysReleasePageMappingLock(pVM, &Lock);
3661 break;
3662 case VERR_PGM_PHYS_PAGE_RESERVED:
3663 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3664 memset(pvDst, 0xff, cb);
3665 break;
3666 default:
3667 AssertMsgFailed(("%Rrc\n", rc));
3668 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3669 return rc;
3670 }
3671
3672 if (!(fFlags & X86_PTE_A))
3673 {
3674 /** @todo access bit emulation isn't 100% correct. */
3675 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3676 AssertRC(rc);
3677 }
3678 return VINF_SUCCESS;
3679 }
3680 }
3681 }
3682 else
3683 {
3684 /*
3685 * Crosses pages.
3686 */
3687 size_t cb2 = cb - cb1;
3688 uint64_t fFlags1;
3689 RTGCPHYS GCPhys1;
3690 uint64_t fFlags2;
3691 RTGCPHYS GCPhys2;
3692 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3693 if (RT_SUCCESS(rc))
3694 {
3695 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3696 if (RT_SUCCESS(rc))
3697 {
3698 if (1) /** @todo we should check reserved bits ... */
3699 {
3700 const void *pvSrc;
3701 PGMPAGEMAPLOCK Lock;
3702 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3703 switch (rc)
3704 {
3705 case VINF_SUCCESS:
3706 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3707 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3708 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3709 PGMPhysReleasePageMappingLock(pVM, &Lock);
3710 break;
3711 case VERR_PGM_PHYS_PAGE_RESERVED:
3712 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3713 memset(pvDst, 0xff, cb1);
3714 break;
3715 default:
3716 AssertMsgFailed(("%Rrc\n", rc));
3717 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3718 return rc;
3719 }
3720
3721 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3722 switch (rc)
3723 {
3724 case VINF_SUCCESS:
3725 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3726 PGMPhysReleasePageMappingLock(pVM, &Lock);
3727 break;
3728 case VERR_PGM_PHYS_PAGE_RESERVED:
3729 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3730 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3731 break;
3732 default:
3733 AssertMsgFailed(("%Rrc\n", rc));
3734 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3735 return rc;
3736 }
3737
3738 if (!(fFlags1 & X86_PTE_A))
3739 {
3740 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3741 AssertRC(rc);
3742 }
3743 if (!(fFlags2 & X86_PTE_A))
3744 {
3745 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3746 AssertRC(rc);
3747 }
3748 return VINF_SUCCESS;
3749 }
3750 /* sort out which page */
3751 }
3752 else
3753 GCPtrSrc += cb1; /* fault on 2nd page */
3754 }
3755 }
3756
3757 /*
3758 * Raise a #PF if we're allowed to do that.
3759 */
3760 /* Calc the error bits. */
3761 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3762 uint32_t uErr;
3763 switch (rc)
3764 {
3765 case VINF_SUCCESS:
3766 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3767 rc = VERR_ACCESS_DENIED;
3768 break;
3769
3770 case VERR_PAGE_NOT_PRESENT:
3771 case VERR_PAGE_TABLE_NOT_PRESENT:
3772 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3773 break;
3774
3775 default:
3776 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3777 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3778 return rc;
3779 }
3780 if (fRaiseTrap)
3781 {
3782 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3783 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3784 }
3785 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3786 return rc;
3787}
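
/*
 * Usage sketch: probing guest memory without raising a trap.  The helper below
 * is hypothetical; it relies only on the fRaiseTrap=false behaviour documented
 * above, where an error status such as VERR_ACCESS_DENIED or
 * VERR_PAGE_NOT_PRESENT is returned instead of injecting a #PF.
 */
#if 0 /* illustrative snippet, not compiled */
static bool pgmSampleTryPeekGuestByte(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint8_t *pbDst)
{
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pbDst, GCPtrSrc, 1, false /*fRaiseTrap*/);
    return RT_SUCCESS(rc);
}
#endif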
3788
3789
3790/**
3791 * Performs a write to guest virtual memory for instruction emulation.
3792 *
3793 * This will check permissions, raise exceptions and update the dirty and access
3794 * bits.
3795 *
3796 * @returns VBox status code suitable to scheduling.
3797 * @retval VINF_SUCCESS if the write was performed successfully.
3798 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3799 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3800 *
3801 * @param pVCpu Handle to the current virtual CPU.
3802 * @param pCtxCore The context core.
3803 * @param GCPtrDst The destination address.
3804 * @param pvSrc What to write.
3805 * @param cb The number of bytes to write. Not more than a page.
3806 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3807 *                      an appropriate error status will be returned instead
3808 *                      (no informational status at all).
3809 *
3810 * @remarks Takes the PGM lock.
3811 * @remarks A page fault on the 2nd page of the access will be raised without
3812 * writing the bits on the first page since we're ASSUMING that the
3813 * caller is emulating an instruction access.
3814 * @remarks This function will dynamically map physical pages in GC. This may
3815 * unmap mappings done by the caller. Be careful!
3816 */
3817VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3818 size_t cb, bool fRaiseTrap)
3819{
3820 Assert(cb <= PAGE_SIZE);
3821 PVM pVM = pVCpu->CTX_SUFF(pVM);
3822 VMCPU_ASSERT_EMT(pVCpu);
3823
3824 /*
3825 * 1. Translate virtual to physical. This may fault.
3826 * 2. Map the physical address.
3827 * 3. Do the write operation.
3828 * 4. Set access bits if required.
3829 */
3830 /** @todo Since this method is frequently used by EMInterpret or IOM
3831 * upon a write fault to a write access monitored page, we can
3832 * reuse the guest page table walking from the \#PF code. */
3833 int rc;
3834 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3835 if (cb <= cb1)
3836 {
3837 /*
3838 * Not crossing pages.
3839 */
3840 RTGCPHYS GCPhys;
3841 uint64_t fFlags;
3842 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3843 if (RT_SUCCESS(rc))
3844 {
3845 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3846 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3847 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3848 {
3849 void *pvDst;
3850 PGMPAGEMAPLOCK Lock;
3851 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3852 switch (rc)
3853 {
3854 case VINF_SUCCESS:
3855 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3856 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3857 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3858 PGMPhysReleasePageMappingLock(pVM, &Lock);
3859 break;
3860 case VERR_PGM_PHYS_PAGE_RESERVED:
3861 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3862 /* bit bucket */
3863 break;
3864 default:
3865 AssertMsgFailed(("%Rrc\n", rc));
3866 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3867 return rc;
3868 }
3869
3870 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3871 {
3872 /** @todo dirty & access bit emulation isn't 100% correct. */
3873 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3874 AssertRC(rc);
3875 }
3876 return VINF_SUCCESS;
3877 }
3878 rc = VERR_ACCESS_DENIED;
3879 }
3880 }
3881 else
3882 {
3883 /*
3884 * Crosses pages.
3885 */
3886 size_t cb2 = cb - cb1;
3887 uint64_t fFlags1;
3888 RTGCPHYS GCPhys1;
3889 uint64_t fFlags2;
3890 RTGCPHYS GCPhys2;
3891 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3892 if (RT_SUCCESS(rc))
3893 {
3894 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3895 if (RT_SUCCESS(rc))
3896 {
3897 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3898 && (fFlags2 & X86_PTE_RW))
3899 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3900 && CPUMGetGuestCPL(pVCpu) <= 2) )
3901 {
3902 void *pvDst;
3903 PGMPAGEMAPLOCK Lock;
3904 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3905 switch (rc)
3906 {
3907 case VINF_SUCCESS:
3908 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3909 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3910 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3911 PGMPhysReleasePageMappingLock(pVM, &Lock);
3912 break;
3913 case VERR_PGM_PHYS_PAGE_RESERVED:
3914 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3915 /* bit bucket */
3916 break;
3917 default:
3918 AssertMsgFailed(("%Rrc\n", rc));
3919 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3920 return rc;
3921 }
3922
3923 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3924 switch (rc)
3925 {
3926 case VINF_SUCCESS:
3927 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3928 PGMPhysReleasePageMappingLock(pVM, &Lock);
3929 break;
3930 case VERR_PGM_PHYS_PAGE_RESERVED:
3931 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3932 /* bit bucket */
3933 break;
3934 default:
3935 AssertMsgFailed(("%Rrc\n", rc));
3936 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3937 return rc;
3938 }
3939
3940 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3941 {
3942 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3943 AssertRC(rc);
3944 }
3945 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3946 {
3947 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3948 AssertRC(rc);
3949 }
3950 return VINF_SUCCESS;
3951 }
3952 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3953 GCPtrDst += cb1; /* fault on the 2nd page. */
3954 rc = VERR_ACCESS_DENIED;
3955 }
3956 else
3957 GCPtrDst += cb1; /* fault on the 2nd page. */
3958 }
3959 }
3960
3961 /*
3962 * Raise a #PF if we're allowed to do that.
3963 */
3964 /* Calc the error bits. */
3965 uint32_t uErr;
3966 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3967 switch (rc)
3968 {
3969 case VINF_SUCCESS:
3970 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3971 rc = VERR_ACCESS_DENIED;
3972 break;
3973
3974 case VERR_ACCESS_DENIED:
3975 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3976 break;
3977
3978 case VERR_PAGE_NOT_PRESENT:
3979 case VERR_PAGE_TABLE_NOT_PRESENT:
3980 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3981 break;
3982
3983 default:
3984 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3985 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3986 return rc;
3987 }
3988 if (fRaiseTrap)
3989 {
3990 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3991 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3992 }
3993 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3994 return rc;
3995}
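
/*
 * Usage sketch: attempting an emulated write without injecting a #PF on
 * failure.  The helper below is hypothetical; it only exercises the
 * fRaiseTrap=false path documented above.
 */
#if 0 /* illustrative snippet, not compiled */
static int pgmSampleTryWriteGuestByte(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint8_t bValue)
{
    /* On a read-only or not-present mapping this returns VERR_ACCESS_DENIED or
       VERR_PAGE_NOT_PRESENT instead of raising the exception for the guest. */
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &bValue, 1, false /*fRaiseTrap*/);
}
#endif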
3996
3997
3998/**
3999 * Return the page type of the specified physical address.
4000 *
4001 * @returns The page type.
4002 * @param pVM Pointer to the VM.
4003 * @param GCPhys Guest physical address
4004 */
4005VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
4006{
4007 pgmLock(pVM);
4008 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4009 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4010 pgmUnlock(pVM);
4011
4012 return enmPgType;
4013}
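
/*
 * Usage sketch: a hypothetical helper checking whether a guest physical
 * address is backed by any page at all, using the type query above.
 */
#if 0 /* illustrative snippet, not compiled */
static bool pgmSampleHasPage(PVM pVM, RTGCPHYS GCPhys)
{
    /* PGMPAGETYPE_INVALID is returned when nothing is mapped at GCPhys. */
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif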
4014
4015
4016
4017
4018/**
4019 * Converts a GC physical address to a HC ring-3 pointer, with some
4020 * additional checks.
4021 *
4022 * @returns VBox status code (no informational statuses).
4023 * @retval VINF_SUCCESS on success.
4024 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4025 * access handler of some kind.
4026 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4027 * accesses or is odd in any way.
4028 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4029 *
4030 * @param pVM Pointer to the VM.
4031 * @param GCPhys The GC physical address to convert. Since this is only
4032 * used for filling the REM TLB, the A20 mask must be
4033 * applied before calling this API.
4034 * @param fWritable        Whether write access is required.
4035 * @param fByPassHandlers  Whether to bypass access handlers.
4036 * @param ppv              Where to store the pointer corresponding to GCPhys on success.
4037 * @param pLock            Where to store the lock for use with PGMPhysReleasePageMappingLock.
4038 *
4039 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4040 */
4041VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4042 void **ppv, PPGMPAGEMAPLOCK pLock)
4043{
4044 pgmLock(pVM);
4045 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4046
4047 PPGMRAMRANGE pRam;
4048 PPGMPAGE pPage;
4049 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4050 if (RT_SUCCESS(rc))
4051 {
4052 if (PGM_PAGE_IS_BALLOONED(pPage))
4053 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4054 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4055 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4056 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4057 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4058 rc = VINF_SUCCESS;
4059 else
4060 {
4061 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4062 {
4063 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4064 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4065 }
4066 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4067 {
4068 Assert(!fByPassHandlers);
4069 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4070 }
4071 }
4072 if (RT_SUCCESS(rc))
4073 {
4074 int rc2;
4075
4076 /* Make sure what we return is writable. */
4077 if (fWritable)
4078 switch (PGM_PAGE_GET_STATE(pPage))
4079 {
4080 case PGM_PAGE_STATE_ALLOCATED:
4081 break;
4082 case PGM_PAGE_STATE_BALLOONED:
4083 AssertFailed();
4084 case PGM_PAGE_STATE_ZERO:
4085 case PGM_PAGE_STATE_SHARED:
4086 case PGM_PAGE_STATE_WRITE_MONITORED:
4087 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4088 AssertLogRelRCReturn(rc2, rc2);
4089 break;
4090 }
4091
4092#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4093 PVMCPU pVCpu = VMMGetCpu(pVM);
4094 void *pv;
4095 rc = pgmRZDynMapHCPageInlined(pVCpu,
4096 PGM_PAGE_GET_HCPHYS(pPage),
4097 &pv
4098 RTLOG_COMMA_SRC_POS);
4099 if (RT_FAILURE(rc))
4100 return rc;
4101 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4102 pLock->pvPage = pv;
4103 pLock->pVCpu = pVCpu;
4104
4105#else
4106 /* Get a ring-3 mapping of the address. */
4107 PPGMPAGER3MAPTLBE pTlbe;
4108 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4109 AssertLogRelRCReturn(rc2, rc2);
4110
4111 /* Lock it and calculate the address. */
4112 if (fWritable)
4113 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4114 else
4115 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4116 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4117#endif
4118
4119 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4120 }
4121 else
4122 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4123
4124 /* else: handler catching all access, no pointer returned. */
4125 }
4126 else
4127 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4128
4129 pgmUnlock(pVM);
4130 return rc;
4131}
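
/*
 * Usage sketch: mapping a guest physical page for a direct write and releasing
 * the lock again.  The helper below is hypothetical; GCPhys is assumed to have
 * the A20 mask already applied as required above.
 */
#if 0 /* illustrative snippet, not compiled */
static int pgmSamplePokeGuestByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* handlers were already vetted above */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* always pair the mapping with a release */
    }
    /* VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL mean the access has to take the
       slower, handler aware route (PGMPhysWrite) instead. */
    return rc;
}
#endif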
4132
4133
4134/**
4135 * Checks if the given GCPhys page requires special handling for the given access
4136 * because it's MMIO or otherwise monitored.
4137 *
4138 * @returns VBox status code (no informational statuses).
4139 * @retval VINF_SUCCESS on success.
4140 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE if the page has a write access
4141 * handler of some kind.
4142 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4143 * accesses or is odd in any way.
4144 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4145 *
4146 * @param pVM Pointer to the VM.
4147 * @param GCPhys The GC physical address to convert. Since this is only
4148 * used for filling the REM TLB, the A20 mask must be
4149 * applied before calling this API.
4150 * @param fWritable        Whether write access is required.
4151 * @param fByPassHandlers  Whether to bypass access handlers.
4152 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4153 * a stop gap thing that should be removed once there is a better TLB
4154 * for virtual address accesses.
4155 */
4156VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4157{
4158 pgmLock(pVM);
4159 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4160
4161 PPGMRAMRANGE pRam;
4162 PPGMPAGE pPage;
4163 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4164 if (RT_SUCCESS(rc))
4165 {
4166 if (PGM_PAGE_IS_BALLOONED(pPage))
4167 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4168 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4169 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4170 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4171 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4172 rc = VINF_SUCCESS;
4173 else
4174 {
4175 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4176 {
4177 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4178 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4179 }
4180 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4181 {
4182 Assert(!fByPassHandlers);
4183 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4184 }
4185 }
4186 }
4187
4188 pgmUnlock(pVM);
4189 return rc;
4190}
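
/*
 * Usage sketch: a hypothetical check deciding whether a direct access is safe
 * or whether the handler-aware path has to be taken, based on the query above.
 */
#if 0 /* illustrative snippet, not compiled */
static bool pgmSampleCanAccessDirectly(PVM pVM, RTGCPHYS GCPhys, bool fWritable)
{
    /* VINF_SUCCESS means plain RAM without relevant handlers; any of the
       VERR_PGM_PHYS_TLB_* statuses means special handling is required. */
    return PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, false /*fByPassHandlers*/) == VINF_SUCCESS;
}
#endif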
4191