VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 47660

Last change on this file since 47660 was 46420, checked in by vboxsync, 12 years ago

VMM, recompiler: Purge deprecated macros.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 145.4 KB
1/* $Id: PGMAllPhys.cpp 46420 2013-06-06 16:27:25Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param uErrorCode CPU Error code.
61 * @param pRegFrame Trap register frame.
62 * @param pvFault The fault address (cr2).
63 * @param GCPhysFault The GC physical address corresponding to pvFault.
64 * @param pvUser User argument.
65 */
66VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
67{
68 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
69 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
70}
71
72
73/**
74 * \#PF Handler callback for Guest ROM range write access.
75 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
76 *
77 * @returns VBox status code (appropriate for trap handling and GC return).
78 * @param pVM Pointer to the VM.
79 * @param uErrorCode CPU Error code.
80 * @param pRegFrame Trap register frame.
81 * @param pvFault The fault address (cr2).
82 * @param GCPhysFault The GC physical address corresponding to pvFault.
83 * @param pvUser User argument. Pointer to the ROM range structure.
84 */
85VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
86{
87 int rc;
88 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
89 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
90 PVMCPU pVCpu = VMMGetCpu(pVM);
91 NOREF(uErrorCode); NOREF(pvFault);
92
93 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
94
95 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
96 switch (pRom->aPages[iPage].enmProt)
97 {
98 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
99 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
100 {
101 /*
102 * If it's a simple instruction which doesn't change the cpu state
103 * we will simply skip it. Otherwise we'll have to defer it to REM.
104 */
105 uint32_t cbOp;
106 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
107 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
108 if ( RT_SUCCESS(rc)
109 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
110 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
111 {
112 switch (pDis->bOpCode)
113 {
114 /** @todo Find other instructions we can safely skip, possibly
115 * adding this kind of detection to DIS or EM. */
116 case OP_MOV:
117 pRegFrame->rip += cbOp;
118 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
119 return VINF_SUCCESS;
120 }
121 }
122 break;
123 }
124
125 case PGMROMPROT_READ_RAM_WRITE_RAM:
126 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
127 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
128 AssertRC(rc);
129 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
130
131 case PGMROMPROT_READ_ROM_WRITE_RAM:
132 /* Handle it in ring-3 because it's *way* easier there. */
133 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
134 break;
135
136 default:
137 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
138 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
139 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
140 }
141
142 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
143 return VINF_EM_RAW_EMULATE_INSTR;
144}
145
146#endif /* !IN_RING3 */
147
148/**
149 * Invalidates the RAM range TLBs.
150 *
151 * @param pVM Pointer to the VM.
152 */
153void pgmPhysInvalidRamRangeTlbs(PVM pVM)
154{
155 pgmLock(pVM);
156 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
157 {
158 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
159 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
160 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
161 }
162 pgmUnlock(pVM);
163}
164
165
166/**
167 * Tests if a value of type RTGCPHYS would be negative if the type had been signed
168 * instead of unsigned.
169 *
170 * @returns @c true if negative, @c false if positive or zero.
171 * @param a_GCPhys The value to test.
172 * @todo Move me to iprt/types.h.
173 */
174#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
175
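/*
 * Worked example (illustrative, assuming a 64-bit RTGCPHYS): the AVL walkers
 * below combine this sign test with unsigned subtraction.  Computing
 * off = GCPhys - pRam->GCPhys wraps around when GCPhys lies below the range
 * start, which sets the most significant bit and tells the walker to go left:
 *
 *      GCPhys = 0x00001000, pRam->GCPhys = 0x00002000
 *          off = 0xFFFFFFFFFFFFF000              -> MSB set   -> descend left
 *      GCPhys = 0x00003000, pRam->GCPhys = 0x00002000, pRam->cb = 0x1000
 *          off = 0x0000000000001000, !(off < cb) -> MSB clear -> descend right
 *      GCPhys = 0x00002800, pRam->GCPhys = 0x00002000, pRam->cb = 0x1000
 *          off = 0x0000000000000800,   off < cb  -> hit, cache it in the TLB
 */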
176
177/**
178 * Slow worker for pgmPhysGetRange.
179 *
180 * @copydoc pgmPhysGetRange
181 */
182PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
183{
184 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
185
186 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
187 while (pRam)
188 {
189 RTGCPHYS off = GCPhys - pRam->GCPhys;
190 if (off < pRam->cb)
191 {
192 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
193 return pRam;
194 }
195 if (RTGCPHYS_IS_NEGATIVE(off))
196 pRam = pRam->CTX_SUFF(pLeft);
197 else
198 pRam = pRam->CTX_SUFF(pRight);
199 }
200 return NULL;
201}
202
203
204/**
205 * Slow worker for pgmPhysGetRangeAtOrAbove.
206 *
207 * @copydoc pgmPhysGetRangeAtOrAbove
208 */
209PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
210{
211 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
212
213 PPGMRAMRANGE pLastLeft = NULL;
214 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
215 while (pRam)
216 {
217 RTGCPHYS off = GCPhys - pRam->GCPhys;
218 if (off < pRam->cb)
219 {
220 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
221 return pRam;
222 }
223 if (RTGCPHYS_IS_NEGATIVE(off))
224 {
225 pLastLeft = pRam;
226 pRam = pRam->CTX_SUFF(pLeft);
227 }
228 else
229 pRam = pRam->CTX_SUFF(pRight);
230 }
231 return pLastLeft;
232}
233
234
235/**
236 * Slow worker for pgmPhysGetPage.
237 *
238 * @copydoc pgmPhysGetPage
239 */
240PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
241{
242 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
243
244 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
245 while (pRam)
246 {
247 RTGCPHYS off = GCPhys - pRam->GCPhys;
248 if (off < pRam->cb)
249 {
250 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
251 return &pRam->aPages[off >> PAGE_SHIFT];
252 }
253
254 if (RTGCPHYS_IS_NEGATIVE(off))
255 pRam = pRam->CTX_SUFF(pLeft);
256 else
257 pRam = pRam->CTX_SUFF(pRight);
258 }
259 return NULL;
260}
261
262
263/**
264 * Slow worker for pgmPhysGetPageEx.
265 *
266 * @copydoc pgmPhysGetPageEx
267 */
268int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
269{
270 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
271
272 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
273 while (pRam)
274 {
275 RTGCPHYS off = GCPhys - pRam->GCPhys;
276 if (off < pRam->cb)
277 {
278 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
279 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
280 return VINF_SUCCESS;
281 }
282
283 if (RTGCPHYS_IS_NEGATIVE(off))
284 pRam = pRam->CTX_SUFF(pLeft);
285 else
286 pRam = pRam->CTX_SUFF(pRight);
287 }
288
289 *ppPage = NULL;
290 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
291}
292
293
294/**
295 * Slow worker for pgmPhysGetPageAndRangeEx.
296 *
297 * @copydoc pgmPhysGetPageAndRangeEx
298 */
299int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
300{
301 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
302
303 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
304 while (pRam)
305 {
306 RTGCPHYS off = GCPhys - pRam->GCPhys;
307 if (off < pRam->cb)
308 {
309 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
310 *ppRam = pRam;
311 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
312 return VINF_SUCCESS;
313 }
314
315 if (RTGCPHYS_IS_NEGATIVE(off))
316 pRam = pRam->CTX_SUFF(pLeft);
317 else
318 pRam = pRam->CTX_SUFF(pRight);
319 }
320
321 *ppRam = NULL;
322 *ppPage = NULL;
323 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
324}
325
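/*
 * Usage sketch for pgmPhysGetPageEx, for which the functions above are the
 * slow workers (illustrative only; error handling trimmed): callers resolve a
 * guest physical address to its PGMPAGE while holding the PGM lock and then
 * inspect the page before acting on it.
 *
 *      pgmLock(pVM);
 *      PPGMPAGE pPage;
 *      int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *      if (RT_SUCCESS(rc))
 *      {
 *          if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
 *          {
 *              ... operate on the RAM page ...
 *          }
 *      }
 *      else
 *          Assert(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
 *      pgmUnlock(pVM);
 */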
326
327/**
328 * Checks if Address Gate 20 is enabled or not.
329 *
330 * @returns true if enabled.
331 * @returns false if disabled.
332 * @param pVCpu Pointer to the VMCPU.
333 */
334VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
335{
336 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
337 return pVCpu->pgm.s.fA20Enabled;
338}
339
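/*
 * Conceptual sketch of what the A20 gate implies for physical addressing
 * (the actual masking is done by PGM internal macros; this is only an
 * illustration of the idea): when the gate is disabled, physical address
 * bit 20 is forced to zero so addresses wrap at 1 MB like on the 8086.
 *
 *      RTGCPHYS GCPhysMasked = GCPhys;
 *      if (!PGMPhysIsA20Enabled(pVCpu))
 *          GCPhysMasked &= ~(RTGCPHYS)RT_BIT_32(20);
 */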
340
341/**
342 * Validates a GC physical address.
343 *
344 * @returns true if valid.
345 * @returns false if invalid.
346 * @param pVM Pointer to the VM.
347 * @param GCPhys The physical address to validate.
348 */
349VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
350{
351 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
352 return pPage != NULL;
353}
354
355
356/**
357 * Checks if a GC physical address is a normal page,
358 * i.e. not ROM, MMIO or reserved.
359 *
360 * @returns true if normal.
361 * @returns false if invalid, ROM, MMIO or reserved page.
362 * @param pVM Pointer to the VM.
363 * @param GCPhys The physical address to check.
364 */
365VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
366{
367 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
368 return pPage
369 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
370}
371
372
373/**
374 * Converts a GC physical address to a HC physical address.
375 *
376 * @returns VINF_SUCCESS on success.
377 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
378 * page but has no physical backing.
379 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
380 * GC physical address.
381 *
382 * @param pVM Pointer to the VM.
383 * @param GCPhys The GC physical address to convert.
384 * @param pHCPhys Where to store the HC physical address on success.
385 */
386VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
387{
388 pgmLock(pVM);
389 PPGMPAGE pPage;
390 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
391 if (RT_SUCCESS(rc))
392 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
393 pgmUnlock(pVM);
394 return rc;
395}
396
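/*
 * Usage sketch (illustrative only): converting a guest physical address to
 * the backing host physical address, e.g. for code that needs real machine
 * addresses.
 *
 *      RTHCPHYS HCPhys;
 *      int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ... HCPhys is the host physical address of the byte at GCPhys ...
 *      }
 *      else if (rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
 *      {
 *          ... GCPhys is not covered by any registered RAM range ...
 *      }
 */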
397
398/**
399 * Invalidates all page mapping TLBs.
400 *
401 * @param pVM Pointer to the VM.
402 */
403void pgmPhysInvalidatePageMapTLB(PVM pVM)
404{
405 pgmLock(pVM);
406 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
407
408 /* Clear the shared R0/R3 TLB completely. */
409 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
410 {
411 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
412 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
413 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
414 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
415 }
416
417 /** @todo clear the RC TLB whenever we add it. */
418
419 pgmUnlock(pVM);
420}
421
422
423/**
424 * Invalidates a page mapping TLB entry.
425 *
426 * @param pVM Pointer to the VM.
427 * @param GCPhys The GCPhys entry to flush.
428 */
429void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
430{
431 PGM_LOCK_ASSERT_OWNER(pVM);
432
433 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
434
435#ifdef IN_RC
436 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
437 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
438 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
439 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
440 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
441#else
442 /* Clear the shared R0/R3 TLB entry. */
443 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
444 pTlbe->GCPhys = NIL_RTGCPHYS;
445 pTlbe->pPage = 0;
446 pTlbe->pMap = 0;
447 pTlbe->pv = 0;
448#endif
449
450 /** @todo clear the RC TLB whenever we add it. */
451}
452
453/**
454 * Makes sure that there is at least one handy page ready for use.
455 *
456 * This will also take the appropriate actions when reaching water-marks.
457 *
458 * @returns VBox status code.
459 * @retval VINF_SUCCESS on success.
460 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
461 *
462 * @param pVM Pointer to the VM.
463 *
464 * @remarks Must be called from within the PGM critical section. It may
465 * nip back to ring-3/0 in some cases.
466 */
467static int pgmPhysEnsureHandyPage(PVM pVM)
468{
469 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
470
471 /*
472 * Do we need to do anything special?
473 */
474#ifdef IN_RING3
475 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
476#else
477 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
478#endif
479 {
480 /*
481 * Allocate pages only if we're out of them, or in ring-3, almost out.
482 */
483#ifdef IN_RING3
484 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
485#else
486 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
487#endif
488 {
489 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
490 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
491#ifdef IN_RING3
492 int rc = PGMR3PhysAllocateHandyPages(pVM);
493#else
494 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
495#endif
496 if (RT_UNLIKELY(rc != VINF_SUCCESS))
497 {
498 if (RT_FAILURE(rc))
499 return rc;
500 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
501 if (!pVM->pgm.s.cHandyPages)
502 {
503 LogRel(("PGM: no more handy pages!\n"));
504 return VERR_EM_NO_MEMORY;
505 }
506 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
507 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
508#ifdef IN_RING3
509# ifdef VBOX_WITH_REM
510 REMR3NotifyFF(pVM);
511# endif
512#else
513 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
514#endif
515 }
516 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
517 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
518 ("%u\n", pVM->pgm.s.cHandyPages),
519 VERR_PGM_HANDY_PAGE_IPE);
520 }
521 else
522 {
523 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
524 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
525#ifndef IN_RING3
526 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
527 {
528 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
529 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
530 }
531#endif
532 }
533 }
534
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Replace a zero or shared page with new page that we can write to.
541 *
542 * @returns The following VBox status codes.
543 * @retval VINF_SUCCESS on success, pPage is modified.
544 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
545 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
546 *
547 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
548 *
549 * @param pVM Pointer to the VM.
550 * @param pPage The physical page tracking structure. This will
551 * be modified on success.
552 * @param GCPhys The address of the page.
553 *
554 * @remarks Must be called from within the PGM critical section. It may
555 * nip back to ring-3/0 in some cases.
556 *
557 * @remarks This function shouldn't really fail, however if it does
558 * it probably means we've screwed up the size of handy pages and/or
559 * the low-water mark. Or, that some device I/O is causing a lot of
560 * pages to be allocated while the host is in a low-memory
561 * condition. This latter should be handled elsewhere and in a more
562 * controlled manner, it's on the @bugref{3170} todo list...
563 */
564int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
565{
566 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
567
568 /*
569 * Prereqs.
570 */
571 PGM_LOCK_ASSERT_OWNER(pVM);
572 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
573 Assert(!PGM_PAGE_IS_MMIO(pPage));
574
575# ifdef PGM_WITH_LARGE_PAGES
576 /*
577 * Try allocate a large page if applicable.
578 */
579 if ( PGMIsUsingLargePages(pVM)
580 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
581 {
582 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
583 PPGMPAGE pBasePage;
584
585 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
586 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
587 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
588 {
589 rc = pgmPhysAllocLargePage(pVM, GCPhys);
590 if (rc == VINF_SUCCESS)
591 return rc;
592 }
593 /* Mark the base as type page table, so we don't check over and over again. */
594 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
595
596 /* fall back to 4KB pages. */
597 }
598# endif
599
600 /*
601 * Flush any shadow page table mappings of the page.
602 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
603 */
604 bool fFlushTLBs = false;
605 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
606 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
607
608 /*
609 * Ensure that we've got a page handy, take it and use it.
610 */
611 int rc2 = pgmPhysEnsureHandyPage(pVM);
612 if (RT_FAILURE(rc2))
613 {
614 if (fFlushTLBs)
615 PGM_INVL_ALL_VCPU_TLBS(pVM);
616 Assert(rc2 == VERR_EM_NO_MEMORY);
617 return rc2;
618 }
619 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
620 PGM_LOCK_ASSERT_OWNER(pVM);
621 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
622 Assert(!PGM_PAGE_IS_MMIO(pPage));
623
624 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
625 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
626 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
627 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
628 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
629 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
630
631 /*
632 * There are one or two actions to be taken the next time we allocate handy pages:
633 * - Tell the GMM (global memory manager) what the page is being used for.
634 * (Speeds up replacement operations - sharing and defragmenting.)
635 * - If the current backing is shared, it must be freed.
636 */
637 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
638 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
639
640 void const *pvSharedPage = NULL;
641 if (PGM_PAGE_IS_SHARED(pPage))
642 {
643 /* Mark this shared page for freeing/dereferencing. */
644 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
645 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
646
647 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
648 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
649 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
650 pVM->pgm.s.cSharedPages--;
651
652 /* Grab the address of the page so we can make a copy later on. (safe) */
653 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
654 AssertRC(rc);
655 }
656 else
657 {
658 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
659 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
660 pVM->pgm.s.cZeroPages--;
661 }
662
663 /*
664 * Do the PGMPAGE modifications.
665 */
666 pVM->pgm.s.cPrivatePages++;
667 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
668 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
669 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
670 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
671 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
672
673 /* Copy the shared page contents to the replacement page. */
674 if (pvSharedPage)
675 {
676 /* Get the virtual address of the new page. */
677 PGMPAGEMAPLOCK PgMpLck;
678 void *pvNewPage;
679 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
680 if (RT_SUCCESS(rc))
681 {
682 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
683 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
684 }
685 }
686
687 if ( fFlushTLBs
688 && rc != VINF_PGM_GCPHYS_ALIASED)
689 PGM_INVL_ALL_VCPU_TLBS(pVM);
690 return rc;
691}
692
693#ifdef PGM_WITH_LARGE_PAGES
694
695/**
696 * Replace a 2 MB range of zero pages with new pages that we can write to.
697 *
698 * @returns The following VBox status codes.
699 * @retval VINF_SUCCESS on success.
700 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
701 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
702 *
703 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
704 *
705 * @param pVM Pointer to the VM.
706 * @param GCPhys The address of the page.
707 *
708 * @remarks Must be called from within the PGM critical section. It may
709 * nip back to ring-3/0 in some cases.
710 */
711int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
712{
713 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
714 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
715
716 /*
717 * Prereqs.
718 */
719 PGM_LOCK_ASSERT_OWNER(pVM);
720 Assert(PGMIsUsingLargePages(pVM));
721
722 PPGMPAGE pFirstPage;
723 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
724 if ( RT_SUCCESS(rc)
725 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
726 {
727 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
728
729 /* Don't call this function for already allocated pages. */
730 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
731
732 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
733 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
734 {
735 /* Lazy approach: check all pages in the 2 MB range.
736 * The whole range must be ram and unallocated. */
737 GCPhys = GCPhysBase;
738 unsigned iPage;
739 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
740 {
741 PPGMPAGE pSubPage;
742 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
743 if ( RT_FAILURE(rc)
744 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
745 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
746 {
747 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
748 break;
749 }
750 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
751 GCPhys += PAGE_SIZE;
752 }
753 if (iPage != _2M/PAGE_SIZE)
754 {
755 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
756 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
757 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
758 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
759 }
760
761 /*
762 * Do the allocation.
763 */
764# ifdef IN_RING3
765 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
766# else
767 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
768# endif
769 if (RT_SUCCESS(rc))
770 {
771 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
772 pVM->pgm.s.cLargePages++;
773 return VINF_SUCCESS;
774 }
775
776 /* If we fail once, it most likely means the host's memory is too
777 fragmented; don't bother trying again. */
778 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
779 PGMSetLargePageUsage(pVM, false);
780 return rc;
781 }
782 }
783 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
784}
785
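/*
 * Worked example (illustrative): X86_PDE2M_PAE_PG_MASK drops the low 21 bits,
 * so the scan above always starts at the 2 MB boundary containing the
 * requested page and covers the following 512 small pages:
 *
 *      GCPhys     = 0x08234000
 *      GCPhysBase = 0x08234000 & X86_PDE2M_PAE_PG_MASK = 0x08200000
 *      pages checked: 0x08200000, 0x08201000, ..., 0x083FF000
 */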
786
787/**
788 * Recheck the entire 2 MB range to see if we can use it again as a large page.
789 *
790 * @returns The following VBox status codes.
791 * @retval VINF_SUCCESS on success, the large page can be used again
792 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
793 *
794 * @param pVM Pointer to the VM.
795 * @param GCPhys The address of the page.
796 * @param pLargePage Page structure of the base page
797 */
798int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
799{
800 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
801
802 GCPhys &= X86_PDE2M_PAE_PG_MASK;
803
804 /* Check the base page. */
805 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
806 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
807 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
808 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
809 {
810 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
811 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
812 }
813
814 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
815 /* Check all remaining pages in the 2 MB range. */
816 unsigned i;
817 GCPhys += PAGE_SIZE;
818 for (i = 1; i < _2M/PAGE_SIZE; i++)
819 {
820 PPGMPAGE pPage;
821 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
822 AssertRCBreak(rc);
823
824 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
825 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
826 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
827 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
828 {
829 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
830 break;
831 }
832
833 GCPhys += PAGE_SIZE;
834 }
835 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
836
837 if (i == _2M/PAGE_SIZE)
838 {
839 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
840 pVM->pgm.s.cLargePagesDisabled--;
841 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
842 return VINF_SUCCESS;
843 }
844
845 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
846}
847
848#endif /* PGM_WITH_LARGE_PAGES */
849
850/**
851 * Deal with a write monitored page.
852 *
855 * @param pVM Pointer to the VM.
856 * @param pPage The physical page tracking structure.
857 *
858 * @remarks Called from within the PGM critical section.
859 */
860void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
861{
862 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
863 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
864 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
865 Assert(pVM->pgm.s.cMonitoredPages > 0);
866 pVM->pgm.s.cMonitoredPages--;
867 pVM->pgm.s.cWrittenToPages++;
868}
869
870
871/**
872 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
873 *
874 * @returns VBox strict status code.
875 * @retval VINF_SUCCESS on success.
876 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
877 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
878 *
879 * @param pVM Pointer to the VM.
880 * @param pPage The physical page tracking structure.
881 * @param GCPhys The address of the page.
882 *
883 * @remarks Called from within the PGM critical section.
884 */
885int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
886{
887 PGM_LOCK_ASSERT_OWNER(pVM);
888 switch (PGM_PAGE_GET_STATE(pPage))
889 {
890 case PGM_PAGE_STATE_WRITE_MONITORED:
891 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
892 /* fall thru */
893 default: /* to shut up GCC */
894 case PGM_PAGE_STATE_ALLOCATED:
895 return VINF_SUCCESS;
896
897 /*
898 * Zero pages can be dummy pages for MMIO or reserved memory,
899 * so we need to check the flags before joining cause with
900 * shared page replacement.
901 */
902 case PGM_PAGE_STATE_ZERO:
903 if (PGM_PAGE_IS_MMIO(pPage))
904 return VERR_PGM_PHYS_PAGE_RESERVED;
905 /* fall thru */
906 case PGM_PAGE_STATE_SHARED:
907 return pgmPhysAllocPage(pVM, pPage, GCPhys);
908
909 /* Not allowed to write to ballooned pages. */
910 case PGM_PAGE_STATE_BALLOONED:
911 return VERR_PGM_PHYS_PAGE_BALLOONED;
912 }
913}
914
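/*
 * Usage sketch (illustrative; mirrors the callers further down in this file):
 * before writing through a mapping, make sure the page has private, writable
 * backing.  Zero and shared pages are replaced on demand.
 *
 *      if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
 *      {
 *          rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *          if (RT_FAILURE(rc))
 *              return rc;
 *          // rc may be VINF_PGM_SYNC_CR3 here; a page pool flush is pending.
 *      }
 */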
915
916/**
917 * Internal usage: Map the page specified by its GMM ID.
918 *
919 * This is similar to pgmPhysPageMap.
920 *
921 * @returns VBox status code.
922 *
923 * @param pVM Pointer to the VM.
924 * @param idPage The Page ID.
925 * @param HCPhys The physical address (for RC).
926 * @param ppv Where to store the mapping address.
927 *
928 * @remarks Called from within the PGM critical section. The mapping is only
929 * valid while you are inside this section.
930 */
931int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
932{
933 /*
934 * Validation.
935 */
936 PGM_LOCK_ASSERT_OWNER(pVM);
937 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
938 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
939 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
940
941#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
942 /*
943 * Map it by HCPhys.
944 */
945 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
946
947#else
948 /*
949 * Find/make Chunk TLB entry for the mapping chunk.
950 */
951 PPGMCHUNKR3MAP pMap;
952 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
953 if (pTlbe->idChunk == idChunk)
954 {
955 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
956 pMap = pTlbe->pChunk;
957 }
958 else
959 {
960 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
961
962 /*
963 * Find the chunk, map it if necessary.
964 */
965 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
966 if (pMap)
967 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
968 else
969 {
970# ifdef IN_RING0
971 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
972 AssertRCReturn(rc, rc);
973 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
974 Assert(pMap);
975# else
976 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
977 if (RT_FAILURE(rc))
978 return rc;
979# endif
980 }
981
982 /*
983 * Enter it into the Chunk TLB.
984 */
985 pTlbe->idChunk = idChunk;
986 pTlbe->pChunk = pMap;
987 }
988
989 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
990 return VINF_SUCCESS;
991#endif
992}
993
994
995/**
996 * Maps a page into the current virtual address space so it can be accessed.
997 *
998 * @returns VBox status code.
999 * @retval VINF_SUCCESS on success.
1000 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1001 *
1002 * @param pVM Pointer to the VM.
1003 * @param pPage The physical page tracking structure.
1004 * @param GCPhys The address of the page.
1005 * @param ppMap Where to store the address of the mapping tracking structure.
1006 * @param ppv Where to store the mapping address of the page. The page
1007 * offset is masked off!
1008 *
1009 * @remarks Called from within the PGM critical section.
1010 */
1011static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1012{
1013 PGM_LOCK_ASSERT_OWNER(pVM);
1014
1015#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1016 /*
1017 * Just some sketchy GC/R0-darwin code.
1018 */
1019 *ppMap = NULL;
1020 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1021 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1022 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1023 NOREF(GCPhys);
1024 return VINF_SUCCESS;
1025
1026#else /* IN_RING3 || IN_RING0 */
1027
1028
1029 /*
1030 * Special case: ZERO and MMIO2 pages.
1031 */
1032 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1033 if (idChunk == NIL_GMM_CHUNKID)
1034 {
1035 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1036 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
1037 {
1038 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
1039 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1040 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_2);
1041 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
1042 }
1043 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1044 {
1045 /** @todo deal with aliased MMIO2 pages somehow...
1046 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
1047 * them, that would also avoid this mess. It would actually be kind of
1048 * elegant... */
1049 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_PGM_MAP_MMIO2_ALIAS_MMIO);
1050 }
1051 else
1052 {
1053 /** @todo handle MMIO2 */
1054 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1055 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
1056 ("pPage=%R[pgmpage]\n", pPage),
1057 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1058 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1059 }
1060 *ppMap = NULL;
1061 return VINF_SUCCESS;
1062 }
1063
1064 /*
1065 * Find/make Chunk TLB entry for the mapping chunk.
1066 */
1067 PPGMCHUNKR3MAP pMap;
1068 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1069 if (pTlbe->idChunk == idChunk)
1070 {
1071 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1072 pMap = pTlbe->pChunk;
1073 AssertPtr(pMap->pv);
1074 }
1075 else
1076 {
1077 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1078
1079 /*
1080 * Find the chunk, map it if necessary.
1081 */
1082 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1083 if (pMap)
1084 {
1085 AssertPtr(pMap->pv);
1086 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1087 }
1088 else
1089 {
1090#ifdef IN_RING0
1091 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1092 AssertRCReturn(rc, rc);
1093 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1094 Assert(pMap);
1095#else
1096 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1097 if (RT_FAILURE(rc))
1098 return rc;
1099#endif
1100 AssertPtr(pMap->pv);
1101 }
1102
1103 /*
1104 * Enter it into the Chunk TLB.
1105 */
1106 pTlbe->idChunk = idChunk;
1107 pTlbe->pChunk = pMap;
1108 }
1109
1110 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1111 *ppMap = pMap;
1112 return VINF_SUCCESS;
1113#endif /* IN_RING3 */
1114}
1115
1116
1117/**
1118 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1119 *
1120 * This is typically used in paths where we cannot use the TLB methods (like ROM
1121 * pages) or where there is no point in using them since we won't get many hits.
1122 *
1123 * @returns VBox strict status code.
1124 * @retval VINF_SUCCESS on success.
1125 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1126 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1127 *
1128 * @param pVM Pointer to the VM.
1129 * @param pPage The physical page tracking structure.
1130 * @param GCPhys The address of the page.
1131 * @param ppv Where to store the mapping address of the page. The page
1132 * offset is masked off!
1133 *
1134 * @remarks Called from within the PGM critical section. The mapping is only
1135 * valid while you are inside this section.
1136 */
1137int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1138{
1139 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1140 if (RT_SUCCESS(rc))
1141 {
1142 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1143 PPGMPAGEMAP pMapIgnore;
1144 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1145 if (RT_FAILURE(rc2)) /* preserve rc */
1146 rc = rc2;
1147 }
1148 return rc;
1149}
1150
1151
1152/**
1153 * Maps a page into the current virtual address space so it can be accessed for
1154 * both writing and reading.
1155 *
1156 * This is typically used in paths where we cannot use the TLB methods (like ROM
1157 * pages) or where there is no point in using them since we won't get many hits.
1158 *
1159 * @returns VBox status code.
1160 * @retval VINF_SUCCESS on success.
1161 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1162 *
1163 * @param pVM Pointer to the VM.
1164 * @param pPage The physical page tracking structure. Must be in the
1165 * allocated state.
1166 * @param GCPhys The address of the page.
1167 * @param ppv Where to store the mapping address of the page. The page
1168 * offset is masked off!
1169 *
1170 * @remarks Called from within the PGM critical section. The mapping is only
1171 * valid while you are inside this section.
1172 */
1173int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1174{
1175 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1176 PPGMPAGEMAP pMapIgnore;
1177 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1178}
1179
1180
1181/**
1182 * Maps a page into the current virtual address space so it can be accessed for
1183 * reading.
1184 *
1185 * This is typically used in paths where we cannot use the TLB methods (like ROM
1186 * pages) or where there is no point in using them since we won't get many hits.
1187 *
1188 * @returns VBox status code.
1189 * @retval VINF_SUCCESS on success.
1190 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1191 *
1192 * @param pVM Pointer to the VM.
1193 * @param pPage The physical page tracking structure.
1194 * @param GCPhys The address of the page.
1195 * @param ppv Where to store the mapping address of the page. The page
1196 * offset is masked off!
1197 *
1198 * @remarks Called from within the PGM critical section. The mapping is only
1199 * valid while you are inside this section.
1200 */
1201int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1202{
1203 PPGMPAGEMAP pMapIgnore;
1204 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1205}
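
/*
 * Usage sketch (illustrative; the same pattern as the shared page copy in
 * pgmPhysAllocPage above): map a page read-only for a short peek while
 * holding the PGM lock.
 *
 *      void const *pvSrc;
 *      rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSrc);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ... pvSrc is only valid while inside the PGM critical section ...
 *      }
 */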
1206
1207#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1208
1209/**
1210 * Load a guest page into the ring-3 physical TLB.
1211 *
1212 * @returns VBox status code.
1213 * @retval VINF_SUCCESS on success
1214 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1215 * @param pVM Pointer to the VM.
1216 * @param GCPhys The guest physical address in question.
1217 */
1218int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1219{
1220 PGM_LOCK_ASSERT_OWNER(pVM);
1221
1222 /*
1223 * Find the ram range and page and hand it over to the with-page function.
1224 * 99.8% of requests are expected to be in the first range.
1225 */
1226 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1227 if (!pPage)
1228 {
1229 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1230 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1231 }
1232
1233 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1234}
1235
1236
1237/**
1238 * Load a guest page into the ring-3 physical TLB.
1239 *
1240 * @returns VBox status code.
1241 * @retval VINF_SUCCESS on success
1242 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1243 *
1244 * @param pVM Pointer to the VM.
1245 * @param pPage Pointer to the PGMPAGE structure corresponding to
1246 * GCPhys.
1247 * @param GCPhys The guest physical address in question.
1248 */
1249int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1250{
1251 PGM_LOCK_ASSERT_OWNER(pVM);
1252 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1253
1254 /*
1255 * Map the page.
1256 * Make a special case for the zero page as it is kind of special.
1257 */
1258 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1259 if ( !PGM_PAGE_IS_ZERO(pPage)
1260 && !PGM_PAGE_IS_BALLOONED(pPage))
1261 {
1262 void *pv;
1263 PPGMPAGEMAP pMap;
1264 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1265 if (RT_FAILURE(rc))
1266 return rc;
1267 pTlbe->pMap = pMap;
1268 pTlbe->pv = pv;
1269 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1270 }
1271 else
1272 {
1273 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1274 pTlbe->pMap = NULL;
1275 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1276 }
1277#ifdef PGM_WITH_PHYS_TLB
1278 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1279 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1280 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1281 else
1282 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1283#else
1284 pTlbe->GCPhys = NIL_RTGCPHYS;
1285#endif
1286 pTlbe->pPage = pPage;
1287 return VINF_SUCCESS;
1288}
1289
1290#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1291
1292/**
1293 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1294 * own the PGM lock and therefore not need to lock the mapped page.
1295 *
1296 * @returns VBox status code.
1297 * @retval VINF_SUCCESS on success.
1298 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1299 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1300 *
1301 * @param pVM Pointer to the VM.
1302 * @param GCPhys The guest physical address of the page that should be mapped.
1303 * @param pPage Pointer to the PGMPAGE structure for the page.
1304 * @param ppv Where to store the address corresponding to GCPhys.
1305 *
1306 * @internal
1307 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1308 */
1309int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1310{
1311 int rc;
1312 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1313 PGM_LOCK_ASSERT_OWNER(pVM);
1314 pVM->pgm.s.cDeprecatedPageLocks++;
1315
1316 /*
1317 * Make sure the page is writable.
1318 */
1319 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1320 {
1321 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1322 if (RT_FAILURE(rc))
1323 return rc;
1324 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1325 }
1326 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1327
1328 /*
1329 * Get the mapping address.
1330 */
1331#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1332 void *pv;
1333 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1334 PGM_PAGE_GET_HCPHYS(pPage),
1335 &pv
1336 RTLOG_COMMA_SRC_POS);
1337 if (RT_FAILURE(rc))
1338 return rc;
1339 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1340#else
1341 PPGMPAGEMAPTLBE pTlbe;
1342 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1343 if (RT_FAILURE(rc))
1344 return rc;
1345 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1346#endif
1347 return VINF_SUCCESS;
1348}
1349
1350#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1351
1352/**
1353 * Locks a page mapping for writing.
1354 *
1355 * @param pVM Pointer to the VM.
1356 * @param pPage The page.
1357 * @param pTlbe The mapping TLB entry for the page.
1358 * @param pLock The lock structure (output).
1359 */
1360DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1361{
1362 PPGMPAGEMAP pMap = pTlbe->pMap;
1363 if (pMap)
1364 pMap->cRefs++;
1365
1366 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1367 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1368 {
1369 if (cLocks == 0)
1370 pVM->pgm.s.cWriteLockedPages++;
1371 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1372 }
1373 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1374 {
1375 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1376 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1377 if (pMap)
1378 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1379 }
1380
1381 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1382 pLock->pvMap = pMap;
1383}
1384
1385/**
1386 * Locks a page mapping for reading.
1387 *
1388 * @param pVM Pointer to the VM.
1389 * @param pPage The page.
1390 * @param pTlbe The mapping TLB entry for the page.
1391 * @param pLock The lock structure (output).
1392 */
1393DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1394{
1395 PPGMPAGEMAP pMap = pTlbe->pMap;
1396 if (pMap)
1397 pMap->cRefs++;
1398
1399 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1400 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1401 {
1402 if (cLocks == 0)
1403 pVM->pgm.s.cReadLockedPages++;
1404 PGM_PAGE_INC_READ_LOCKS(pPage);
1405 }
1406 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1407 {
1408 PGM_PAGE_INC_READ_LOCKS(pPage);
1409 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1410 if (pMap)
1411 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1412 }
1413
1414 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1415 pLock->pvMap = pMap;
1416}
1417
1418#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1419
1420
1421/**
1422 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1423 * own the PGM lock and have access to the page structure.
1424 *
1425 * @returns VBox status code.
1426 * @retval VINF_SUCCESS on success.
1427 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1428 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1429 *
1430 * @param pVM Pointer to the VM.
1431 * @param GCPhys The guest physical address of the page that should be mapped.
1432 * @param pPage Pointer to the PGMPAGE structure for the page.
1433 * @param ppv Where to store the address corresponding to GCPhys.
1434 * @param pLock Where to store the lock information that
1435 * pgmPhysReleaseInternalPageMappingLock needs.
1436 *
1437 * @internal
1438 */
1439int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1440{
1441 int rc;
1442 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1443 PGM_LOCK_ASSERT_OWNER(pVM);
1444
1445 /*
1446 * Make sure the page is writable.
1447 */
1448 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1449 {
1450 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1451 if (RT_FAILURE(rc))
1452 return rc;
1453 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1454 }
1455 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1456
1457 /*
1458 * Do the job.
1459 */
1460#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1461 void *pv;
1462 PVMCPU pVCpu = VMMGetCpu(pVM);
1463 rc = pgmRZDynMapHCPageInlined(pVCpu,
1464 PGM_PAGE_GET_HCPHYS(pPage),
1465 &pv
1466 RTLOG_COMMA_SRC_POS);
1467 if (RT_FAILURE(rc))
1468 return rc;
1469 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1470 pLock->pvPage = pv;
1471 pLock->pVCpu = pVCpu;
1472
1473#else
1474 PPGMPAGEMAPTLBE pTlbe;
1475 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1476 if (RT_FAILURE(rc))
1477 return rc;
1478 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1479 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1480#endif
1481 return VINF_SUCCESS;
1482}
1483
1484
1485/**
1486 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1487 * own the PGM lock and have access to the page structure.
1488 *
1489 * @returns VBox status code.
1490 * @retval VINF_SUCCESS on success.
1491 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1492 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1493 *
1494 * @param pVM Pointer to the VM.
1495 * @param GCPhys The guest physical address of the page that should be mapped.
1496 * @param pPage Pointer to the PGMPAGE structure for the page.
1497 * @param ppv Where to store the address corresponding to GCPhys.
1498 * @param pLock Where to store the lock information that
1499 * pgmPhysReleaseInternalPageMappingLock needs.
1500 *
1501 * @internal
1502 */
1503int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1504{
1505 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1506 PGM_LOCK_ASSERT_OWNER(pVM);
1507 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1508
1509 /*
1510 * Do the job.
1511 */
1512#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1513 void *pv;
1514 PVMCPU pVCpu = VMMGetCpu(pVM);
1515 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1516 PGM_PAGE_GET_HCPHYS(pPage),
1517 &pv
1518 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1519 if (RT_FAILURE(rc))
1520 return rc;
1521 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1522 pLock->pvPage = pv;
1523 pLock->pVCpu = pVCpu;
1524
1525#else
1526 PPGMPAGEMAPTLBE pTlbe;
1527 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1528 if (RT_FAILURE(rc))
1529 return rc;
1530 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1531 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1532#endif
1533 return VINF_SUCCESS;
1534}
1535
1536
1537/**
1538 * Requests the mapping of a guest page into the current context.
1539 *
1540 * This API should only be used for very short term, as it will consume scarce
1541 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1542 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1543 *
1544 * This API will assume your intention is to write to the page, and will
1545 * therefore replace shared and zero pages. If you do not intend to modify
1546 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1547 *
1548 * @returns VBox status code.
1549 * @retval VINF_SUCCESS on success.
1550 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1551 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1552 *
1553 * @param pVM Pointer to the VM.
1554 * @param GCPhys The guest physical address of the page that should be
1555 * mapped.
1556 * @param ppv Where to store the address corresponding to GCPhys.
1557 * @param pLock Where to store the lock information that
1558 * PGMPhysReleasePageMappingLock needs.
1559 *
1560 * @remarks The caller is responsible for dealing with access handlers.
1561 * @todo Add an informational return code for pages with access handlers?
1562 *
1563 * @remark Avoid calling this API from within critical sections (other than
1564 * the PGM one) because of the deadlock risk. External threads may
1565 * need to delegate jobs to the EMTs.
1566 * @remarks Only one page is mapped! Make no assumption about what's after or
1567 * before the returned page!
1568 * @thread Any thread.
1569 */
1570VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1571{
1572 int rc = pgmLock(pVM);
1573 AssertRCReturn(rc, rc);
1574
1575#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1576 /*
1577 * Find the page and make sure it's writable.
1578 */
1579 PPGMPAGE pPage;
1580 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1581 if (RT_SUCCESS(rc))
1582 {
1583 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1584 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1585 if (RT_SUCCESS(rc))
1586 {
1587 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1588
1589 PVMCPU pVCpu = VMMGetCpu(pVM);
1590 void *pv;
1591 rc = pgmRZDynMapHCPageInlined(pVCpu,
1592 PGM_PAGE_GET_HCPHYS(pPage),
1593 &pv
1594 RTLOG_COMMA_SRC_POS);
1595 if (RT_SUCCESS(rc))
1596 {
1597 AssertRCSuccess(rc);
1598
1599 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1600 *ppv = pv;
1601 pLock->pvPage = pv;
1602 pLock->pVCpu = pVCpu;
1603 }
1604 }
1605 }
1606
1607#else /* IN_RING3 || IN_RING0 */
1608 /*
1609 * Query the Physical TLB entry for the page (may fail).
1610 */
1611 PPGMPAGEMAPTLBE pTlbe;
1612 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1613 if (RT_SUCCESS(rc))
1614 {
1615 /*
1616 * If the page is shared, the zero page, or being write monitored
1617 * it must be converted to a page that's writable if possible.
1618 */
1619 PPGMPAGE pPage = pTlbe->pPage;
1620 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1621 {
1622 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1623 if (RT_SUCCESS(rc))
1624 {
1625 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1626 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1627 }
1628 }
1629 if (RT_SUCCESS(rc))
1630 {
1631 /*
1632 * Now, just perform the locking and calculate the return address.
1633 */
1634 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1635 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1636 }
1637 }
1638
1639#endif /* IN_RING3 || IN_RING0 */
1640 pgmUnlock(pVM);
1641 return rc;
1642}
1643
1644
1645/**
1646 * Requests the mapping of a guest page into the current context.
1647 *
1648 * This API should only be used for very short term, as it will consume scarce
1649 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1650 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1651 *
1652 * @returns VBox status code.
1653 * @retval VINF_SUCCESS on success.
1654 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1655 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1656 *
1657 * @param pVM Pointer to the VM.
1658 * @param GCPhys The guest physical address of the page that should be
1659 * mapped.
1660 * @param ppv Where to store the address corresponding to GCPhys.
1661 * @param pLock Where to store the lock information that
1662 * PGMPhysReleasePageMappingLock needs.
1663 *
1664 * @remarks The caller is responsible for dealing with access handlers.
1665 * @todo Add an informational return code for pages with access handlers?
1666 *
1667 * @remarks Avoid calling this API from within critical sections (other than
1668 * the PGM one) because of the deadlock risk.
1669 * @remarks Only one page is mapped! Make no assumption about what's after or
1670 * before the returned page!
1671 * @thread Any thread.
1672 */
1673VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1674{
1675 int rc = pgmLock(pVM);
1676 AssertRCReturn(rc, rc);
1677
1678#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1679 /*
1680 * Find the page and make sure it's readable.
1681 */
1682 PPGMPAGE pPage;
1683 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1684 if (RT_SUCCESS(rc))
1685 {
1686 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1687 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1688 else
1689 {
1690 PVMCPU pVCpu = VMMGetCpu(pVM);
1691 void *pv;
1692 rc = pgmRZDynMapHCPageInlined(pVCpu,
1693 PGM_PAGE_GET_HCPHYS(pPage),
1694 &pv
1695 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1696 if (RT_SUCCESS(rc))
1697 {
1698 AssertRCSuccess(rc);
1699
1700 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1701 *ppv = pv;
1702 pLock->pvPage = pv;
1703 pLock->pVCpu = pVCpu;
1704 }
1705 }
1706 }
1707
1708#else /* IN_RING3 || IN_RING0 */
1709 /*
1710 * Query the Physical TLB entry for the page (may fail).
1711 */
1712 PPGMPAGEMAPTLBE pTlbe;
1713 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1714 if (RT_SUCCESS(rc))
1715 {
1716        /* MMIO pages don't have any readable backing. */
1717 PPGMPAGE pPage = pTlbe->pPage;
1718 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1719 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1720 else
1721 {
1722 /*
1723 * Now, just perform the locking and calculate the return address.
1724 */
1725 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1726 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1727 }
1728 }
1729
1730#endif /* IN_RING3 || IN_RING0 */
1731 pgmUnlock(pVM);
1732 return rc;
1733}
1734
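/*
 * Illustrative usage sketch for PGMPhysGCPhys2CCPtrReadOnly (hypothetical
 * helper, not part of the VMM API): copy data out of a guest page, taking
 * care not to read past the single page that the call maps.
 *
 *     static int exampleCopyFromGuestPage(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cb)
 *     {
 *         void const     *pv;
 *         PGMPAGEMAPLOCK  Lock;
 *         Assert(cb <= PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK)); // stay within the mapped page
 *         int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *         if (RT_SUCCESS(rc))
 *         {
 *             memcpy(pvDst, pv, cb);
 *             PGMPhysReleasePageMappingLock(pVM, &Lock);
 *         }
 *         return rc;
 *     }
 */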
1735
1736/**
1737 * Requests the mapping of a guest page given by virtual address into the current context.
1738 *
1739 * This API should only be used for very short-term mappings, as it consumes
1740 * scarce resources (R0 and GC) in the mapping cache. When you're done
1741 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1742 *
1743 * This API will assume your intention is to write to the page, and will
1744 * therefore replace shared and zero pages. If you do not intend to modify
1745 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1746 *
1747 * @returns VBox status code.
1748 * @retval VINF_SUCCESS on success.
1749 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1750 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1751 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1752 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1753 *
1754 * @param pVCpu Pointer to the VMCPU.
1755 * @param GCPtr The guest virtual address of the page that should be mapped.
1756 * @param ppv Where to store the address corresponding to GCPtr.
1757 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1758 *
1759 * @remark Avoid calling this API from within critical sections (other than
1760 * the PGM one) because of the deadlock risk.
1761 * @thread EMT
1762 */
1763VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1764{
1765 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1766 RTGCPHYS GCPhys;
1767 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1768 if (RT_SUCCESS(rc))
1769 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1770 return rc;
1771}
1772
1773
1774/**
1775 * Requests the mapping of a guest page given by virtual address into the current context.
1776 *
1777 * This API should only be used for very short-term mappings, as it consumes
1778 * scarce resources (R0 and GC) in the mapping cache. When you're done
1779 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1780 *
1781 * @returns VBox status code.
1782 * @retval VINF_SUCCESS on success.
1783 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1784 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1785 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1786 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1787 *
1788 * @param pVCpu Pointer to the VMCPU.
1789 * @param GCPtr The guest virtual address of the page that should be mapped.
1790 * @param ppv Where to store the address corresponding to GCPtr.
1791 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1792 *
1793 * @remark Avoid calling this API from within critical sections (other than
1794 * the PGM one) because of the deadlock risk.
1795 * @thread EMT
1796 */
1797VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1798{
1799 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1800 RTGCPHYS GCPhys;
1801 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1802 if (RT_SUCCESS(rc))
1803 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1804 return rc;
1805}
1806
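/*
 * Illustrative usage sketch for PGMPhysGCPtr2CCPtrReadOnly (hypothetical
 * helper, not part of the VMM API): peek at a guest value through its
 * virtual address.  Must be called on the EMT, and the read must not cross
 * the page boundary since only the page containing GCPtr is mapped.
 *
 *     static int examplePeekGuestU32(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t *puValue)
 *     {
 *         void const     *pv;
 *         PGMPAGEMAPLOCK  Lock;
 *         int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
 *         if (RT_SUCCESS(rc))
 *         {
 *             *puValue = *(uint32_t const *)pv;           // caller ensures no page crossing
 *             PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *         }
 *         return rc;
 *     }
 */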
1807
1808/**
1809 * Release the mapping of a guest page.
1810 *
1811 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1812 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1813 *
1814 * @param pVM Pointer to the VM.
1815 * @param pLock The lock structure initialized by the mapping function.
1816 */
1817VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1818{
1819#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1820 Assert(pLock->pvPage != NULL);
1821 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1822 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1823 pLock->pVCpu = NULL;
1824 pLock->pvPage = NULL;
1825
1826#else
1827 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1828 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1829 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1830
1831 pLock->uPageAndType = 0;
1832 pLock->pvMap = NULL;
1833
1834 pgmLock(pVM);
1835 if (fWriteLock)
1836 {
1837 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1838 Assert(cLocks > 0);
1839 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1840 {
1841 if (cLocks == 1)
1842 {
1843 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1844 pVM->pgm.s.cWriteLockedPages--;
1845 }
1846 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1847 }
1848
1849 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1850 {
1851 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1852 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1853 Assert(pVM->pgm.s.cMonitoredPages > 0);
1854 pVM->pgm.s.cMonitoredPages--;
1855 pVM->pgm.s.cWrittenToPages++;
1856 }
1857 }
1858 else
1859 {
1860 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1861 Assert(cLocks > 0);
1862 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1863 {
1864 if (cLocks == 1)
1865 {
1866 Assert(pVM->pgm.s.cReadLockedPages > 0);
1867 pVM->pgm.s.cReadLockedPages--;
1868 }
1869 PGM_PAGE_DEC_READ_LOCKS(pPage);
1870 }
1871 }
1872
1873 if (pMap)
1874 {
1875 Assert(pMap->cRefs >= 1);
1876 pMap->cRefs--;
1877 }
1878 pgmUnlock(pVM);
1879#endif /* IN_RING3 || IN_RING0 */
1880}
1881
1882
1883/**
1884 * Release the internal mapping of a guest page.
1885 *
1886 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1887 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1888 *
1889 * @param pVM Pointer to the VM.
1890 * @param pLock The lock structure initialized by the mapping function.
1891 *
1892 * @remarks Caller must hold the PGM lock.
1893 */
1894void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1895{
1896 PGM_LOCK_ASSERT_OWNER(pVM);
1897 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1898}
1899
1900
1901/**
1902 * Converts a GC physical address to a HC ring-3 pointer.
1903 *
1904 * @returns VINF_SUCCESS on success.
1905 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1906 * page but has no physical backing.
1907 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1908 * GC physical address.
1909 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1910 * a dynamic RAM chunk boundary.
1911 *
1912 * @param pVM Pointer to the VM.
1913 * @param GCPhys The GC physical address to convert.
1914 * @param pR3Ptr Where to store the R3 pointer on success.
1915 *
1916 * @deprecated Avoid when possible!
1917 */
1918int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1919{
1920/** @todo this is kind of hacky and needs some more work. */
1921#ifndef DEBUG_sandervl
1922 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1923#endif
1924
1925    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
1926#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1927 NOREF(pVM); NOREF(pR3Ptr);
1928 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1929#else
1930 pgmLock(pVM);
1931
1932 PPGMRAMRANGE pRam;
1933 PPGMPAGE pPage;
1934 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1935 if (RT_SUCCESS(rc))
1936 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1937
1938 pgmUnlock(pVM);
1939 Assert(rc <= VINF_SUCCESS);
1940 return rc;
1941#endif
1942}
1943
1944#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1945
1946/**
1947 * Maps and locks a guest CR3 or PD (PAE) page.
1948 *
1949 * @returns VINF_SUCCESS on success.
1950 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1951 * page but has no physical backing.
1952 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1953 * GC physical address.
1954 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1955 * a dynamic RAM chunk boundary.
1956 *
1957 * @param pVM Pointer to the VM.
1958 * @param GCPhys The GC physical address to convert.
1959 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1960 * may not be valid in ring-0 depending on the
1961 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1962 *
1963 * @remarks The caller must own the PGM lock.
1964 */
1965int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1966{
1967
1968 PPGMRAMRANGE pRam;
1969 PPGMPAGE pPage;
1970 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1971 if (RT_SUCCESS(rc))
1972 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1973 Assert(rc <= VINF_SUCCESS);
1974 return rc;
1975}
1976
1977
1983#endif
1984
1985/**
1986 * Converts a guest pointer to a GC physical address.
1987 *
1988 * This uses the current CR3/CR0/CR4 of the guest.
1989 *
1990 * @returns VBox status code.
1991 * @param pVCpu Pointer to the VMCPU.
1992 * @param GCPtr The guest pointer to convert.
1993 * @param pGCPhys Where to store the GC physical address.
1994 */
1995VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1996{
1997 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1998 if (pGCPhys && RT_SUCCESS(rc))
1999 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2000 return rc;
2001}
2002
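/*
 * Illustrative usage sketch for PGMPhysGCPtr2GCPhys (hypothetical helper, not
 * part of the VMM API): the returned physical address already carries the
 * byte offset within the page, so for an access that stays inside one guest
 * page it can be fed straight into the physical access APIs.
 *
 *     static int exampleReadViaTranslation(PVMCPU pVCpu, RTGCPTR GCPtr, void *pvBuf, size_t cb)
 *     {
 *         RTGCPHYS GCPhys;
 *         int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *         if (RT_SUCCESS(rc))
 *             rc = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pvBuf, cb); // offset already included
 *         return rc;
 *     }
 */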
2003
2004/**
2005 * Converts a guest pointer to a HC physical address.
2006 *
2007 * This uses the current CR3/CR0/CR4 of the guest.
2008 *
2009 * @returns VBox status code.
2010 * @param pVCpu Pointer to the VMCPU.
2011 * @param GCPtr The guest pointer to convert.
2012 * @param pHCPhys Where to store the HC physical address.
2013 */
2014VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2015{
2016 PVM pVM = pVCpu->CTX_SUFF(pVM);
2017 RTGCPHYS GCPhys;
2018 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2019 if (RT_SUCCESS(rc))
2020 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2021 return rc;
2022}
2023
2024
2025
2026#undef LOG_GROUP
2027#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2028
2029
2030#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2031/**
2032 * Cache PGMPhys memory access
2033 *
2034 * @param pVM Pointer to the VM.
2035 * @param pCache Cache structure pointer
2036 * @param GCPhys GC physical address
2037 * @param pbR3 R3 pointer corresponding to the physical page
2038 *
2039 * @thread EMT.
2040 */
2041static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2042{
2043 uint32_t iCacheIndex;
2044
2045 Assert(VM_IS_EMT(pVM));
2046
2047 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2048 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2049
2050 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2051
2052 ASMBitSet(&pCache->aEntries, iCacheIndex);
2053
2054 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2055 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2056}
2057#endif /* IN_RING3 */
2058
2059
2060/**
2061 * Deals with reading from a page with one or more ALL access handlers.
2062 *
2063 * @returns VBox status code. Can be ignored in ring-3.
2064 * @retval VINF_SUCCESS.
2065 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2066 *
2067 * @param pVM Pointer to the VM.
2068 * @param pPage The page descriptor.
2069 * @param GCPhys The physical address to start reading at.
2070 * @param pvBuf Where to put the bits we read.
2071 * @param cb How much to read - less or equal to a page.
2072 */
2073static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
2074{
2075 /*
2076 * The most frequent access here is MMIO and shadowed ROM.
2077 * The current code ASSUMES all these access handlers cover full pages!
2078 */
2079
2080 /*
2081 * Whatever we do we need the source page, map it first.
2082 */
2083 PGMPAGEMAPLOCK PgMpLck;
2084 const void *pvSrc = NULL;
2085 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2086 if (RT_FAILURE(rc))
2087 {
2088 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2089 GCPhys, pPage, rc));
2090 memset(pvBuf, 0xff, cb);
2091 return VINF_SUCCESS;
2092 }
2093 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2094
2095 /*
2096 * Deal with any physical handlers.
2097 */
2098#ifdef IN_RING3
2099 PPGMPHYSHANDLER pPhys = NULL;
2100#endif
2101 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
2102 {
2103#ifdef IN_RING3
2104 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2105 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2106 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2107 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2108 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2109 Assert(pPhys->CTX_SUFF(pfnHandler));
2110
2111 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2112 void *pvUser = pPhys->CTX_SUFF(pvUser);
2113
2114 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2115 STAM_PROFILE_START(&pPhys->Stat, h);
2116 PGM_LOCK_ASSERT_OWNER(pVM);
2117 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2118 pgmUnlock(pVM);
2119 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2120 pgmLock(pVM);
2121# ifdef VBOX_WITH_STATISTICS
2122 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2123 if (pPhys)
2124 STAM_PROFILE_STOP(&pPhys->Stat, h);
2125# else
2126 pPhys = NULL; /* might not be valid anymore. */
2127# endif
2128 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2129#else
2130 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2131 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2132 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2133 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2134#endif
2135 }
2136
2137 /*
2138 * Deal with any virtual handlers.
2139 */
2140 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2141 {
2142 unsigned iPage;
2143 PPGMVIRTHANDLER pVirt;
2144
2145 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2146 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2147 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2148 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2149 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2150
2151#ifdef IN_RING3
2152 if (pVirt->pfnHandlerR3)
2153 {
2154 if (!pPhys)
2155 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2156 else
2157 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2158 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2159 + (iPage << PAGE_SHIFT)
2160 + (GCPhys & PAGE_OFFSET_MASK);
2161
2162 STAM_PROFILE_START(&pVirt->Stat, h);
2163 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2164 STAM_PROFILE_STOP(&pVirt->Stat, h);
2165 if (rc2 == VINF_SUCCESS)
2166 rc = VINF_SUCCESS;
2167 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2168 }
2169 else
2170 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2171#else
2172 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2173 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2174 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2175 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2176#endif
2177 }
2178
2179 /*
2180 * Take the default action.
2181 */
2182 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2183 memcpy(pvBuf, pvSrc, cb);
2184 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2185 return rc;
2186}
2187
2188
2189/**
2190 * Read physical memory.
2191 *
2192 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2193 * want to ignore those.
2194 *
2195 * @returns VBox status code. Can be ignored in ring-3.
2196 * @retval VINF_SUCCESS.
2197 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2198 *
2199 * @param pVM Pointer to the VM.
2200 * @param GCPhys Physical address start reading from.
2201 * @param pvBuf Where to put the read bits.
2202 * @param cbRead How many bytes to read.
2203 */
2204VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
2205{
2206 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2207 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2208
2209 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2210 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2211
2212 pgmLock(pVM);
2213
2214 /*
2215 * Copy loop on ram ranges.
2216 */
2217 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2218 for (;;)
2219 {
2220 /* Inside range or not? */
2221 if (pRam && GCPhys >= pRam->GCPhys)
2222 {
2223 /*
2224 * Must work our way through this, page by page.
2225 */
2226 RTGCPHYS off = GCPhys - pRam->GCPhys;
2227 while (off < pRam->cb)
2228 {
2229 unsigned iPage = off >> PAGE_SHIFT;
2230 PPGMPAGE pPage = &pRam->aPages[iPage];
2231 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2232 if (cb > cbRead)
2233 cb = cbRead;
2234
2235 /*
2236 * Any ALL access handlers?
2237 */
2238 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
2239 {
2240 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2241 if (RT_FAILURE(rc))
2242 {
2243 pgmUnlock(pVM);
2244 return rc;
2245 }
2246 }
2247 else
2248 {
2249 /*
2250 * Get the pointer to the page.
2251 */
2252 PGMPAGEMAPLOCK PgMpLck;
2253 const void *pvSrc;
2254 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2255 if (RT_SUCCESS(rc))
2256 {
2257 memcpy(pvBuf, pvSrc, cb);
2258 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2259 }
2260 else
2261 {
2262 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2263 pRam->GCPhys + off, pPage, rc));
2264 memset(pvBuf, 0xff, cb);
2265 }
2266 }
2267
2268 /* next page */
2269 if (cb >= cbRead)
2270 {
2271 pgmUnlock(pVM);
2272 return VINF_SUCCESS;
2273 }
2274 cbRead -= cb;
2275 off += cb;
2276 pvBuf = (char *)pvBuf + cb;
2277 } /* walk pages in ram range. */
2278
2279 GCPhys = pRam->GCPhysLast + 1;
2280 }
2281 else
2282 {
2283 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2284
2285 /*
2286 * Unassigned address space.
2287 */
2288 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2289 if (cb >= cbRead)
2290 {
2291 memset(pvBuf, 0xff, cbRead);
2292 break;
2293 }
2294 memset(pvBuf, 0xff, cb);
2295
2296 cbRead -= cb;
2297 pvBuf = (char *)pvBuf + cb;
2298 GCPhys += cb;
2299 }
2300
2301 /* Advance range if necessary. */
2302 while (pRam && GCPhys > pRam->GCPhysLast)
2303 pRam = pRam->CTX_SUFF(pNext);
2304 } /* Ram range walk */
2305
2306 pgmUnlock(pVM);
2307 return VINF_SUCCESS;
2308}
2309
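/*
 * Behaviour sketch for PGMPhysRead (hypothetical fragment, the address is an
 * assumption): reads from unassigned address space do not fail, they return
 * all-bits-set bytes, so the status code alone cannot be used to detect holes.
 *
 *     uint8_t abBuf[64];
 *     int rc = PGMPhysRead(pVM, GCPhysMaybeUnassigned, abBuf, sizeof(abBuf));
 *     // In ring-3 rc is VINF_SUCCESS even for unassigned ranges; abBuf is then 0xff filled.
 *     // In R0/RC a page with an ALL access handler yields VERR_PGM_PHYS_WR_HIT_HANDLER
 *     // and the caller is expected to redo the access in ring-3.
 */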
2310
2311/**
2312 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2313 *
2314 * @returns VBox status code. Can be ignored in ring-3.
2315 * @retval VINF_SUCCESS.
2316 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2317 *
2318 * @param pVM Pointer to the VM.
2319 * @param pPage The page descriptor.
2320 * @param GCPhys The physical address to start writing at.
2321 * @param pvBuf What to write.
2322 * @param cbWrite How much to write - less or equal to a page.
2323 */
2324static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2325{
2326 PGMPAGEMAPLOCK PgMpLck;
2327 void *pvDst = NULL;
2328 int rc;
2329
2330 /*
2331 * Give priority to physical handlers (like #PF does).
2332 *
2333 * Hope for a lonely physical handler first that covers the whole
2334 * write area. This should be a pretty frequent case with MMIO and
2335 * the heavy usage of full page handlers in the page pool.
2336 */
2337 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2338 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
2339 {
2340 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2341 if (pCur)
2342 {
2343 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2344 Assert(pCur->CTX_SUFF(pfnHandler));
2345
2346 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2347 if (cbRange > cbWrite)
2348 cbRange = cbWrite;
2349
2350#ifndef IN_RING3
2351 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2352 NOREF(cbRange);
2353 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2354 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2355
2356#else /* IN_RING3 */
2357 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2358 if (!PGM_PAGE_IS_MMIO(pPage))
2359 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2360 else
2361 rc = VINF_SUCCESS;
2362 if (RT_SUCCESS(rc))
2363 {
2364 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2365 void *pvUser = pCur->CTX_SUFF(pvUser);
2366
2367 STAM_PROFILE_START(&pCur->Stat, h);
2368 PGM_LOCK_ASSERT_OWNER(pVM);
2369 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2370 pgmUnlock(pVM);
2371 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2372 pgmLock(pVM);
2373# ifdef VBOX_WITH_STATISTICS
2374 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2375 if (pCur)
2376 STAM_PROFILE_STOP(&pCur->Stat, h);
2377# else
2378 pCur = NULL; /* might not be valid anymore. */
2379# endif
2380 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2381 {
2382 if (pvDst)
2383 memcpy(pvDst, pvBuf, cbRange);
2384 }
2385 else
2386 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2387 }
2388 else
2389 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2390 GCPhys, pPage, rc), rc);
2391 if (RT_LIKELY(cbRange == cbWrite))
2392 {
2393 if (pvDst)
2394 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2395 return VINF_SUCCESS;
2396 }
2397
2398 /* more fun to be had below */
2399 cbWrite -= cbRange;
2400 GCPhys += cbRange;
2401 pvBuf = (uint8_t *)pvBuf + cbRange;
2402 pvDst = (uint8_t *)pvDst + cbRange;
2403#endif /* IN_RING3 */
2404 }
2405 /* else: the handler is somewhere else in the page, deal with it below. */
2406 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2407 }
2408 /*
2409 * A virtual handler without any interfering physical handlers.
2410 * Hopefully it'll cover the whole write.
2411 */
2412 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2413 {
2414 unsigned iPage;
2415 PPGMVIRTHANDLER pCur;
2416 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2417 if (RT_SUCCESS(rc))
2418 {
2419 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2420 if (cbRange > cbWrite)
2421 cbRange = cbWrite;
2422
2423#ifndef IN_RING3
2424 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2425 NOREF(cbRange);
2426 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2427 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2428
2429#else /* IN_RING3 */
2430
2431 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2432 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2433 if (RT_SUCCESS(rc))
2434 {
2435 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2436 if (pCur->pfnHandlerR3)
2437 {
2438 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2439 + (iPage << PAGE_SHIFT)
2440 + (GCPhys & PAGE_OFFSET_MASK);
2441
2442 STAM_PROFILE_START(&pCur->Stat, h);
2443 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2444 STAM_PROFILE_STOP(&pCur->Stat, h);
2445 }
2446 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2447 memcpy(pvDst, pvBuf, cbRange);
2448 else
2449 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2450 }
2451 else
2452 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2453 GCPhys, pPage, rc), rc);
2454 if (RT_LIKELY(cbRange == cbWrite))
2455 {
2456 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2457 return VINF_SUCCESS;
2458 }
2459
2460 /* more fun to be had below */
2461 cbWrite -= cbRange;
2462 GCPhys += cbRange;
2463 pvBuf = (uint8_t *)pvBuf + cbRange;
2464 pvDst = (uint8_t *)pvDst + cbRange;
2465#endif
2466 }
2467 /* else: the handler is somewhere else in the page, deal with it below. */
2468 }
2469
2470 /*
2471 * Deal with all the odd ends.
2472 */
2473
2474 /* We need a writable destination page. */
2475 if (!pvDst)
2476 {
2477 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2478 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2479 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2480 GCPhys, pPage, rc), rc);
2481 }
2482
2483 /* The loop state (big + ugly). */
2484 unsigned iVirtPage = 0;
2485 PPGMVIRTHANDLER pVirt = NULL;
2486 uint32_t offVirt = PAGE_SIZE;
2487 uint32_t offVirtLast = PAGE_SIZE;
2488 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2489
2490 PPGMPHYSHANDLER pPhys = NULL;
2491 uint32_t offPhys = PAGE_SIZE;
2492 uint32_t offPhysLast = PAGE_SIZE;
2493 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2494
2495 /* The loop. */
2496 for (;;)
2497 {
2498 /*
2499 * Find the closest handler at or above GCPhys.
2500 */
2501 if (fMoreVirt && !pVirt)
2502 {
2503 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2504 if (RT_SUCCESS(rc))
2505 {
2506 offVirt = 0;
2507 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2508 }
2509 else
2510 {
2511 PPGMPHYS2VIRTHANDLER pVirtPhys;
2512 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2513 GCPhys, true /* fAbove */);
2514 if ( pVirtPhys
2515 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2516 {
2517 /* ASSUME that pVirtPhys only covers one page. */
2518 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2519 Assert(pVirtPhys->Core.Key > GCPhys);
2520
2521 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2522 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2523 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2524 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2525 }
2526 else
2527 {
2528 pVirt = NULL;
2529 fMoreVirt = false;
2530 offVirt = offVirtLast = PAGE_SIZE;
2531 }
2532 }
2533 }
2534
2535 if (fMorePhys && !pPhys)
2536 {
2537 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2538 if (pPhys)
2539 {
2540 offPhys = 0;
2541 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2542 }
2543 else
2544 {
2545 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2546 GCPhys, true /* fAbove */);
2547 if ( pPhys
2548 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2549 {
2550 offPhys = pPhys->Core.Key - GCPhys;
2551 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2552 }
2553 else
2554 {
2555 pPhys = NULL;
2556 fMorePhys = false;
2557 offPhys = offPhysLast = PAGE_SIZE;
2558 }
2559 }
2560 }
2561
2562 /*
2563 * Handle access to space without handlers (that's easy).
2564 */
2565 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2566 uint32_t cbRange = (uint32_t)cbWrite;
2567 if (offPhys && offVirt)
2568 {
2569 if (cbRange > offPhys)
2570 cbRange = offPhys;
2571 if (cbRange > offVirt)
2572 cbRange = offVirt;
2573 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2574 }
2575 /*
2576 * Physical handler.
2577 */
2578 else if (!offPhys && offVirt)
2579 {
2580 if (cbRange > offPhysLast + 1)
2581 cbRange = offPhysLast + 1;
2582 if (cbRange > offVirt)
2583 cbRange = offVirt;
2584#ifdef IN_RING3
2585 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2586 void *pvUser = pPhys->CTX_SUFF(pvUser);
2587
2588 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2589 STAM_PROFILE_START(&pPhys->Stat, h);
2590 PGM_LOCK_ASSERT_OWNER(pVM);
2591 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2592 pgmUnlock(pVM);
2593 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2594 pgmLock(pVM);
2595# ifdef VBOX_WITH_STATISTICS
2596 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2597 if (pPhys)
2598 STAM_PROFILE_STOP(&pPhys->Stat, h);
2599# else
2600 pPhys = NULL; /* might not be valid anymore. */
2601# endif
2602 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2603#else
2604 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2605 NOREF(cbRange);
2606 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2607 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2608 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2609#endif
2610 }
2611 /*
2612 * Virtual handler.
2613 */
2614 else if (offPhys && !offVirt)
2615 {
2616 if (cbRange > offVirtLast + 1)
2617 cbRange = offVirtLast + 1;
2618 if (cbRange > offPhys)
2619 cbRange = offPhys;
2620#ifdef IN_RING3
2621            Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2622 if (pVirt->pfnHandlerR3)
2623 {
2624 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2625 + (iVirtPage << PAGE_SHIFT)
2626 + (GCPhys & PAGE_OFFSET_MASK);
2627 STAM_PROFILE_START(&pVirt->Stat, h);
2628 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2629 STAM_PROFILE_STOP(&pVirt->Stat, h);
2630 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2631 }
2632 pVirt = NULL;
2633#else
2634 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2635 NOREF(cbRange);
2636 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2637 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2638 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2639#endif
2640 }
2641 /*
2642 * Both... give the physical one priority.
2643 */
2644 else
2645 {
2646 Assert(!offPhys && !offVirt);
2647 if (cbRange > offVirtLast + 1)
2648 cbRange = offVirtLast + 1;
2649 if (cbRange > offPhysLast + 1)
2650 cbRange = offPhysLast + 1;
2651
2652#ifdef IN_RING3
2653 if (pVirt->pfnHandlerR3)
2654 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2655 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2656
2657 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2658 void *pvUser = pPhys->CTX_SUFF(pvUser);
2659
2660 STAM_PROFILE_START(&pPhys->Stat, h);
2661 PGM_LOCK_ASSERT_OWNER(pVM);
2662 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2663 pgmUnlock(pVM);
2664 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2665 pgmLock(pVM);
2666# ifdef VBOX_WITH_STATISTICS
2667 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2668 if (pPhys)
2669 STAM_PROFILE_STOP(&pPhys->Stat, h);
2670# else
2671 pPhys = NULL; /* might not be valid anymore. */
2672# endif
2673 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2674 if (pVirt->pfnHandlerR3)
2675 {
2676
2677 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2678 + (iVirtPage << PAGE_SHIFT)
2679 + (GCPhys & PAGE_OFFSET_MASK);
2680 STAM_PROFILE_START(&pVirt->Stat, h2);
2681 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2682 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2683 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2684 rc = VINF_SUCCESS;
2685 else
2686 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2687 }
2688 pPhys = NULL;
2689 pVirt = NULL;
2690#else
2691 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2692 NOREF(cbRange);
2693 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2694 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2695 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2696#endif
2697 }
2698 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2699 memcpy(pvDst, pvBuf, cbRange);
2700
2701 /*
2702 * Advance if we've got more stuff to do.
2703 */
2704 if (cbRange >= cbWrite)
2705 {
2706 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2707 return VINF_SUCCESS;
2708 }
2709
2710 cbWrite -= cbRange;
2711 GCPhys += cbRange;
2712 pvBuf = (uint8_t *)pvBuf + cbRange;
2713 pvDst = (uint8_t *)pvDst + cbRange;
2714
2715 offPhys -= cbRange;
2716 offPhysLast -= cbRange;
2717 offVirt -= cbRange;
2718 offVirtLast -= cbRange;
2719 }
2720}
2721
2722
2723/**
2724 * Write to physical memory.
2725 *
2726 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2727 * want to ignore those.
2728 *
2729 * @returns VBox status code. Can be ignored in ring-3.
2730 * @retval VINF_SUCCESS.
2731 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2732 *
2733 * @param pVM Pointer to the VM.
2734 * @param GCPhys Physical address to write to.
2735 * @param pvBuf What to write.
2736 * @param cbWrite How many bytes to write.
2737 */
2738VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2739{
2740 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2741 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2742 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2743
2744 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2745 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2746
2747 pgmLock(pVM);
2748
2749 /*
2750 * Copy loop on ram ranges.
2751 */
2752 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2753 for (;;)
2754 {
2755 /* Inside range or not? */
2756 if (pRam && GCPhys >= pRam->GCPhys)
2757 {
2758 /*
2759 * Must work our way thru this page by page.
2760 */
2761 RTGCPTR off = GCPhys - pRam->GCPhys;
2762 while (off < pRam->cb)
2763 {
2764 RTGCPTR iPage = off >> PAGE_SHIFT;
2765 PPGMPAGE pPage = &pRam->aPages[iPage];
2766 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2767 if (cb > cbWrite)
2768 cb = cbWrite;
2769
2770 /*
2771 * Any active WRITE or ALL access handlers?
2772 */
2773 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2774 {
2775 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2776 if (RT_FAILURE(rc))
2777 {
2778 pgmUnlock(pVM);
2779 return rc;
2780 }
2781 }
2782 else
2783 {
2784 /*
2785 * Get the pointer to the page.
2786 */
2787 PGMPAGEMAPLOCK PgMpLck;
2788 void *pvDst;
2789 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2790 if (RT_SUCCESS(rc))
2791 {
2792 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2793 memcpy(pvDst, pvBuf, cb);
2794 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2795 }
2796 /* Ignore writes to ballooned pages. */
2797 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2798 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2799 pRam->GCPhys + off, pPage, rc));
2800 }
2801
2802 /* next page */
2803 if (cb >= cbWrite)
2804 {
2805 pgmUnlock(pVM);
2806 return VINF_SUCCESS;
2807 }
2808
2809 cbWrite -= cb;
2810 off += cb;
2811 pvBuf = (const char *)pvBuf + cb;
2812 } /* walk pages in ram range */
2813
2814 GCPhys = pRam->GCPhysLast + 1;
2815 }
2816 else
2817 {
2818 /*
2819 * Unassigned address space, skip it.
2820 */
2821 if (!pRam)
2822 break;
2823 size_t cb = pRam->GCPhys - GCPhys;
2824 if (cb >= cbWrite)
2825 break;
2826 cbWrite -= cb;
2827 pvBuf = (const char *)pvBuf + cb;
2828 GCPhys += cb;
2829 }
2830
2831 /* Advance range if necessary. */
2832 while (pRam && GCPhys > pRam->GCPhysLast)
2833 pRam = pRam->CTX_SUFF(pNext);
2834 } /* Ram range walk */
2835
2836 pgmUnlock(pVM);
2837 return VINF_SUCCESS;
2838}
2839
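/*
 * Illustrative usage sketch for PGMPhysWrite in the R0/RC context
 * (hypothetical fragment, names are assumptions): a write that hits an access
 * handler cannot be completed there, so the status must be propagated and the
 * access replayed in ring-3 where the handler can be invoked.
 *
 *     int rc = PGMPhysWrite(pVM, GCPhysDst, &uValue, sizeof(uValue));
 *     if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *         return rc;      // defer to ring-3, which takes the handler path
 *     AssertRC(rc);
 */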
2840
2841/**
2842 * Read from guest physical memory by GC physical address, bypassing
2843 * MMIO and access handlers.
2844 *
2845 * @returns VBox status.
2846 * @param pVM Pointer to the VM.
2847 * @param pvDst The destination address.
2848 * @param GCPhysSrc The source address (GC physical address).
2849 * @param cb The number of bytes to read.
2850 */
2851VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2852{
2853 /*
2854 * Treat the first page as a special case.
2855 */
2856 if (!cb)
2857 return VINF_SUCCESS;
2858
2859 /* map the 1st page */
2860 void const *pvSrc;
2861 PGMPAGEMAPLOCK Lock;
2862 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2863 if (RT_FAILURE(rc))
2864 return rc;
2865
2866 /* optimize for the case where access is completely within the first page. */
2867 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2868 if (RT_LIKELY(cb <= cbPage))
2869 {
2870 memcpy(pvDst, pvSrc, cb);
2871 PGMPhysReleasePageMappingLock(pVM, &Lock);
2872 return VINF_SUCCESS;
2873 }
2874
2875 /* copy to the end of the page. */
2876 memcpy(pvDst, pvSrc, cbPage);
2877 PGMPhysReleasePageMappingLock(pVM, &Lock);
2878 GCPhysSrc += cbPage;
2879 pvDst = (uint8_t *)pvDst + cbPage;
2880 cb -= cbPage;
2881
2882 /*
2883 * Page by page.
2884 */
2885 for (;;)
2886 {
2887 /* map the page */
2888 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2889 if (RT_FAILURE(rc))
2890 return rc;
2891
2892 /* last page? */
2893 if (cb <= PAGE_SIZE)
2894 {
2895 memcpy(pvDst, pvSrc, cb);
2896 PGMPhysReleasePageMappingLock(pVM, &Lock);
2897 return VINF_SUCCESS;
2898 }
2899
2900 /* copy the entire page and advance */
2901 memcpy(pvDst, pvSrc, PAGE_SIZE);
2902 PGMPhysReleasePageMappingLock(pVM, &Lock);
2903 GCPhysSrc += PAGE_SIZE;
2904 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2905 cb -= PAGE_SIZE;
2906 }
2907 /* won't ever get here. */
2908}
2909
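/*
 * Illustrative contrast with PGMPhysRead (hypothetical fragment): the simple
 * API maps the backing pages directly and bypasses MMIO and access handlers,
 * so it can fail where PGMPhysRead would have gone through a handler.
 *
 *     uint64_t u64;
 *     int rc = PGMPhysSimpleReadGCPhys(pVM, &u64, GCPhysSrc, sizeof(u64));
 *     // May return VERR_PGM_PHYS_PAGE_RESERVED or VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
 *     // because real backing pages are required; no handler is ever invoked.
 */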
2910
2911/**
2912 * Write to guest physical memory by GC physical address.
2914 *
2915 * This will bypass MMIO and access handlers.
2916 *
2917 * @returns VBox status.
2918 * @param pVM Pointer to the VM.
2919 * @param GCPhysDst The GC physical address of the destination.
2920 * @param pvSrc The source buffer.
2921 * @param cb The number of bytes to write.
2922 */
2923VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2924{
2925 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2926
2927 /*
2928 * Treat the first page as a special case.
2929 */
2930 if (!cb)
2931 return VINF_SUCCESS;
2932
2933 /* map the 1st page */
2934 void *pvDst;
2935 PGMPAGEMAPLOCK Lock;
2936 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2937 if (RT_FAILURE(rc))
2938 return rc;
2939
2940 /* optimize for the case where access is completely within the first page. */
2941 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2942 if (RT_LIKELY(cb <= cbPage))
2943 {
2944 memcpy(pvDst, pvSrc, cb);
2945 PGMPhysReleasePageMappingLock(pVM, &Lock);
2946 return VINF_SUCCESS;
2947 }
2948
2949 /* copy to the end of the page. */
2950 memcpy(pvDst, pvSrc, cbPage);
2951 PGMPhysReleasePageMappingLock(pVM, &Lock);
2952 GCPhysDst += cbPage;
2953 pvSrc = (const uint8_t *)pvSrc + cbPage;
2954 cb -= cbPage;
2955
2956 /*
2957 * Page by page.
2958 */
2959 for (;;)
2960 {
2961 /* map the page */
2962 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2963 if (RT_FAILURE(rc))
2964 return rc;
2965
2966 /* last page? */
2967 if (cb <= PAGE_SIZE)
2968 {
2969 memcpy(pvDst, pvSrc, cb);
2970 PGMPhysReleasePageMappingLock(pVM, &Lock);
2971 return VINF_SUCCESS;
2972 }
2973
2974 /* copy the entire page and advance */
2975 memcpy(pvDst, pvSrc, PAGE_SIZE);
2976 PGMPhysReleasePageMappingLock(pVM, &Lock);
2977 GCPhysDst += PAGE_SIZE;
2978 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2979 cb -= PAGE_SIZE;
2980 }
2981 /* won't ever get here. */
2982}
2983
2984
2985/**
2986 * Read from guest physical memory referenced by GC pointer.
2987 *
2988 * This function uses the current CR3/CR0/CR4 of the guest and will
2989 * bypass access handlers and not set any accessed bits.
2990 *
2991 * @returns VBox status.
2992 * @param pVCpu Handle to the current virtual CPU.
2993 * @param pvDst The destination address.
2994 * @param GCPtrSrc The source address (GC pointer).
2995 * @param cb The number of bytes to read.
2996 */
2997VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2998{
2999 PVM pVM = pVCpu->CTX_SUFF(pVM);
3000/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3001
3002 /*
3003 * Treat the first page as a special case.
3004 */
3005 if (!cb)
3006 return VINF_SUCCESS;
3007
3008 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3009 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3010
3011 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3012 * when many VCPUs are fighting for the lock.
3013 */
3014 pgmLock(pVM);
3015
3016 /* map the 1st page */
3017 void const *pvSrc;
3018 PGMPAGEMAPLOCK Lock;
3019 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3020 if (RT_FAILURE(rc))
3021 {
3022 pgmUnlock(pVM);
3023 return rc;
3024 }
3025
3026 /* optimize for the case where access is completely within the first page. */
3027 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3028 if (RT_LIKELY(cb <= cbPage))
3029 {
3030 memcpy(pvDst, pvSrc, cb);
3031 PGMPhysReleasePageMappingLock(pVM, &Lock);
3032 pgmUnlock(pVM);
3033 return VINF_SUCCESS;
3034 }
3035
3036 /* copy to the end of the page. */
3037 memcpy(pvDst, pvSrc, cbPage);
3038 PGMPhysReleasePageMappingLock(pVM, &Lock);
3039 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3040 pvDst = (uint8_t *)pvDst + cbPage;
3041 cb -= cbPage;
3042
3043 /*
3044 * Page by page.
3045 */
3046 for (;;)
3047 {
3048 /* map the page */
3049 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3050 if (RT_FAILURE(rc))
3051 {
3052 pgmUnlock(pVM);
3053 return rc;
3054 }
3055
3056 /* last page? */
3057 if (cb <= PAGE_SIZE)
3058 {
3059 memcpy(pvDst, pvSrc, cb);
3060 PGMPhysReleasePageMappingLock(pVM, &Lock);
3061 pgmUnlock(pVM);
3062 return VINF_SUCCESS;
3063 }
3064
3065 /* copy the entire page and advance */
3066 memcpy(pvDst, pvSrc, PAGE_SIZE);
3067 PGMPhysReleasePageMappingLock(pVM, &Lock);
3068 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3069 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3070 cb -= PAGE_SIZE;
3071 }
3072 /* won't ever get here. */
3073}
3074
3075
3076/**
3077 * Write to guest physical memory referenced by GC pointer.
3078 *
3079 * This function uses the current CR3/CR0/CR4 of the guest and will
3080 * bypass access handlers and not set dirty or accessed bits.
3081 *
3082 * @returns VBox status.
3083 * @param pVCpu Handle to the current virtual CPU.
3084 * @param GCPtrDst The destination address (GC pointer).
3085 * @param pvSrc The source address.
3086 * @param cb The number of bytes to write.
3087 */
3088VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3089{
3090 PVM pVM = pVCpu->CTX_SUFF(pVM);
3091 VMCPU_ASSERT_EMT(pVCpu);
3092
3093 /*
3094 * Treat the first page as a special case.
3095 */
3096 if (!cb)
3097 return VINF_SUCCESS;
3098
3099 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3100 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3101
3102 /* map the 1st page */
3103 void *pvDst;
3104 PGMPAGEMAPLOCK Lock;
3105 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3106 if (RT_FAILURE(rc))
3107 return rc;
3108
3109 /* optimize for the case where access is completely within the first page. */
3110 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3111 if (RT_LIKELY(cb <= cbPage))
3112 {
3113 memcpy(pvDst, pvSrc, cb);
3114 PGMPhysReleasePageMappingLock(pVM, &Lock);
3115 return VINF_SUCCESS;
3116 }
3117
3118 /* copy to the end of the page. */
3119 memcpy(pvDst, pvSrc, cbPage);
3120 PGMPhysReleasePageMappingLock(pVM, &Lock);
3121 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3122 pvSrc = (const uint8_t *)pvSrc + cbPage;
3123 cb -= cbPage;
3124
3125 /*
3126 * Page by page.
3127 */
3128 for (;;)
3129 {
3130 /* map the page */
3131 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3132 if (RT_FAILURE(rc))
3133 return rc;
3134
3135 /* last page? */
3136 if (cb <= PAGE_SIZE)
3137 {
3138 memcpy(pvDst, pvSrc, cb);
3139 PGMPhysReleasePageMappingLock(pVM, &Lock);
3140 return VINF_SUCCESS;
3141 }
3142
3143 /* copy the entire page and advance */
3144 memcpy(pvDst, pvSrc, PAGE_SIZE);
3145 PGMPhysReleasePageMappingLock(pVM, &Lock);
3146 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3147 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3148 cb -= PAGE_SIZE;
3149 }
3150 /* won't ever get here. */
3151}
3152
3153
3154/**
3155 * Write to guest physical memory referenced by GC pointer and update the PTE.
3156 *
3157 * This function uses the current CR3/CR0/CR4 of the guest and will
3158 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3159 *
3160 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3161 *
3162 * @returns VBox status.
3163 * @param pVCpu Handle to the current virtual CPU.
3164 * @param GCPtrDst The destination address (GC pointer).
3165 * @param pvSrc The source address.
3166 * @param cb The number of bytes to write.
3167 */
3168VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3169{
3170 PVM pVM = pVCpu->CTX_SUFF(pVM);
3171 VMCPU_ASSERT_EMT(pVCpu);
3172
3173 /*
3174 * Treat the first page as a special case.
3175 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3176 */
3177 if (!cb)
3178 return VINF_SUCCESS;
3179
3180 /* map the 1st page */
3181 void *pvDst;
3182 PGMPAGEMAPLOCK Lock;
3183 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3184 if (RT_FAILURE(rc))
3185 return rc;
3186
3187 /* optimize for the case where access is completely within the first page. */
3188 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3189 if (RT_LIKELY(cb <= cbPage))
3190 {
3191 memcpy(pvDst, pvSrc, cb);
3192 PGMPhysReleasePageMappingLock(pVM, &Lock);
3193 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3194 return VINF_SUCCESS;
3195 }
3196
3197 /* copy to the end of the page. */
3198 memcpy(pvDst, pvSrc, cbPage);
3199 PGMPhysReleasePageMappingLock(pVM, &Lock);
3200 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3201 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3202 pvSrc = (const uint8_t *)pvSrc + cbPage;
3203 cb -= cbPage;
3204
3205 /*
3206 * Page by page.
3207 */
3208 for (;;)
3209 {
3210 /* map the page */
3211 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3212 if (RT_FAILURE(rc))
3213 return rc;
3214
3215 /* last page? */
3216 if (cb <= PAGE_SIZE)
3217 {
3218 memcpy(pvDst, pvSrc, cb);
3219 PGMPhysReleasePageMappingLock(pVM, &Lock);
3220 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3221 return VINF_SUCCESS;
3222 }
3223
3224 /* copy the entire page and advance */
3225 memcpy(pvDst, pvSrc, PAGE_SIZE);
3226 PGMPhysReleasePageMappingLock(pVM, &Lock);
3227 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3228 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3229 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3230 cb -= PAGE_SIZE;
3231 }
3232 /* won't ever get here. */
3233}
3234
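/*
 * Illustrative usage sketch for PGMPhysSimpleDirtyWriteGCPtr (hypothetical
 * fragment): when emulating a guest store, the accessed and dirty bits of the
 * guest PTE have to be set as real hardware would, which is what separates
 * this API from PGMPhysSimpleWriteGCPtr.
 *
 *     uint32_t const uValue = UINT32_C(0x12345678);
 *     int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrFault, &uValue, sizeof(uValue));
 *     // On success the bytes are written and X86_PTE_A | X86_PTE_D are set on the PTE.
 */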
3235
3236/**
3237 * Read from guest physical memory referenced by GC pointer.
3238 *
3239 * This function uses the current CR3/CR0/CR4 of the guest and will
3240 * respect access handlers and set accessed bits.
3241 *
3242 * @returns VBox status.
3243 * @param pVCpu Handle to the current virtual CPU.
3244 * @param pvDst The destination address.
3245 * @param GCPtrSrc The source address (GC pointer).
3246 * @param cb The number of bytes to read.
3247 * @thread The vCPU EMT.
3248 */
3249VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3250{
3251 RTGCPHYS GCPhys;
3252 uint64_t fFlags;
3253 int rc;
3254 PVM pVM = pVCpu->CTX_SUFF(pVM);
3255 VMCPU_ASSERT_EMT(pVCpu);
3256
3257 /*
3258 * Anything to do?
3259 */
3260 if (!cb)
3261 return VINF_SUCCESS;
3262
3263 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3264
3265 /*
3266 * Optimize reads within a single page.
3267 */
3268 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3269 {
3270 /* Convert virtual to physical address + flags */
3271 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3272 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3273 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3274
3275 /* mark the guest page as accessed. */
3276 if (!(fFlags & X86_PTE_A))
3277 {
3278 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3279 AssertRC(rc);
3280 }
3281
3282 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
3283 }
3284
3285 /*
3286 * Page by page.
3287 */
3288 for (;;)
3289 {
3290 /* Convert virtual to physical address + flags */
3291 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3292 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3293 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3294
3295 /* mark the guest page as accessed. */
3296 if (!(fFlags & X86_PTE_A))
3297 {
3298 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3299 AssertRC(rc);
3300 }
3301
3302 /* copy */
3303 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3304 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
3305 if (cbRead >= cb || RT_FAILURE(rc))
3306 return rc;
3307
3308 /* next */
3309 cb -= cbRead;
3310 pvDst = (uint8_t *)pvDst + cbRead;
3311 GCPtrSrc += cbRead;
3312 }
3313}
3314
3315
3316/**
3317 * Write to guest physical memory referenced by GC pointer.
3318 *
3319 * This function uses the current CR3/CR0/CR4 of the guest and will
3320 * respect access handlers and set dirty and accessed bits.
3321 *
3322 * @returns VBox status.
3323 * @retval VINF_SUCCESS.
3324 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3325 *
3326 * @param pVCpu Handle to the current virtual CPU.
3327 * @param GCPtrDst The destination address (GC pointer).
3328 * @param pvSrc The source address.
3329 * @param cb The number of bytes to write.
3330 */
3331VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3332{
3333 RTGCPHYS GCPhys;
3334 uint64_t fFlags;
3335 int rc;
3336 PVM pVM = pVCpu->CTX_SUFF(pVM);
3337 VMCPU_ASSERT_EMT(pVCpu);
3338
3339 /*
3340 * Anything to do?
3341 */
3342 if (!cb)
3343 return VINF_SUCCESS;
3344
3345 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3346
3347 /*
3348 * Optimize writes within a single page.
3349 */
3350 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3351 {
3352 /* Convert virtual to physical address + flags */
3353 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3354 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3355 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3356
3357 /* Mention when we ignore X86_PTE_RW... */
3358 if (!(fFlags & X86_PTE_RW))
3359            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3360
3361 /* Mark the guest page as accessed and dirty if necessary. */
3362 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3363 {
3364 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3365 AssertRC(rc);
3366 }
3367
3368 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
3369 }
3370
3371 /*
3372 * Page by page.
3373 */
3374 for (;;)
3375 {
3376 /* Convert virtual to physical address + flags */
3377 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3378 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3379 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3380
3381 /* Mention when we ignore X86_PTE_RW... */
3382 if (!(fFlags & X86_PTE_RW))
3383            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3384
3385 /* Mark the guest page as accessed and dirty if necessary. */
3386 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3387 {
3388 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3389 AssertRC(rc);
3390 }
3391
3392 /* copy */
3393 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3394 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3395 if (cbWrite >= cb || RT_FAILURE(rc))
3396 return rc;
3397
3398 /* next */
3399 cb -= cbWrite;
3400 pvSrc = (uint8_t *)pvSrc + cbWrite;
3401 GCPtrDst += cbWrite;
3402 }
3403}
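
/*
 * Illustrative usage sketch added to this listing, not part of the original
 * source.  It shows the typical EMT-side call pattern for PGMPhysWriteGCPtr
 * documented above; the helper name and its parameters are assumptions made
 * up for the example only.
 */
#if 0 /* example only */
static int pgmExampleWriteGuestDword(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    /* PGMPhysWriteGCPtr translates GCPtrDst using the current CR3/CR0/CR4,
       respects access handlers and sets the accessed/dirty bits; statuses
       such as VERR_PGM_PHYS_WR_HIT_HANDLER (R0/GC only) are propagated. */
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &u32Value, sizeof(u32Value));
}
#endif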
3404
3405
3406/**
3407 * Performs a read of guest virtual memory for instruction emulation.
3408 *
3409 * This will check permissions, raise exceptions and update the access bits.
3410 *
3411 * The current implementation will bypass all access handlers. It may later be
3412 * changed to at least respect MMIO.
3413 *
3414 *
3415 * @returns VBox status code suitable to scheduling.
3416 * @retval VINF_SUCCESS if the read was performed successfully.
3417 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3418 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3419 *
3420 * @param pVCpu Handle to the current virtual CPU.
3421 * @param pCtxCore The context core.
3422 * @param pvDst Where to put the bytes we've read.
3423 * @param GCPtrSrc The source address.
3424 * @param cb The number of bytes to read. Not more than a page.
3425 *
3426 * @remark This function will dynamically map physical pages in GC. This may unmap
3427 * mappings done by the caller. Be careful!
3428 */
3429VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3430{
3431 PVM pVM = pVCpu->CTX_SUFF(pVM);
3432 Assert(cb <= PAGE_SIZE);
3433 VMCPU_ASSERT_EMT(pVCpu);
3434
3435/** @todo r=bird: This isn't perfect!
3436 * -# It's not checking for reserved bits being 1.
3437 * -# It's not correctly dealing with the access bit.
3438 * -# It's not respecting MMIO memory or any other access handlers.
3439 */
3440 /*
3441 * 1. Translate virtual to physical. This may fault.
3442 * 2. Map the physical address.
3443 * 3. Do the read operation.
3444 * 4. Set access bits if required.
3445 */
3446 int rc;
3447 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3448 if (cb <= cb1)
3449 {
3450 /*
3451 * Not crossing pages.
3452 */
3453 RTGCPHYS GCPhys;
3454 uint64_t fFlags;
3455 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3456 if (RT_SUCCESS(rc))
3457 {
3458 /** @todo we should check reserved bits ... */
3459 PGMPAGEMAPLOCK PgMpLck;
3460 void const *pvSrc;
3461 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3462 switch (rc)
3463 {
3464 case VINF_SUCCESS:
3465 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3466 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3467 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3468 break;
3469 case VERR_PGM_PHYS_PAGE_RESERVED:
3470 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3471 memset(pvDst, 0xff, cb);
3472 break;
3473 default:
3474 Assert(RT_FAILURE_NP(rc));
3475 return rc;
3476 }
3477
3478 /** @todo access bit emulation isn't 100% correct. */
3479 if (!(fFlags & X86_PTE_A))
3480 {
3481 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3482 AssertRC(rc);
3483 }
3484 return VINF_SUCCESS;
3485 }
3486 }
3487 else
3488 {
3489 /*
3490 * Crosses pages.
3491 */
3492 size_t cb2 = cb - cb1;
3493 uint64_t fFlags1;
3494 RTGCPHYS GCPhys1;
3495 uint64_t fFlags2;
3496 RTGCPHYS GCPhys2;
3497 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3498 if (RT_SUCCESS(rc))
3499 {
3500 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3501 if (RT_SUCCESS(rc))
3502 {
3503 /** @todo we should check reserved bits ... */
3504 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3505 PGMPAGEMAPLOCK PgMpLck;
3506 void const *pvSrc1;
3507 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3508 switch (rc)
3509 {
3510 case VINF_SUCCESS:
3511 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3512 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3513 break;
3514 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3515 memset(pvDst, 0xff, cb1);
3516 break;
3517 default:
3518 Assert(RT_FAILURE_NP(rc));
3519 return rc;
3520 }
3521
3522 void const *pvSrc2;
3523 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3524 switch (rc)
3525 {
3526 case VINF_SUCCESS:
3527 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3528 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3529 break;
3530 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3531 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3532 break;
3533 default:
3534 Assert(RT_FAILURE_NP(rc));
3535 return rc;
3536 }
3537
3538 if (!(fFlags1 & X86_PTE_A))
3539 {
3540 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3541 AssertRC(rc);
3542 }
3543 if (!(fFlags2 & X86_PTE_A))
3544 {
3545 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3546 AssertRC(rc);
3547 }
3548 return VINF_SUCCESS;
3549 }
3550 }
3551 }
3552
3553 /*
3554 * Raise a #PF.
3555 */
3556 uint32_t uErr;
3557
3558 /* Get the current privilege level. */
3559 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3560 switch (rc)
3561 {
3562 case VINF_SUCCESS:
3563 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3564 break;
3565
3566 case VERR_PAGE_NOT_PRESENT:
3567 case VERR_PAGE_TABLE_NOT_PRESENT:
3568 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3569 break;
3570
3571 default:
3572 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3573 return rc;
3574 }
3575 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3576 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3577}
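
/*
 * Illustrative usage sketch added to this listing, not part of the original
 * source.  It shows how an instruction emulator might fetch an operand with
 * PGMPhysInterpretedRead and simply forward a raised #PF status back to the
 * caller; the helper name is an assumption for the example.
 */
#if 0 /* example only */
static int pgmExampleFetchOperand(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrOperand, uint16_t *pu16Value)
{
    /* cb must not exceed a page.  On a failed translation the function raises
       the #PF itself and returns VINF_EM_RAW_GUEST_TRAP or
       VINF_TRPM_XCPT_DISPATCHED, which the caller just propagates. */
    return PGMPhysInterpretedRead(pVCpu, pCtxCore, pu16Value, GCPtrOperand, sizeof(*pu16Value));
}
#endif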
3578
3579
3580/**
3581 * Performs a read of guest virtual memory for instruction emulation.
3582 *
3583 * This will check permissions, raise exceptions and update the access bits.
3584 *
3585 * The current implementation will bypass all access handlers. It may later be
3586 * changed to at least respect MMIO.
3587 *
3588 *
3589 * @returns VBox status code suitable to scheduling.
3590 * @retval VINF_SUCCESS if the read was performed successfully.
3591 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3592 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3593 *
3594 * @param pVCpu Handle to the current virtual CPU.
3595 * @param pCtxCore The context core.
3596 * @param pvDst Where to put the bytes we've read.
3597 * @param GCPtrSrc The source address.
3598 * @param cb The number of bytes to read. Not more than a page.
3599 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3600 * an appropriate error status will be returned (no
3601 * informational status at all).
3602 *
3603 *
3604 * @remarks Takes the PGM lock.
3605 * @remarks A page fault on the 2nd page of the access will be raised without
3606 * writing the bits on the first page since we're ASSUMING that the
3607 * caller is emulating an instruction access.
3608 * @remarks This function will dynamically map physical pages in GC. This may
3609 * unmap mappings done by the caller. Be careful!
3610 */
3611VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3612 bool fRaiseTrap)
3613{
3614 PVM pVM = pVCpu->CTX_SUFF(pVM);
3615 Assert(cb <= PAGE_SIZE);
3616 VMCPU_ASSERT_EMT(pVCpu);
3617
3618 /*
3619 * 1. Translate virtual to physical. This may fault.
3620 * 2. Map the physical address.
3621 * 3. Do the read operation.
3622 * 4. Set access bits if required.
3623 */
3624 int rc;
3625 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3626 if (cb <= cb1)
3627 {
3628 /*
3629 * Not crossing pages.
3630 */
3631 RTGCPHYS GCPhys;
3632 uint64_t fFlags;
3633 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3634 if (RT_SUCCESS(rc))
3635 {
3636 if (1) /** @todo we should check reserved bits ... */
3637 {
3638 const void *pvSrc;
3639 PGMPAGEMAPLOCK Lock;
3640 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3641 switch (rc)
3642 {
3643 case VINF_SUCCESS:
3644 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3645 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3646 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3647 PGMPhysReleasePageMappingLock(pVM, &Lock);
3648 break;
3649 case VERR_PGM_PHYS_PAGE_RESERVED:
3650 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3651 memset(pvDst, 0xff, cb);
3652 break;
3653 default:
3654 AssertMsgFailed(("%Rrc\n", rc));
3655 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3656 return rc;
3657 }
3658
3659 if (!(fFlags & X86_PTE_A))
3660 {
3661 /** @todo access bit emulation isn't 100% correct. */
3662 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3663 AssertRC(rc);
3664 }
3665 return VINF_SUCCESS;
3666 }
3667 }
3668 }
3669 else
3670 {
3671 /*
3672 * Crosses pages.
3673 */
3674 size_t cb2 = cb - cb1;
3675 uint64_t fFlags1;
3676 RTGCPHYS GCPhys1;
3677 uint64_t fFlags2;
3678 RTGCPHYS GCPhys2;
3679 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3680 if (RT_SUCCESS(rc))
3681 {
3682 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3683 if (RT_SUCCESS(rc))
3684 {
3685 if (1) /** @todo we should check reserved bits ... */
3686 {
3687 const void *pvSrc;
3688 PGMPAGEMAPLOCK Lock;
3689 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3690 switch (rc)
3691 {
3692 case VINF_SUCCESS:
3693 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3694 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3695 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3696 PGMPhysReleasePageMappingLock(pVM, &Lock);
3697 break;
3698 case VERR_PGM_PHYS_PAGE_RESERVED:
3699 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3700 memset(pvDst, 0xff, cb1);
3701 break;
3702 default:
3703 AssertMsgFailed(("%Rrc\n", rc));
3704 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3705 return rc;
3706 }
3707
3708 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3709 switch (rc)
3710 {
3711 case VINF_SUCCESS:
3712 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3713 PGMPhysReleasePageMappingLock(pVM, &Lock);
3714 break;
3715 case VERR_PGM_PHYS_PAGE_RESERVED:
3716 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3717 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3718 break;
3719 default:
3720 AssertMsgFailed(("%Rrc\n", rc));
3721 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3722 return rc;
3723 }
3724
3725 if (!(fFlags1 & X86_PTE_A))
3726 {
3727 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3728 AssertRC(rc);
3729 }
3730 if (!(fFlags2 & X86_PTE_A))
3731 {
3732 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3733 AssertRC(rc);
3734 }
3735 return VINF_SUCCESS;
3736 }
3737 /* sort out which page */
3738 }
3739 else
3740 GCPtrSrc += cb1; /* fault on 2nd page */
3741 }
3742 }
3743
3744 /*
3745 * Raise a #PF if we're allowed to do that.
3746 */
3747 /* Calc the error bits. */
3748 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3749 uint32_t uErr;
3750 switch (rc)
3751 {
3752 case VINF_SUCCESS:
3753 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3754 rc = VERR_ACCESS_DENIED;
3755 break;
3756
3757 case VERR_PAGE_NOT_PRESENT:
3758 case VERR_PAGE_TABLE_NOT_PRESENT:
3759 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3760 break;
3761
3762 default:
3763 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3764 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3765 return rc;
3766 }
3767 if (fRaiseTrap)
3768 {
3769 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3770 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3771 }
3772 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3773 return rc;
3774}
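
/*
 * Illustrative usage sketch added to this listing, not part of the original
 * source.  It contrasts the fRaiseTrap modes of PGMPhysInterpretedReadNoHandlers
 * documented above; all names other than the API itself are assumptions for
 * the example.
 */
#if 0 /* example only */
static int pgmExampleProbeGuestByte(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint8_t *pbValue)
{
    /* With fRaiseTrap=false a translation or permission failure yields an
       error status (e.g. VERR_PAGE_NOT_PRESENT) instead of injecting #PF,
       so the caller can decide what to do about it. */
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pbValue, GCPtrSrc, sizeof(*pbValue), false /*fRaiseTrap*/);
    if (RT_FAILURE(rc))
        Log(("example: probe of %RGv failed: %Rrc\n", GCPtrSrc, rc));
    return rc;
}
#endif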
3775
3776
3777/**
3778 * Performs a write to guest virtual memory for instruction emulation.
3779 *
3780 * This will check permissions, raise exceptions and update the dirty and access
3781 * bits.
3782 *
3783 * @returns VBox status code suitable to scheduling.
3784 * @retval VINF_SUCCESS if the write was performed successfully.
3785 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3786 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3787 *
3788 * @param pVCpu Handle to the current virtual CPU.
3789 * @param pCtxCore The context core.
3790 * @param GCPtrDst The destination address.
3791 * @param pvSrc What to write.
3792 * @param cb The number of bytes to write. Not more than a page.
3793 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3794 * an appropriate error status will be returned (no
3795 * informational status at all).
3796 *
3797 * @remarks Takes the PGM lock.
3798 * @remarks A page fault on the 2nd page of the access will be raised without
3799 * writing the bits on the first page since we're ASSUMING that the
3800 * caller is emulating an instruction access.
3801 * @remarks This function will dynamically map physical pages in GC. This may
3802 * unmap mappings done by the caller. Be careful!
3803 */
3804VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3805 size_t cb, bool fRaiseTrap)
3806{
3807 Assert(cb <= PAGE_SIZE);
3808 PVM pVM = pVCpu->CTX_SUFF(pVM);
3809 VMCPU_ASSERT_EMT(pVCpu);
3810
3811 /*
3812 * 1. Translate virtual to physical. This may fault.
3813 * 2. Map the physical address.
3814 * 3. Do the write operation.
3815 * 4. Set access bits if required.
3816 */
3817 /** @todo Since this method is frequently used by EMInterpret or IOM
3818 * upon a write fault to a write access monitored page, we can
3819 * reuse the guest page table walking from the \#PF code. */
3820 int rc;
3821 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3822 if (cb <= cb1)
3823 {
3824 /*
3825 * Not crossing pages.
3826 */
3827 RTGCPHYS GCPhys;
3828 uint64_t fFlags;
3829 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3830 if (RT_SUCCESS(rc))
3831 {
3832 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3833 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3834 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3835 {
3836 void *pvDst;
3837 PGMPAGEMAPLOCK Lock;
3838 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3839 switch (rc)
3840 {
3841 case VINF_SUCCESS:
3842 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3843 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3844 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3845 PGMPhysReleasePageMappingLock(pVM, &Lock);
3846 break;
3847 case VERR_PGM_PHYS_PAGE_RESERVED:
3848 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3849 /* bit bucket */
3850 break;
3851 default:
3852 AssertMsgFailed(("%Rrc\n", rc));
3853 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3854 return rc;
3855 }
3856
3857 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3858 {
3859 /** @todo dirty & access bit emulation isn't 100% correct. */
3860 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3861 AssertRC(rc);
3862 }
3863 return VINF_SUCCESS;
3864 }
3865 rc = VERR_ACCESS_DENIED;
3866 }
3867 }
3868 else
3869 {
3870 /*
3871 * Crosses pages.
3872 */
3873 size_t cb2 = cb - cb1;
3874 uint64_t fFlags1;
3875 RTGCPHYS GCPhys1;
3876 uint64_t fFlags2;
3877 RTGCPHYS GCPhys2;
3878 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3879 if (RT_SUCCESS(rc))
3880 {
3881 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3882 if (RT_SUCCESS(rc))
3883 {
3884 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3885 && (fFlags2 & X86_PTE_RW))
3886 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3887 && CPUMGetGuestCPL(pVCpu) <= 2) )
3888 {
3889 void *pvDst;
3890 PGMPAGEMAPLOCK Lock;
3891 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3892 switch (rc)
3893 {
3894 case VINF_SUCCESS:
3895 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3896 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3897 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3898 PGMPhysReleasePageMappingLock(pVM, &Lock);
3899 break;
3900 case VERR_PGM_PHYS_PAGE_RESERVED:
3901 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3902 /* bit bucket */
3903 break;
3904 default:
3905 AssertMsgFailed(("%Rrc\n", rc));
3906 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3907 return rc;
3908 }
3909
3910 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3911 switch (rc)
3912 {
3913 case VINF_SUCCESS:
3914 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3915 PGMPhysReleasePageMappingLock(pVM, &Lock);
3916 break;
3917 case VERR_PGM_PHYS_PAGE_RESERVED:
3918 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3919 /* bit bucket */
3920 break;
3921 default:
3922 AssertMsgFailed(("%Rrc\n", rc));
3923 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3924 return rc;
3925 }
3926
3927 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3928 {
3929 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3930 AssertRC(rc);
3931 }
3932 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3933 {
3934 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3935 AssertRC(rc);
3936 }
3937 return VINF_SUCCESS;
3938 }
3939 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3940 GCPtrDst += cb1; /* fault on the 2nd page. */
3941 rc = VERR_ACCESS_DENIED;
3942 }
3943 else
3944 GCPtrDst += cb1; /* fault on the 2nd page. */
3945 }
3946 }
3947
3948 /*
3949 * Raise a #PF if we're allowed to do that.
3950 */
3951 /* Calc the error bits. */
3952 uint32_t uErr;
3953 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3954 switch (rc)
3955 {
3956 case VINF_SUCCESS:
3957 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3958 rc = VERR_ACCESS_DENIED;
3959 break;
3960
3961 case VERR_ACCESS_DENIED:
3962 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3963 break;
3964
3965 case VERR_PAGE_NOT_PRESENT:
3966 case VERR_PAGE_TABLE_NOT_PRESENT:
3967 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3968 break;
3969
3970 default:
3971 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3972 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3973 return rc;
3974 }
3975 if (fRaiseTrap)
3976 {
3977 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3978 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3979 }
3980 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3981 return rc;
3982}
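
/*
 * Illustrative usage sketch added to this listing, not part of the original
 * source.  It shows the kind of store an emulator would perform with
 * PGMPhysInterpretedWriteNoHandlers after intercepting a write; the helper
 * name and value are assumptions for the example.
 */
#if 0 /* example only */
static int pgmExampleEmulateStoreDword(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    /* fRaiseTrap=true: permission and translation failures are turned into a
       guest #PF via TRPMRaiseXcptErrCR2, so VINF_EM_RAW_GUEST_TRAP and
       VINF_TRPM_XCPT_DISPATCHED are expected outcomes besides VINF_SUCCESS. */
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &u32Value, sizeof(u32Value), true /*fRaiseTrap*/);
}
#endif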
3983
3984
3985/**
3986 * Return the page type of the specified physical address.
3987 *
3988 * @returns The page type.
3989 * @param pVM Pointer to the VM.
3990 * @param GCPhys Guest physical address
3991 */
3992VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
3993{
3994 pgmLock(pVM);
3995 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3996 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
3997 pgmUnlock(pVM);
3998
3999 return enmPgType;
4000}
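
/*
 * Illustrative usage sketch added to this listing, not part of the original
 * source.  It shows a simple query against PGMPhysGetPageType; the helper
 * and the specific check are assumptions for the example.
 */
#if 0 /* example only */
static bool pgmExampleIsMmioPage(PVM pVM, RTGCPHYS GCPhys)
{
    /* PGMPAGETYPE_INVALID is returned for addresses outside any RAM range. */
    PGMPAGETYPE enmType = PGMPhysGetPageType(pVM, GCPhys);
    return enmType == PGMPAGETYPE_MMIO;
}
#endif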
4001
4002
4003
4004
4005/**
4006 * Converts a GC physical address to a HC ring-3 pointer, with some
4007 * additional checks.
4008 *
4009 * @returns VBox status code (no informational statuses).
4010 * @retval VINF_SUCCESS on success.
4011 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4012 * access handler of some kind.
4013 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4014 * accesses or is odd in any way.
4015 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4016 *
4017 * @param pVM Pointer to the VM.
4018 * @param GCPhys The GC physical address to convert. Since this is only
4019 * used for filling the REM TLB, the A20 mask must be
4020 * applied before calling this API.
4021 * @param fWritable Whether write access is required.
 * @param fByPassHandlers Whether to bypass access handlers.
4022 * @param ppv Where to store the pointer corresponding to GCPhys on
4023 * success.
4024 * @param pLock Where to store the lock for releasing the page
 * mapping (see PGMPhysReleasePageMappingLock).
4025 *
4026 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4027 */
4028VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4029 void **ppv, PPGMPAGEMAPLOCK pLock)
4030{
4031 pgmLock(pVM);
4032 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4033
4034 PPGMRAMRANGE pRam;
4035 PPGMPAGE pPage;
4036 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4037 if (RT_SUCCESS(rc))
4038 {
4039 if (PGM_PAGE_IS_BALLOONED(pPage))
4040 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4041 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4042 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4043 rc = VINF_SUCCESS;
4044 else
4045 {
4046 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4047 {
4048 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4049 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4050 }
4051 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4052 {
4053 Assert(!fByPassHandlers);
4054 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4055 }
4056 }
4057 if (RT_SUCCESS(rc))
4058 {
4059 int rc2;
4060
4061 /* Make sure what we return is writable. */
4062 if (fWritable)
4063 switch (PGM_PAGE_GET_STATE(pPage))
4064 {
4065 case PGM_PAGE_STATE_ALLOCATED:
4066 break;
4067 case PGM_PAGE_STATE_BALLOONED:
4068 AssertFailed();
4069 case PGM_PAGE_STATE_ZERO:
4070 case PGM_PAGE_STATE_SHARED:
4071 case PGM_PAGE_STATE_WRITE_MONITORED:
4072 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4073 AssertLogRelRCReturn(rc2, rc2);
4074 break;
4075 }
4076
4077#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4078 PVMCPU pVCpu = VMMGetCpu(pVM);
4079 void *pv;
4080 rc = pgmRZDynMapHCPageInlined(pVCpu,
4081 PGM_PAGE_GET_HCPHYS(pPage),
4082 &pv
4083 RTLOG_COMMA_SRC_POS);
4084 if (RT_FAILURE(rc))
4085 return rc;
4086 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4087 pLock->pvPage = pv;
4088 pLock->pVCpu = pVCpu;
4089
4090#else
4091 /* Get a ring-3 mapping of the address. */
4092 PPGMPAGER3MAPTLBE pTlbe;
4093 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4094 AssertLogRelRCReturn(rc2, rc2);
4095
4096 /* Lock it and calculate the address. */
4097 if (fWritable)
4098 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4099 else
4100 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4101 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4102#endif
4103
4104 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4105 }
4106 else
4107 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4108
4109 /* else: handler catching all access, no pointer returned. */
4110 }
4111 else
4112 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4113
4114 pgmUnlock(pVM);
4115 return rc;
4116}
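
/*
 * Illustrative usage sketch added to this listing, not part of the original
 * source.  It shows how a caller such as IEM might use PGMPhysIemGCPhys2Ptr
 * and fall back to the regular access path when a handler status is
 * returned; the helper name is an assumption for the example.
 */
#if 0 /* example only */
static int pgmExampleMapGuestPageForWrite(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
{
    /* The A20 mask must already be applied to GCPhys, and the mapping must be
       released with PGMPhysReleasePageMappingLock once *ppv is no longer used. */
    int rc = PGMPhysIemGCPhys2Ptr(pVM, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, ppv, pLock);
    if (   rc == VERR_PGM_PHYS_TLB_CATCH_WRITE
        || rc == VERR_PGM_PHYS_TLB_CATCH_ALL)
        Log(("example: %RGp has access handlers (%Rrc), use PGMPhysWrite instead\n", GCPhys, rc));
    return rc;
}
#endif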
4117