source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@31996

Last change on this file: r31996, checked in by vboxsync on 2010-08-26

Mostly safe X86_PTE_PAE_PG_MASK -> X86_PTE_PAE_PG_MASK_FULL conversions.

1/* $Id: PGMAllPhys.cpp 31996 2010-08-26 13:32:30Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/pgm.h>
23#include <VBox/trpm.h>
24#include <VBox/vmm.h>
25#include <VBox/iom.h>
26#include <VBox/em.h>
27#include <VBox/rem.h>
28#include "../PGMInternal.h"
29#include <VBox/vm.h>
30#include "../PGMInline.h"
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35#include <iprt/asm-amd64-x86.h>
36#include <VBox/log.h>
37#ifdef IN_RING3
38# include <iprt/thread.h>
39#endif
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/** Enable the physical TLB. */
46#define PGM_WITH_PHYS_TLB
47
48
49
50#ifndef IN_RING3
51
52/**
53 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
54 * This simply pushes everything to the HC handler.
55 *
56 * @returns VBox status code (appropriate for trap handling and GC return).
57 * @param pVM VM Handle.
58 * @param uErrorCode CPU Error code.
59 * @param pRegFrame Trap register frame.
60 * @param pvFault The fault address (cr2).
61 * @param GCPhysFault The GC physical address corresponding to pvFault.
62 * @param pvUser User argument.
63 */
64VMMDECL(int) pgmPhysHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
65{
66 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_HC_MMIO_WRITE : VINF_IOM_HC_MMIO_READ;
67}
68
69
70/**
71 * \#PF Handler callback for Guest ROM range write access.
72 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
73 *
74 * @returns VBox status code (appropriate for trap handling and GC return).
75 * @param pVM VM Handle.
76 * @param uErrorCode CPU Error code.
77 * @param pRegFrame Trap register frame.
78 * @param pvFault The fault address (cr2).
79 * @param GCPhysFault The GC physical address corresponding to pvFault.
80 * @param pvUser User argument. Pointer to the ROM range structure.
81 */
82VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
83{
84 int rc;
85 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
86 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
87 PVMCPU pVCpu = VMMGetCpu(pVM);
88
89 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
90 switch (pRom->aPages[iPage].enmProt)
91 {
92 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
93 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
94 {
95 /*
96 * If it's a simple instruction which doesn't change the cpu state
97 * we will simply skip it. Otherwise we'll have to defer it to REM.
98 */
99 uint32_t cbOp;
100 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
101 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
102 if ( RT_SUCCESS(rc)
103 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
104 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
105 {
106 switch (pDis->opcode)
107 {
108 /** @todo Find other instructions we can safely skip, possibly
109 * adding this kind of detection to DIS or EM. */
110 case OP_MOV:
111 pRegFrame->rip += cbOp;
112 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
113 return VINF_SUCCESS;
114 }
115 }
116 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
117 return rc;
118 break;
119 }
120
121 case PGMROMPROT_READ_RAM_WRITE_RAM:
122 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
123 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
124 AssertRC(rc);
125 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
126
127 case PGMROMPROT_READ_ROM_WRITE_RAM:
128 /* Handle it in ring-3 because it's *way* easier there. */
129 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
130 break;
131
132 default:
133 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
134 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
135 VERR_INTERNAL_ERROR);
136 }
137
138 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
139 return VINF_EM_RAW_EMULATE_INSTR;
140}
141
142#endif /* !IN_RING3 */
143
144/**
145 * Checks if Address Gate 20 is enabled or not.
146 *
147 * @returns true if enabled.
148 * @returns false if disabled.
149 * @param pVCpu VMCPU handle.
150 */
151VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
152{
153 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
154 return pVCpu->pgm.s.fA20Enabled;
155}
156
157
158/**
159 * Validates a GC physical address.
160 *
161 * @returns true if valid.
162 * @returns false if invalid.
163 * @param pVM The VM handle.
164 * @param GCPhys The physical address to validate.
165 */
166VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
167{
168 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
169 return pPage != NULL;
170}
171
172
173/**
174 * Checks if a GC physical address is a normal page,
175 * i.e. not ROM, MMIO or reserved.
176 *
177 * @returns true if normal.
178 * @returns false if invalid, ROM, MMIO or reserved page.
179 * @param pVM The VM handle.
180 * @param GCPhys The physical address to check.
181 */
182VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
183{
184 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
185 return pPage
186 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
187}
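
/*
 * Minimal usage sketch: a guard before treating a guest page as plain RAM.
 * pVM and GCPhys are assumed to be supplied by the caller; the error code is
 * just one plausible choice.
 *
 *     if (!PGMPhysIsGCPhysNormal(pVM, GCPhys))
 *         return VERR_PGM_PHYS_PAGE_RESERVED;
 */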
188
189
190/**
191 * Converts a GC physical address to a HC physical address.
192 *
193 * @returns VINF_SUCCESS on success.
194 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
195 * page but has no physical backing.
196 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
197 * GC physical address.
198 *
199 * @param pVM The VM handle.
200 * @param GCPhys The GC physical address to convert.
201 * @param pHCPhys Where to store the HC physical address on success.
202 */
203VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
204{
205 pgmLock(pVM);
206 PPGMPAGE pPage;
207 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
208 if (RT_SUCCESS(rc))
209 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
210 pgmUnlock(pVM);
211 return rc;
212}
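
/*
 * Minimal usage sketch, assuming a valid pVM and a GCPhys backed by real memory;
 * the Log statement merely stands in for whatever the caller does with the result.
 *
 *     RTHCPHYS HCPhys;
 *     int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
 */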
213
214
215/**
216 * Invalidates all page mapping TLBs.
217 *
218 * @param pVM The VM handle.
219 */
220VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
221{
222 pgmLock(pVM);
223 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
224 /* Clear the shared R0/R3 TLB completely. */
225 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
226 {
227 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
228 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
229 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
230 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
231 }
232 /** @todo clear the RC TLB whenever we add it. */
233 pgmUnlock(pVM);
234}
235
236/**
237 * Invalidates a page mapping TLB entry
238 *
239 * @param pVM The VM handle.
240 * @param GCPhys GCPhys entry to flush
241 */
242VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
243{
244 Assert(PGMIsLocked(pVM));
245
246 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
247 /* Clear the shared R0/R3 TLB entry. */
248#ifdef IN_RC
249 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
250 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
251 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
252 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
253 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
254#else
255 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
256 pTlbe->GCPhys = NIL_RTGCPHYS;
257 pTlbe->pPage = 0;
258 pTlbe->pMap = 0;
259 pTlbe->pv = 0;
260#endif
261 /** @todo clear the RC TLB whenever we add it. */
262}
263
264/**
265 * Makes sure that there is at least one handy page ready for use.
266 *
267 * This will also take the appropriate actions when reaching water-marks.
268 *
269 * @returns VBox status code.
270 * @retval VINF_SUCCESS on success.
271 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
272 *
273 * @param pVM The VM handle.
274 *
275 * @remarks Must be called from within the PGM critical section. It may
276 * nip back to ring-3/0 in some cases.
277 */
278static int pgmPhysEnsureHandyPage(PVM pVM)
279{
280 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
281
282 /*
283 * Do we need to do anything special?
284 */
285#ifdef IN_RING3
286 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
287#else
288 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
289#endif
290 {
291 /*
292 * Allocate pages only if we're out of them, or in ring-3, almost out.
293 */
294#ifdef IN_RING3
295 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
296#else
297 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
298#endif
299 {
300 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
301 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
302#ifdef IN_RING3
303 int rc = PGMR3PhysAllocateHandyPages(pVM);
304#else
305 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
306#endif
307 if (RT_UNLIKELY(rc != VINF_SUCCESS))
308 {
309 if (RT_FAILURE(rc))
310 return rc;
311 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
312 if (!pVM->pgm.s.cHandyPages)
313 {
314 LogRel(("PGM: no more handy pages!\n"));
315 return VERR_EM_NO_MEMORY;
316 }
317 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
318 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
319#ifdef IN_RING3
320 REMR3NotifyFF(pVM);
321#else
322 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
323#endif
324 }
325 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
326 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
327 ("%u\n", pVM->pgm.s.cHandyPages),
328 VERR_INTERNAL_ERROR);
329 }
330 else
331 {
332 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
333 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
334#ifndef IN_RING3
335 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
336 {
337 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
338 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
339 }
340#endif
341 }
342 }
343
344 return VINF_SUCCESS;
345}
346
347
348/**
349 * Replace a zero or shared page with a new page that we can write to.
350 *
351 * @returns The following VBox status codes.
352 * @retval VINF_SUCCESS on success, pPage is modified.
353 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
354 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
355 *
356 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
357 *
358 * @param pVM The VM address.
359 * @param pPage The physical page tracking structure. This will
360 * be modified on success.
361 * @param GCPhys The address of the page.
362 *
363 * @remarks Must be called from within the PGM critical section. It may
364 * nip back to ring-3/0 in some cases.
365 *
366 * @remarks This function shouldn't really fail; however, if it does,
367 * it probably means we've screwed up the size of handy pages and/or
368 * the low-water mark. Or, that some device I/O is causing a lot of
369 * pages to be allocated while the host is in a low-memory
370 * condition. The latter should be handled elsewhere and in a more
371 * controlled manner; it's on the @bugref{3170} todo list...
372 */
373int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
374{
375 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
376
377 /*
378 * Prereqs.
379 */
380 Assert(PGMIsLocked(pVM));
381 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
382 Assert(!PGM_PAGE_IS_MMIO(pPage));
383
384# ifdef PGM_WITH_LARGE_PAGES
385 if ( PGMIsUsingLargePages(pVM)
386 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
387 {
388 int rc = pgmPhysAllocLargePage(pVM, GCPhys);
389 if (rc == VINF_SUCCESS)
390 return rc;
391
392 /* fall back to 4KB pages. */
393 }
394# endif
395
396 /*
397 * Flush any shadow page table mappings of the page.
398 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
399 */
400 bool fFlushTLBs = false;
401 int rc = pgmPoolTrackFlushGCPhys(pVM, GCPhys, pPage, &fFlushTLBs);
402 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
403
404 /*
405 * Ensure that we've got a page handy, take it and use it.
406 */
407 int rc2 = pgmPhysEnsureHandyPage(pVM);
408 if (RT_FAILURE(rc2))
409 {
410 if (fFlushTLBs)
411 PGM_INVL_ALL_VCPU_TLBS(pVM);
412 Assert(rc2 == VERR_EM_NO_MEMORY);
413 return rc2;
414 }
415 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
416 Assert(PGMIsLocked(pVM));
417 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
418 Assert(!PGM_PAGE_IS_MMIO(pPage));
419
420 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
421 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
422 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
423 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
424 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
425 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
426
427 /*
428 * There are one or two actions to be taken the next time we allocate handy pages:
429 * - Tell the GMM (global memory manager) what the page is being used for.
430 * (Speeds up replacement operations - sharing and defragmenting.)
431 * - If the current backing is shared, it must be freed.
432 */
433 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
434 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
435
436 const void *pvSharedPage = NULL;
437
438 if (PGM_PAGE_IS_SHARED(pPage))
439 {
440 /* Mark this shared page for freeing/dereferencing. */
441 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
442 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
443
444 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
445 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
446 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
447 pVM->pgm.s.cSharedPages--;
448
449 /* Grab the address of the page so we can make a copy later on. */
450 rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
451 AssertRC(rc);
452 }
453 else
454 {
455 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
456 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
457 pVM->pgm.s.cZeroPages--;
458 }
459
460 /*
461 * Do the PGMPAGE modifications.
462 */
463 pVM->pgm.s.cPrivatePages++;
464 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
465 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
466 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
467 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
468 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
469
470 /* Copy the shared page contents to the replacement page. */
471 if (pvSharedPage)
472 {
473 /* Get the virtual address of the new page. */
474 void *pvNewPage;
475 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage);
476 AssertRC(rc);
477 if (rc == VINF_SUCCESS)
478 {
479 /** @todo write ASMMemCopyPage */
480 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE);
481 }
482 }
483
484 if ( fFlushTLBs
485 && rc != VINF_PGM_GCPHYS_ALIASED)
486 PGM_INVL_ALL_VCPU_TLBS(pVM);
487 return rc;
488}
489
490#ifdef PGM_WITH_LARGE_PAGES
491/**
492 * Replace a 2 MB range of zero pages with new pages that we can write to.
493 *
494 * @returns The following VBox status codes.
495 * @retval VINF_SUCCESS on success, the pages in the range are modified.
496 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
497 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
498 *
499 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
500 *
501 * @param pVM The VM address.
502 * @param GCPhys The address of the page.
503 *
504 * @remarks Must be called from within the PGM critical section. It may
505 * nip back to ring-3/0 in some cases.
506 */
507int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
508{
509 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
510 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
511
512 /*
513 * Prereqs.
514 */
515 Assert(PGMIsLocked(pVM));
516 Assert(PGMIsUsingLargePages(pVM));
517
518 PPGMPAGE pPage;
519 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
520 if ( RT_SUCCESS(rc)
521 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
522 {
523 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
524
525 /* Don't call this function for already allocated pages. */
526 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
527
528 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
529 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
530 {
531 unsigned iPage;
532
533 GCPhys = GCPhysBase;
534
535 /* Lazy approach: check all pages in the 2 MB range.
536 * The whole range must be RAM and unallocated.
537 */
538 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
539 {
540 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
541 if ( RT_FAILURE(rc)
542 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
543 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO) /* allocated, monitored or shared means we can't use a large page here */
544 {
545 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
546 break;
547 }
548 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
549 GCPhys += PAGE_SIZE;
550 }
551 /* Fetch the start page of the 2 MB range again. */
552 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
553 AssertRC(rc); /* can't fail */
554
555 if (iPage != _2M/PAGE_SIZE)
556 {
557 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
558 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
559 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
560 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
561 }
562 else
563 {
564# ifdef IN_RING3
565 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
566# else
567 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
568# endif
569 if (RT_SUCCESS(rc))
570 {
571 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
572 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageAlloc);
573 return VINF_SUCCESS;
574 }
575 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
576
577 /* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
578 PGMSetLargePageUsage(pVM, false);
579 return rc;
580 }
581 }
582 }
583 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
584}
585
586/**
587 * Recheck the entire 2 MB range to see if we can use it again as a large page.
588 *
589 * @returns The following VBox status codes.
590 * @retval VINF_SUCCESS on success, the large page can be used again
591 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
592 *
593 * @param pVM The VM address.
594 * @param GCPhys The address of the page.
595 * @param pLargePage Page structure of the base page
596 */
597int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
598{
599 unsigned i;
600
601 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
602
603 GCPhys &= X86_PDE2M_PAE_PG_MASK;
604
605 /* Check the base page. */
606 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
607 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
608 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
609 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
610 {
611 LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
612 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
613 }
614
615 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
616 /* Check all remaining pages in the 2 MB range. */
617 GCPhys += PAGE_SIZE;
618 for (i = 1; i < _2M/PAGE_SIZE; i++)
619 {
620 PPGMPAGE pPage;
621 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
622 AssertRCBreak(rc);
623
624 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
625 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
626 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
627 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
628 {
629 LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
630 break;
631 }
632
633 GCPhys += PAGE_SIZE;
634 }
635 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
636
637 if (i == _2M/PAGE_SIZE)
638 {
639 PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
640 Log(("pgmPhysIsValidLargePage: page %RGp can be reused!\n", GCPhys - _2M));
641 return VINF_SUCCESS;
642 }
643
644 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
645}
646
647#endif /* PGM_WITH_LARGE_PAGES */
648
649/**
650 * Deal with a write monitored page.
651 *
654 * @param pVM The VM address.
655 * @param pPage The physical page tracking structure.
656 *
657 * @remarks Called from within the PGM critical section.
658 */
659void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
660{
661 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
662 PGM_PAGE_SET_WRITTEN_TO(pPage);
663 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
664 Assert(pVM->pgm.s.cMonitoredPages > 0);
665 pVM->pgm.s.cMonitoredPages--;
666 pVM->pgm.s.cWrittenToPages++;
667}
668
669
670/**
671 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
672 *
673 * @returns VBox strict status code.
674 * @retval VINF_SUCCESS on success.
675 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
676 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
677 *
678 * @param pVM The VM address.
679 * @param pPage The physical page tracking structure.
680 * @param GCPhys The address of the page.
681 *
682 * @remarks Called from within the PGM critical section.
683 */
684int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
685{
686 Assert(PGMIsLockOwner(pVM));
687 switch (PGM_PAGE_GET_STATE(pPage))
688 {
689 case PGM_PAGE_STATE_WRITE_MONITORED:
690 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
691 /* fall thru */
692 default: /* to shut up GCC */
693 case PGM_PAGE_STATE_ALLOCATED:
694 return VINF_SUCCESS;
695
696 /*
697 * Zero pages can be dummy pages for MMIO or reserved memory,
698 * so we need to check the flags before joining cause with
699 * shared page replacement.
700 */
701 case PGM_PAGE_STATE_ZERO:
702 if (PGM_PAGE_IS_MMIO(pPage))
703 return VERR_PGM_PHYS_PAGE_RESERVED;
704 /* fall thru */
705 case PGM_PAGE_STATE_SHARED:
706 return pgmPhysAllocPage(pVM, pPage, GCPhys);
707
708 /* Not allowed to write to ballooned pages. */
709 case PGM_PAGE_STATE_BALLOONED:
710 return VERR_PGM_PHYS_PAGE_BALLOONED;
711 }
712}
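
/*
 * Typical caller pattern (sketch; assumes the PGM lock is held and that pPage
 * and GCPhys describe the same guest page), mirroring the use in
 * pgmPhysGCPhys2CCPtrInternal further down:
 *
 *     if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
 *     {
 *         rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
 *         if (RT_FAILURE(rc))
 *             return rc;
 *     }
 */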
713
714
715/**
716 * Internal usage: Map the page specified by its GMM ID.
717 *
718 * This is similar to pgmPhysPageMap.
719 *
720 * @returns VBox status code.
721 *
722 * @param pVM The VM handle.
723 * @param idPage The Page ID.
724 * @param HCPhys The physical address (for RC).
725 * @param ppv Where to store the mapping address.
726 *
727 * @remarks Called from within the PGM critical section. The mapping is only
728 * valid while you're inside this section.
729 */
730int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
731{
732 /*
733 * Validation.
734 */
735 Assert(PGMIsLocked(pVM));
736 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
737 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
738 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
739
740#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
741 /*
742 * Map it by HCPhys.
743 */
744 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
745
746#else
747 /*
748 * Find/make Chunk TLB entry for the mapping chunk.
749 */
750 PPGMCHUNKR3MAP pMap;
751 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
752 if (pTlbe->idChunk == idChunk)
753 {
754 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
755 pMap = pTlbe->pChunk;
756 }
757 else
758 {
759 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
760
761 /*
762 * Find the chunk, map it if necessary.
763 */
764 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
765 if (!pMap)
766 {
767# ifdef IN_RING0
768 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
769 AssertRCReturn(rc, rc);
770 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
771 Assert(pMap);
772# else
773 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
774 if (RT_FAILURE(rc))
775 return rc;
776# endif
777 }
778
779 /*
780 * Enter it into the Chunk TLB.
781 */
782 pTlbe->idChunk = idChunk;
783 pTlbe->pChunk = pMap;
784 pMap->iAge = 0;
785 }
786
787 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
788 return VINF_SUCCESS;
789#endif
790}
791
792
793/**
794 * Maps a page into the current virtual address space so it can be accessed.
795 *
796 * @returns VBox status code.
797 * @retval VINF_SUCCESS on success.
798 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
799 *
800 * @param pVM The VM address.
801 * @param pPage The physical page tracking structure.
802 * @param GCPhys The address of the page.
803 * @param ppMap Where to store the address of the mapping tracking structure.
804 * @param ppv Where to store the mapping address of the page. The page
805 * offset is masked off!
806 *
807 * @remarks Called from within the PGM critical section.
808 */
809static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
810{
811 Assert(PGMIsLocked(pVM));
812
813#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
814 /*
815 * Just some sketchy GC/R0-darwin code.
816 */
817 *ppMap = NULL;
818 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
819 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
820 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
821 return VINF_SUCCESS;
822
823#else /* IN_RING3 || IN_RING0 */
824
825
826 /*
827 * Special case: ZERO and MMIO2 pages.
828 */
829 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
830 if (idChunk == NIL_GMM_CHUNKID)
831 {
832 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
833 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
834 {
835 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
836 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
837 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
838 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys));
839 }
840 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
841 {
842 /** @todo deal with aliased MMIO2 pages somehow...
843 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
844 * them, that would also avoid this mess. It would actually be kind of
845 * elegant... */
846 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
847 }
848 else
849 {
850 /** @todo handle MMIO2 */
851 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
852 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
853 ("pPage=%R[pgmpage]\n", pPage),
854 VERR_INTERNAL_ERROR_2);
855 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
856 }
857 *ppMap = NULL;
858 return VINF_SUCCESS;
859 }
860
861 /*
862 * Find/make Chunk TLB entry for the mapping chunk.
863 */
864 PPGMCHUNKR3MAP pMap;
865 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
866 if (pTlbe->idChunk == idChunk)
867 {
868 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
869 pMap = pTlbe->pChunk;
870 }
871 else
872 {
873 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
874
875 /*
876 * Find the chunk, map it if necessary.
877 */
878 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
879 if (!pMap)
880 {
881#ifdef IN_RING0
882 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
883 AssertRCReturn(rc, rc);
884 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
885 Assert(pMap);
886#else
887 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
888 if (RT_FAILURE(rc))
889 return rc;
890#endif
891 }
892
893 /*
894 * Enter it into the Chunk TLB.
895 */
896 pTlbe->idChunk = idChunk;
897 pTlbe->pChunk = pMap;
898 pMap->iAge = 0;
899 }
900
901 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
902 *ppMap = pMap;
903 return VINF_SUCCESS;
904#endif /* IN_RING3 */
905}
906
907
908/**
909 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
910 *
911 * This is typically used in paths where we cannot use the TLB methods (like ROM
912 * pages) or where there is no point in using them since we won't get many hits.
913 *
914 * @returns VBox strict status code.
915 * @retval VINF_SUCCESS on success.
916 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
917 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
918 *
919 * @param pVM The VM address.
920 * @param pPage The physical page tracking structure.
921 * @param GCPhys The address of the page.
922 * @param ppv Where to store the mapping address of the page. The page
923 * offset is masked off!
924 *
925 * @remarks Called from within the PGM critical section. The mapping is only
926 * valid while you're inside this section.
927 */
928int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
929{
930 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
931 if (RT_SUCCESS(rc))
932 {
933 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
934 PPGMPAGEMAP pMapIgnore;
935 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
936 if (RT_FAILURE(rc2)) /* preserve rc */
937 rc = rc2;
938 }
939 return rc;
940}
941
942
943/**
944 * Maps a page into the current virtual address space so it can be accessed for
945 * both writing and reading.
946 *
947 * This is typically used in paths where we cannot use the TLB methods (like ROM
948 * pages) or where there is no point in using them since we won't get many hits.
949 *
950 * @returns VBox status code.
951 * @retval VINF_SUCCESS on success.
952 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
953 *
954 * @param pVM The VM address.
955 * @param pPage The physical page tracking structure. Must be in the
956 * allocated state.
957 * @param GCPhys The address of the page.
958 * @param ppv Where to store the mapping address of the page. The page
959 * offset is masked off!
960 *
961 * @remarks Called from within the PGM critical section. The mapping is only
962 * valid while you're inside this section.
963 */
964int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
965{
966 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
967 PPGMPAGEMAP pMapIgnore;
968 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
969}
970
971
972/**
973 * Maps a page into the current virtual address space so it can be accessed for
974 * reading.
975 *
976 * This is typically used in paths where we cannot use the TLB methods (like ROM
977 * pages) or where there is no point in using them since we won't get many hits.
978 *
979 * @returns VBox status code.
980 * @retval VINF_SUCCESS on success.
981 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
982 *
983 * @param pVM The VM address.
984 * @param pPage The physical page tracking structure.
985 * @param GCPhys The address of the page.
986 * @param ppv Where to store the mapping address of the page. The page
987 * offset is masked off!
988 *
989 * @remarks Called from within the PGM critical section. The mapping is only
990 * valid while you're inside this section.
991 */
992int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
993{
994 PPGMPAGEMAP pMapIgnore;
995 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
996}
997
998
999#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1000/**
1001 * Load a guest page into the ring-3 physical TLB.
1002 *
1003 * @returns VBox status code.
1004 * @retval VINF_SUCCESS on success
1005 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1006 * @param pPGM The PGM instance pointer.
1007 * @param GCPhys The guest physical address in question.
1008 */
1009int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
1010{
1011 Assert(PGMIsLocked(PGM2VM(pPGM)));
1012
1013 /*
1014 * Find the ram range and page and hand it over to the with-page function.
1015 * 99.8% of requests are expected to be in the first range.
1016 */
1017 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
1018 RTGCPHYS off = GCPhys - pRam->GCPhys;
1019 if (RT_UNLIKELY(off >= pRam->cb))
1020 {
1021 do
1022 {
1023 pRam = pRam->CTX_SUFF(pNext);
1024 if (!pRam)
1025 {
1026 STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1027 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1028 }
1029 off = GCPhys - pRam->GCPhys;
1030 } while (off >= pRam->cb);
1031 }
1032
1033 return pgmPhysPageLoadIntoTlbWithPage(pPGM, &pRam->aPages[off >> PAGE_SHIFT], GCPhys);
1034}
1035
1036
1037/**
1038 * Load a guest page into the ring-3 physical TLB.
1039 *
1040 * @returns VBox status code.
1041 * @retval VINF_SUCCESS on success
1042 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1043 *
1044 * @param pPGM The PGM instance pointer.
1045 * @param pPage Pointer to the PGMPAGE structure corresponding to
1046 * GCPhys.
1047 * @param GCPhys The guest physical address in question.
1048 */
1049int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1050{
1051 Assert(PGMIsLocked(PGM2VM(pPGM)));
1052 STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1053
1054 /*
1055 * Map the page.
1056 * Make a special case for the zero page as it is kind of special.
1057 */
1058 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1059 if ( !PGM_PAGE_IS_ZERO(pPage)
1060 && !PGM_PAGE_IS_BALLOONED(pPage))
1061 {
1062 void *pv;
1063 PPGMPAGEMAP pMap;
1064 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
1065 if (RT_FAILURE(rc))
1066 return rc;
1067 pTlbe->pMap = pMap;
1068 pTlbe->pv = pv;
1069 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1070 }
1071 else
1072 {
1073 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
1074 pTlbe->pMap = NULL;
1075 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
1076 }
1077#ifdef PGM_WITH_PHYS_TLB
1078 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1079 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1080 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK_FULL;
1081 else
1082 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1083#else
1084 pTlbe->GCPhys = NIL_RTGCPHYS;
1085#endif
1086 pTlbe->pPage = pPage;
1087 return VINF_SUCCESS;
1088}
1089#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1090
1091
1092/**
1093 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1094 * own the PGM lock and therefore not need to lock the mapped page.
1095 *
1096 * @returns VBox status code.
1097 * @retval VINF_SUCCESS on success.
1098 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1099 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1100 *
1101 * @param pVM The VM handle.
1102 * @param GCPhys The guest physical address of the page that should be mapped.
1103 * @param pPage Pointer to the PGMPAGE structure for the page.
1104 * @param ppv Where to store the address corresponding to GCPhys.
1105 *
1106 * @internal
1107 */
1108int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1109{
1110 int rc;
1111 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1112 Assert(PGMIsLocked(pVM));
1113
1114 /*
1115 * Make sure the page is writable.
1116 */
1117 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1118 {
1119 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1120 if (RT_FAILURE(rc))
1121 return rc;
1122 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1123 }
1124 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1125
1126 /*
1127 * Get the mapping address.
1128 */
1129#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1130 void *pv;
1131 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1132 PGM_PAGE_GET_HCPHYS(pPage),
1133 &pv
1134 RTLOG_COMMA_SRC_POS);
1135 if (RT_FAILURE(rc))
1136 return rc;
1137 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1138#else
1139 PPGMPAGEMAPTLBE pTlbe;
1140 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1141 if (RT_FAILURE(rc))
1142 return rc;
1143 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1144#endif
1145 return VINF_SUCCESS;
1146}
1147
1148
1149/**
1150 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1151 * own the PGM lock and therefore not need to lock the mapped page.
1152 *
1153 * @returns VBox status code.
1154 * @retval VINF_SUCCESS on success.
1155 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1156 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1157 *
1158 * @param pVM The VM handle.
1159 * @param GCPhys The guest physical address of the page that should be mapped.
1160 * @param pPage Pointer to the PGMPAGE structure for the page.
1161 * @param ppv Where to store the address corresponding to GCPhys.
1162 *
1163 * @internal
1164 */
1165int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1166{
1167 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1168 Assert(PGMIsLocked(pVM));
1169 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1170
1171 /*
1172 * Get the mapping address.
1173 */
1174#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1175 void *pv;
1176 int rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1177 PGM_PAGE_GET_HCPHYS(pPage),
1178 &pv
1179 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1180 if (RT_FAILURE(rc))
1181 return rc;
1182 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1183#else
1184 PPGMPAGEMAPTLBE pTlbe;
1185 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1186 if (RT_FAILURE(rc))
1187 return rc;
1188 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1189#endif
1190 return VINF_SUCCESS;
1191}
1192
1193
1194/**
1195 * Requests the mapping of a guest page into the current context.
1196 *
1197 * This API should only be used for very short-term mappings, as it will consume
1198 * scarce resources (R0 and GC) in the mapping cache. When you're done
1199 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1200 *
1201 * This API will assume your intention is to write to the page, and will
1202 * therefore replace shared and zero pages. If you do not intend to modify
1203 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1204 *
1205 * @returns VBox status code.
1206 * @retval VINF_SUCCESS on success.
1207 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1208 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1209 *
1210 * @param pVM The VM handle.
1211 * @param GCPhys The guest physical address of the page that should be mapped.
1212 * @param ppv Where to store the address corresponding to GCPhys.
1213 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1214 *
1215 * @remarks The caller is responsible for dealing with access handlers.
1216 * @todo Add an informational return code for pages with access handlers?
1217 *
1218 * @remark Avoid calling this API from within critical sections (other than the
1219 * PGM one) because of the deadlock risk. External threads may need to
1220 * delegate jobs to the EMTs.
1221 * @thread Any thread.
1222 */
1223VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1224{
1225 int rc = pgmLock(pVM);
1226 AssertRCReturn(rc, rc);
1227
1228#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1229 /*
1230 * Find the page and make sure it's writable.
1231 */
1232 PPGMPAGE pPage;
1233 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1234 if (RT_SUCCESS(rc))
1235 {
1236 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1237 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1238 if (RT_SUCCESS(rc))
1239 {
1240 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1241
1242 PVMCPU pVCpu = VMMGetCpu(pVM);
1243 void *pv;
1244 rc = pgmRZDynMapHCPageInlined(pVCpu,
1245 PGM_PAGE_GET_HCPHYS(pPage),
1246 &pv
1247 RTLOG_COMMA_SRC_POS);
1248 if (RT_SUCCESS(rc))
1249 {
1250 AssertRCSuccess(rc);
1251
1252 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1253 *ppv = pv;
1254 pLock->pvPage = pv;
1255 pLock->pVCpu = pVCpu;
1256 }
1257 }
1258 }
1259
1260#else /* IN_RING3 || IN_RING0 */
1261 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1262 /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary. */
1263 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1264
1265 /*
1266 * Query the Physical TLB entry for the page (may fail).
1267 */
1268 PPGMPAGEMAPTLBE pTlbe;
1269 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1270 if (RT_SUCCESS(rc))
1271 {
1272 /*
1273 * If the page is shared, the zero page, or being write monitored
1274 * it must be converted to a page that's writable if possible.
1275 */
1276 PPGMPAGE pPage = pTlbe->pPage;
1277 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1278 {
1279 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1280 if (RT_SUCCESS(rc))
1281 {
1282 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1283 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1284 }
1285 }
1286 if (RT_SUCCESS(rc))
1287 {
1288 /*
1289 * Now, just perform the locking and calculate the return address.
1290 */
1291 PPGMPAGEMAP pMap = pTlbe->pMap;
1292 if (pMap)
1293 pMap->cRefs++;
1294
1295 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1296 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1297 {
1298 if (cLocks == 0)
1299 pVM->pgm.s.cWriteLockedPages++;
1300 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1301 }
1302 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1303 {
1304 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1305 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1306 if (pMap)
1307 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1308 }
1309
1310 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1311 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1312 pLock->pvMap = pMap;
1313 }
1314 }
1315
1316#endif /* IN_RING3 || IN_RING0 */
1317 pgmUnlock(pVM);
1318 return rc;
1319}
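
/*
 * Usage sketch for the write case; pvSrc and cb are placeholders for caller
 * supplied data, and the access must not cross the mapped page boundary.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void          *pv;
 *     int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pv, pvSrc, cb);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */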
1320
1321
1322/**
1323 * Requests the mapping of a guest page into the current context.
1324 *
1325 * This API should only be used for very short-term mappings, as it will consume
1326 * scarce resources (R0 and GC) in the mapping cache. When you're done
1327 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1328 *
1329 * @returns VBox status code.
1330 * @retval VINF_SUCCESS on success.
1331 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1332 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1333 *
1334 * @param pVM The VM handle.
1335 * @param GCPhys The guest physical address of the page that should be mapped.
1336 * @param ppv Where to store the address corresponding to GCPhys.
1337 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1338 *
1339 * @remarks The caller is responsible for dealing with access handlers.
1340 * @todo Add an informational return code for pages with access handlers?
1341 *
1342 * @remark Avoid calling this API from within critical sections (other than
1343 * the PGM one) because of the deadlock risk.
1344 * @thread Any thread.
1345 */
1346VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1347{
1348 int rc = pgmLock(pVM);
1349 AssertRCReturn(rc, rc);
1350
1351#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1352 /*
1353 * Find the page and make sure it's readable.
1354 */
1355 PPGMPAGE pPage;
1356 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1357 if (RT_SUCCESS(rc))
1358 {
1359 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1360 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1361 else
1362 {
1363 PVMCPU pVCpu = VMMGetCpu(pVM);
1364 void *pv;
1365 rc = pgmRZDynMapHCPageInlined(pVCpu,
1366 PGM_PAGE_GET_HCPHYS(pPage),
1367 &pv
1368 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1369 if (RT_SUCCESS(rc))
1370 {
1371 AssertRCSuccess(rc);
1372
1373 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1374 *ppv = pv;
1375 pLock->pvPage = pv;
1376 pLock->pVCpu = pVCpu;
1377 }
1378 }
1379 }
1380
1381#else /* IN_RING3 || IN_RING0 */
1382
1383 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1384 /** @todo : This can be dangerous if abused for more than one page; the ring-3 mapping is only valid for ranges that do NOT cross a chunk boundary. */
1385 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
1386
1387 /*
1388 * Query the Physical TLB entry for the page (may fail).
1389 */
1390 PPGMPAGEMAPTLBE pTlbe;
1391 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1392 if (RT_SUCCESS(rc))
1393 {
1394 /* MMIO pages don't have any readable backing. */
1395 PPGMPAGE pPage = pTlbe->pPage;
1396 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1397 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1398 else
1399 {
1400 /*
1401 * Now, just perform the locking and calculate the return address.
1402 */
1403 PPGMPAGEMAP pMap = pTlbe->pMap;
1404 if (pMap)
1405 pMap->cRefs++;
1406
1407 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1408 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1409 {
1410 if (cLocks == 0)
1411 pVM->pgm.s.cReadLockedPages++;
1412 PGM_PAGE_INC_READ_LOCKS(pPage);
1413 }
1414 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1415 {
1416 PGM_PAGE_INC_READ_LOCKS(pPage);
1417 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1418 if (pMap)
1419 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1420 }
1421
1422 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1423 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1424 pLock->pvMap = pMap;
1425 }
1426 }
1427
1428#endif /* IN_RING3 || IN_RING0 */
1429 pgmUnlock(pVM);
1430 return rc;
1431}
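
/*
 * Usage sketch for the read-only case; pvDst and cb are placeholders for caller
 * supplied storage and size, and the read must stay within the mapped page.
 *
 *     PGMPAGEMAPLOCK Lock;
 *     void const    *pv;
 *     int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *     if (RT_SUCCESS(rc))
 *     {
 *         memcpy(pvDst, pv, cb);
 *         PGMPhysReleasePageMappingLock(pVM, &Lock);
 *     }
 */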
1432
1433
1434/**
1435 * Requests the mapping of a guest page given by virtual address into the current context.
1436 *
1437 * This API should only be used for very short-term mappings, as it will consume
1438 * scarce resources (R0 and GC) in the mapping cache. When you're done
1439 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1440 *
1441 * This API will assume your intention is to write to the page, and will
1442 * therefore replace shared and zero pages. If you do not intend to modify
1443 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1444 *
1445 * @returns VBox status code.
1446 * @retval VINF_SUCCESS on success.
1447 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1448 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1449 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1450 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1451 *
1452 * @param pVCpu VMCPU handle.
1453 * @param GCPtr The guest virtual address of the page that should be mapped.
1454 * @param ppv Where to store the address corresponding to GCPtr.
1455 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1456 *
1457 * @remark Avoid calling this API from within critical sections (other than
1458 * the PGM one) because of the deadlock risk.
1459 * @thread EMT
1460 */
1461VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1462{
1463 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1464 RTGCPHYS GCPhys;
1465 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1466 if (RT_SUCCESS(rc))
1467 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1468 return rc;
1469}
1470
1471
1472/**
1473 * Requests the mapping of a guest page given by virtual address into the current context.
1474 *
1475 * This API should only be used for very short-term mappings, as it will consume
1476 * scarce resources (R0 and GC) in the mapping cache. When you're done
1477 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1478 *
1479 * @returns VBox status code.
1480 * @retval VINF_SUCCESS on success.
1481 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1482 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1483 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1484 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1485 *
1486 * @param pVCpu VMCPU handle.
1487 * @param GCPtr The guest virtual address of the page that should be mapped.
1488 * @param ppv Where to store the address corresponding to GCPtr.
1489 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1490 *
1491 * @remark Avoid calling this API from within critical sections (other than
1492 * the PGM one) because of the deadlock risk.
1493 * @thread EMT
1494 */
1495VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1496{
1497 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1498 RTGCPHYS GCPhys;
1499 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1500 if (RT_SUCCESS(rc))
1501 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1502 return rc;
1503}
1504
1505
1506/**
1507 * Release the mapping of a guest page.
1508 *
1509 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1510 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1511 *
1512 * @param pVM The VM handle.
1513 * @param pLock The lock structure initialized by the mapping function.
1514 */
1515VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1516{
1517#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1518 Assert(pLock->pvPage != NULL);
1519 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1520 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1521 pLock->pVCpu = NULL;
1522 pLock->pvPage = NULL;
1523
1524#else
1525 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1526 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1527 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1528
1529 pLock->uPageAndType = 0;
1530 pLock->pvMap = NULL;
1531
1532 pgmLock(pVM);
1533 if (fWriteLock)
1534 {
1535 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1536 Assert(cLocks > 0);
1537 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1538 {
1539 if (cLocks == 1)
1540 {
1541 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1542 pVM->pgm.s.cWriteLockedPages--;
1543 }
1544 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1545 }
1546
1547 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1548 {
1549 PGM_PAGE_SET_WRITTEN_TO(pPage);
1550 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1551 Assert(pVM->pgm.s.cMonitoredPages > 0);
1552 pVM->pgm.s.cMonitoredPages--;
1553 pVM->pgm.s.cWrittenToPages++;
1554 }
1555 }
1556 else
1557 {
1558 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1559 Assert(cLocks > 0);
1560 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1561 {
1562 if (cLocks == 1)
1563 {
1564 Assert(pVM->pgm.s.cReadLockedPages > 0);
1565 pVM->pgm.s.cReadLockedPages--;
1566 }
1567 PGM_PAGE_DEC_READ_LOCKS(pPage);
1568 }
1569 }
1570
1571 if (pMap)
1572 {
1573 Assert(pMap->cRefs >= 1);
1574 pMap->cRefs--;
1575 pMap->iAge = 0;
1576 }
1577 pgmUnlock(pVM);
1578#endif /* IN_RING3 */
1579}
1580
1581
1582/**
1583 * Converts a GC physical address to a HC ring-3 pointer.
1584 *
1585 * @returns VINF_SUCCESS on success.
1586 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1587 * page but has no physical backing.
1588 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1589 * GC physical address.
1590 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1591 * a dynamic RAM chunk boundary.
1592 *
1593 * @param pVM The VM handle.
1594 * @param GCPhys The GC physical address to convert.
1595 * @param cbRange Physical range in bytes.
1596 * @param pR3Ptr Where to store the R3 pointer on success.
1597 *
1598 * @deprecated Avoid when possible!
1599 */
1600VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1601{
1602/** @todo this is kind of hacky and needs some more work. */
1603#ifndef DEBUG_sandervl
1604 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1605#endif
1606
1607    Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): don't use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1608#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1609 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1610#else
1611 pgmLock(pVM);
1612
1613 PPGMRAMRANGE pRam;
1614 PPGMPAGE pPage;
1615 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1616 if (RT_SUCCESS(rc))
1617 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1618
1619 pgmUnlock(pVM);
1620 Assert(rc <= VINF_SUCCESS);
1621 return rc;
1622#endif
1623}
1624
1625
1626#ifdef VBOX_STRICT
1627/**
1628 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1629 *
1630 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1631 * @param pVM The VM handle.
1632 * @param GCPhys The GC physical address.
1633 * @param cbRange Physical range.
1634 *
1635 * @deprecated Avoid when possible.
1636 */
1637VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1638{
1639 RTR3PTR R3Ptr;
1640 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1641 if (RT_SUCCESS(rc))
1642 return R3Ptr;
1643 return NIL_RTR3PTR;
1644}
1645#endif /* VBOX_STRICT */
1646
1647
1648/**
1649 * Converts a guest pointer to a GC physical address.
1650 *
1651 * This uses the current CR3/CR0/CR4 of the guest.
1652 *
1653 * @returns VBox status code.
1654 * @param pVCpu The VMCPU handle.
1655 * @param GCPtr The guest pointer to convert.
1656 * @param pGCPhys Where to store the GC physical address.
1657 */
1658VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1659{
1660 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1661 if (pGCPhys && RT_SUCCESS(rc))
1662 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1663 return rc;
1664}
1665
1666
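/*
 * Minimal conversion sketch; GCPtrGuest is a hypothetical guest linear address.
 * The translation uses the current guest paging mode (CR3/CR0/CR4) and the
 * page offset of GCPtrGuest is carried over into the returned physical address.
 *
 *      RTGCPHYS GCPhys;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("GCPtr %RGv -> GCPhys %RGp\n", GCPtrGuest, GCPhys));
 */
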
1667/**
1668 * Converts a guest pointer to a HC physical address.
1669 *
1670 * This uses the current CR3/CR0/CR4 of the guest.
1671 *
1672 * @returns VBox status code.
1673 * @param pVCpu The VMCPU handle.
1674 * @param GCPtr The guest pointer to convert.
1675 * @param pHCPhys Where to store the HC physical address.
1676 */
1677VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1678{
1679 PVM pVM = pVCpu->CTX_SUFF(pVM);
1680 RTGCPHYS GCPhys;
1681 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1682 if (RT_SUCCESS(rc))
1683 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1684 return rc;
1685}
1686
1687
1688
1689#undef LOG_GROUP
1690#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1691
1692
1693#ifdef IN_RING3
1694/**
1695 * Cache PGMPhys memory access
1696 *
1697 * @param pVM VM Handle.
1698 * @param pCache Cache structure pointer
1699 * @param GCPhys GC physical address
1700 * @param pbR3 HC pointer corresponding to the physical page.
1701 *
1702 * @thread EMT.
1703 */
1704static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1705{
1706 uint32_t iCacheIndex;
1707
1708 Assert(VM_IS_EMT(pVM));
1709
1710 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1711 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1712
1713 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1714
1715 ASMBitSet(&pCache->aEntries, iCacheIndex);
1716
1717 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1718 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1719}
1720#endif /* IN_RING3 */
1721
1722
1723/**
1724 * Deals with reading from a page with one or more ALL access handlers.
1725 *
1726 * @returns VBox status code. Can be ignored in ring-3.
1727 * @retval VINF_SUCCESS.
1728 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1729 *
1730 * @param pVM The VM handle.
1731 * @param pPage The page descriptor.
1732 * @param GCPhys The physical address to start reading at.
1733 * @param pvBuf Where to put the bits we read.
1734 * @param cb How much to read - less or equal to a page.
1735 */
1736static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1737{
1738 /*
1739 * The most frequent access here is MMIO and shadowed ROM.
1740 * The current code ASSUMES all these access handlers cover full pages!
1741 */
1742
1743 /*
1744 * Whatever we do we need the source page, map it first.
1745 */
1746 const void *pvSrc = NULL;
1747 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1748 if (RT_FAILURE(rc))
1749 {
1750 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1751 GCPhys, pPage, rc));
1752 memset(pvBuf, 0xff, cb);
1753 return VINF_SUCCESS;
1754 }
1755 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1756
1757 /*
1758 * Deal with any physical handlers.
1759 */
1760 PPGMPHYSHANDLER pPhys = NULL;
1761 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1762 {
1763#ifdef IN_RING3
1764 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
1765 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1766 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1767 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1768 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1769 Assert(pPhys->CTX_SUFF(pfnHandler));
1770
1771 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1772 void *pvUser = pPhys->CTX_SUFF(pvUser);
1773
1774 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1775 STAM_PROFILE_START(&pPhys->Stat, h);
1776 Assert(PGMIsLockOwner(pVM));
1777 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1778 pgmUnlock(pVM);
1779 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1780 pgmLock(pVM);
1781# ifdef VBOX_WITH_STATISTICS
1782 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
1783 if (pPhys)
1784 STAM_PROFILE_STOP(&pPhys->Stat, h);
1785# else
1786 pPhys = NULL; /* might not be valid anymore. */
1787# endif
1788 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1789#else
1790 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1791 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1792 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1793#endif
1794 }
1795
1796 /*
1797 * Deal with any virtual handlers.
1798 */
1799 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1800 {
1801 unsigned iPage;
1802 PPGMVIRTHANDLER pVirt;
1803
1804 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1805 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1806 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1807 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1808 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1809
1810#ifdef IN_RING3
1811 if (pVirt->pfnHandlerR3)
1812 {
1813 if (!pPhys)
1814 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1815 else
1816 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1817 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1818 + (iPage << PAGE_SHIFT)
1819 + (GCPhys & PAGE_OFFSET_MASK);
1820
1821 STAM_PROFILE_START(&pVirt->Stat, h);
1822 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1823 STAM_PROFILE_STOP(&pVirt->Stat, h);
1824 if (rc2 == VINF_SUCCESS)
1825 rc = VINF_SUCCESS;
1826 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1827 }
1828 else
1829 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1830#else
1831 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1832 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1833 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1834#endif
1835 }
1836
1837 /*
1838 * Take the default action.
1839 */
1840 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1841 memcpy(pvBuf, pvSrc, cb);
1842 return rc;
1843}
1844
1845
1846/**
1847 * Read physical memory.
1848 *
1849 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1850 * want to ignore those.
1851 *
1852 * @returns VBox status code. Can be ignored in ring-3.
1853 * @retval VINF_SUCCESS.
1854 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1855 *
1856 * @param pVM VM Handle.
1857 * @param GCPhys Physical address start reading from.
1858 * @param pvBuf Where to put the read bits.
1859 * @param cbRead How many bytes to read.
1860 */
1861VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1862{
1863 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1864 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1865
1866 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
1867 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1868
1869 pgmLock(pVM);
1870
1871 /*
1872 * Copy loop on ram ranges.
1873 */
1874 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1875 for (;;)
1876 {
1877 /* Find range. */
1878 while (pRam && GCPhys > pRam->GCPhysLast)
1879 pRam = pRam->CTX_SUFF(pNext);
1880 /* Inside range or not? */
1881 if (pRam && GCPhys >= pRam->GCPhys)
1882 {
1883 /*
1884 * Must work our way thru this page by page.
1885 */
1886 RTGCPHYS off = GCPhys - pRam->GCPhys;
1887 while (off < pRam->cb)
1888 {
1889 unsigned iPage = off >> PAGE_SHIFT;
1890 PPGMPAGE pPage = &pRam->aPages[iPage];
1891 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1892 if (cb > cbRead)
1893 cb = cbRead;
1894
1895 /*
1896 * Any ALL access handlers?
1897 */
1898 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1899 {
1900 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1901 if (RT_FAILURE(rc))
1902 {
1903 pgmUnlock(pVM);
1904 return rc;
1905 }
1906 }
1907 else
1908 {
1909 /*
1910 * Get the pointer to the page.
1911 */
1912 const void *pvSrc;
1913 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1914 if (RT_SUCCESS(rc))
1915 memcpy(pvBuf, pvSrc, cb);
1916 else
1917 {
1918 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1919 pRam->GCPhys + off, pPage, rc));
1920 memset(pvBuf, 0xff, cb);
1921 }
1922 }
1923
1924 /* next page */
1925 if (cb >= cbRead)
1926 {
1927 pgmUnlock(pVM);
1928 return VINF_SUCCESS;
1929 }
1930 cbRead -= cb;
1931 off += cb;
1932 pvBuf = (char *)pvBuf + cb;
1933 } /* walk pages in ram range. */
1934
1935 GCPhys = pRam->GCPhysLast + 1;
1936 }
1937 else
1938 {
1939 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1940
1941 /*
1942 * Unassigned address space.
1943 */
1944 if (!pRam)
1945 break;
1946 size_t cb = pRam->GCPhys - GCPhys;
1947 if (cb >= cbRead)
1948 {
1949 memset(pvBuf, 0xff, cbRead);
1950 break;
1951 }
1952 memset(pvBuf, 0xff, cb);
1953
1954 cbRead -= cb;
1955 pvBuf = (char *)pvBuf + cb;
1956 GCPhys += cb;
1957 }
1958 } /* Ram range walk */
1959
1960 pgmUnlock(pVM);
1961 return VINF_SUCCESS;
1962}
1963
1964
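/*
 * Handler-respecting read sketch; GCPhysDev and the 64 byte buffer are
 * hypothetical.  In ring-3 the status can be ignored; in R0/RC a
 * VERR_PGM_PHYS_WR_HIT_HANDLER return means the access hit a handler that can
 * only be serviced in ring-3 and must be retried there.
 *
 *      uint8_t abBuf[64];
 *      int rc = PGMPhysRead(pVM, GCPhysDev, abBuf, sizeof(abBuf));
 *      if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *          return rc; // let ring-3 redo the access
 */
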
1965/**
1966 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1967 *
1968 * @returns VBox status code. Can be ignored in ring-3.
1969 * @retval VINF_SUCCESS.
1970 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1971 *
1972 * @param pVM The VM handle.
1973 * @param pPage The page descriptor.
1974 * @param GCPhys The physical address to start writing at.
1975 * @param pvBuf What to write.
1976 * @param cbWrite How much to write - less or equal to a page.
1977 */
1978static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1979{
1980 void *pvDst = NULL;
1981 int rc;
1982
1983 /*
1984 * Give priority to physical handlers (like #PF does).
1985 *
1986 * Hope for a lonely physical handler first that covers the whole
1987 * write area. This should be a pretty frequent case with MMIO and
1988 * the heavy usage of full page handlers in the page pool.
1989 */
1990 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1991 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1992 {
1993 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
1994 if (pCur)
1995 {
1996 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1997 Assert(pCur->CTX_SUFF(pfnHandler));
1998
1999 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2000 if (cbRange > cbWrite)
2001 cbRange = cbWrite;
2002
2003#ifndef IN_RING3
2004 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2005 NOREF(cbRange);
2006 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2007 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2008
2009#else /* IN_RING3 */
2010 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2011 if (!PGM_PAGE_IS_MMIO(pPage))
2012 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2013 else
2014 rc = VINF_SUCCESS;
2015 if (RT_SUCCESS(rc))
2016 {
2017 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
2018 void *pvUser = pCur->CTX_SUFF(pvUser);
2019
2020 STAM_PROFILE_START(&pCur->Stat, h);
2021 Assert(PGMIsLockOwner(pVM));
2022 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2023 pgmUnlock(pVM);
2024 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2025 pgmLock(pVM);
2026# ifdef VBOX_WITH_STATISTICS
2027 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2028 if (pCur)
2029 STAM_PROFILE_STOP(&pCur->Stat, h);
2030# else
2031 pCur = NULL; /* might not be valid anymore. */
2032# endif
2033 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2034 memcpy(pvDst, pvBuf, cbRange);
2035 else
2036 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
2037 }
2038 else
2039 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2040 GCPhys, pPage, rc), rc);
2041 if (RT_LIKELY(cbRange == cbWrite))
2042 return VINF_SUCCESS;
2043
2044 /* more fun to be had below */
2045 cbWrite -= cbRange;
2046 GCPhys += cbRange;
2047 pvBuf = (uint8_t *)pvBuf + cbRange;
2048 pvDst = (uint8_t *)pvDst + cbRange;
2049#endif /* IN_RING3 */
2050 }
2051 /* else: the handler is somewhere else in the page, deal with it below. */
2052 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2053 }
2054 /*
2055 * A virtual handler without any interfering physical handlers.
2056 * Hopefully it'll cover the whole write.
2057 */
2058 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2059 {
2060 unsigned iPage;
2061 PPGMVIRTHANDLER pCur;
2062 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2063 if (RT_SUCCESS(rc))
2064 {
2065 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2066 if (cbRange > cbWrite)
2067 cbRange = cbWrite;
2068
2069#ifndef IN_RING3
2070 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2071 NOREF(cbRange);
2072 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2073 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2074
2075#else /* IN_RING3 */
2076
2077 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2078 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2079 if (RT_SUCCESS(rc))
2080 {
2081 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2082 if (pCur->pfnHandlerR3)
2083 {
2084 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2085 + (iPage << PAGE_SHIFT)
2086 + (GCPhys & PAGE_OFFSET_MASK);
2087
2088 STAM_PROFILE_START(&pCur->Stat, h);
2089 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2090 STAM_PROFILE_STOP(&pCur->Stat, h);
2091 }
2092 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2093 memcpy(pvDst, pvBuf, cbRange);
2094 else
2095 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2096 }
2097 else
2098 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2099 GCPhys, pPage, rc), rc);
2100 if (RT_LIKELY(cbRange == cbWrite))
2101 return VINF_SUCCESS;
2102
2103 /* more fun to be had below */
2104 cbWrite -= cbRange;
2105 GCPhys += cbRange;
2106 pvBuf = (uint8_t *)pvBuf + cbRange;
2107 pvDst = (uint8_t *)pvDst + cbRange;
2108#endif
2109 }
2110 /* else: the handler is somewhere else in the page, deal with it below. */
2111 }
2112
2113 /*
2114 * Deal with all the odd ends.
2115 */
2116
2117 /* We need a writable destination page. */
2118 if (!pvDst)
2119 {
2120 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
2121 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2122 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2123 GCPhys, pPage, rc), rc);
2124 }
2125
2126 /* The loop state (big + ugly). */
2127 unsigned iVirtPage = 0;
2128 PPGMVIRTHANDLER pVirt = NULL;
2129 uint32_t offVirt = PAGE_SIZE;
2130 uint32_t offVirtLast = PAGE_SIZE;
2131 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2132
2133 PPGMPHYSHANDLER pPhys = NULL;
2134 uint32_t offPhys = PAGE_SIZE;
2135 uint32_t offPhysLast = PAGE_SIZE;
2136 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2137
2138 /* The loop. */
2139 for (;;)
2140 {
2141 /*
2142 * Find the closest handler at or above GCPhys.
2143 */
2144 if (fMoreVirt && !pVirt)
2145 {
2146 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2147 if (RT_SUCCESS(rc))
2148 {
2149 offVirt = 0;
2150 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2151 }
2152 else
2153 {
2154 PPGMPHYS2VIRTHANDLER pVirtPhys;
2155 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2156 GCPhys, true /* fAbove */);
2157 if ( pVirtPhys
2158 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2159 {
2160 /* ASSUME that pVirtPhys only covers one page. */
2161 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2162 Assert(pVirtPhys->Core.Key > GCPhys);
2163
2164 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2165 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2166 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2167 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2168 }
2169 else
2170 {
2171 pVirt = NULL;
2172 fMoreVirt = false;
2173 offVirt = offVirtLast = PAGE_SIZE;
2174 }
2175 }
2176 }
2177
2178 if (fMorePhys && !pPhys)
2179 {
2180 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2181 if (pPhys)
2182 {
2183 offPhys = 0;
2184 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2185 }
2186 else
2187 {
2188 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2189 GCPhys, true /* fAbove */);
2190 if ( pPhys
2191 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2192 {
2193 offPhys = pPhys->Core.Key - GCPhys;
2194 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2195 }
2196 else
2197 {
2198 pPhys = NULL;
2199 fMorePhys = false;
2200 offPhys = offPhysLast = PAGE_SIZE;
2201 }
2202 }
2203 }
2204
2205 /*
2206 * Handle access to space without handlers (that's easy).
2207 */
2208 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2209 uint32_t cbRange = (uint32_t)cbWrite;
2210 if (offPhys && offVirt)
2211 {
2212 if (cbRange > offPhys)
2213 cbRange = offPhys;
2214 if (cbRange > offVirt)
2215 cbRange = offVirt;
2216 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2217 }
2218 /*
2219 * Physical handler.
2220 */
2221 else if (!offPhys && offVirt)
2222 {
2223 if (cbRange > offPhysLast + 1)
2224 cbRange = offPhysLast + 1;
2225 if (cbRange > offVirt)
2226 cbRange = offVirt;
2227#ifdef IN_RING3
2228 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2229 void *pvUser = pPhys->CTX_SUFF(pvUser);
2230
2231 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2232 STAM_PROFILE_START(&pPhys->Stat, h);
2233 Assert(PGMIsLockOwner(pVM));
2234 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2235 pgmUnlock(pVM);
2236 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2237 pgmLock(pVM);
2238# ifdef VBOX_WITH_STATISTICS
2239 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2240 if (pPhys)
2241 STAM_PROFILE_STOP(&pPhys->Stat, h);
2242# else
2243 pPhys = NULL; /* might not be valid anymore. */
2244# endif
2245 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2246#else
2247 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2248 NOREF(cbRange);
2249 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2250 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2251#endif
2252 }
2253 /*
2254 * Virtual handler.
2255 */
2256 else if (offPhys && !offVirt)
2257 {
2258 if (cbRange > offVirtLast + 1)
2259 cbRange = offVirtLast + 1;
2260 if (cbRange > offPhys)
2261 cbRange = offPhys;
2262#ifdef IN_RING3
2263        Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2264 if (pVirt->pfnHandlerR3)
2265 {
2266 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2267 + (iVirtPage << PAGE_SHIFT)
2268 + (GCPhys & PAGE_OFFSET_MASK);
2269 STAM_PROFILE_START(&pVirt->Stat, h);
2270 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2271 STAM_PROFILE_STOP(&pVirt->Stat, h);
2272 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2273 }
2274 pVirt = NULL;
2275#else
2276 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2277 NOREF(cbRange);
2278 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2279 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2280#endif
2281 }
2282 /*
2283 * Both... give the physical one priority.
2284 */
2285 else
2286 {
2287 Assert(!offPhys && !offVirt);
2288 if (cbRange > offVirtLast + 1)
2289 cbRange = offVirtLast + 1;
2290 if (cbRange > offPhysLast + 1)
2291 cbRange = offPhysLast + 1;
2292
2293#ifdef IN_RING3
2294 if (pVirt->pfnHandlerR3)
2295 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2296 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2297
2298 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2299 void *pvUser = pPhys->CTX_SUFF(pvUser);
2300
2301 STAM_PROFILE_START(&pPhys->Stat, h);
2302 Assert(PGMIsLockOwner(pVM));
2303 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2304 pgmUnlock(pVM);
2305 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2306 pgmLock(pVM);
2307# ifdef VBOX_WITH_STATISTICS
2308 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2309 if (pPhys)
2310 STAM_PROFILE_STOP(&pPhys->Stat, h);
2311# else
2312 pPhys = NULL; /* might not be valid anymore. */
2313# endif
2314 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2315 if (pVirt->pfnHandlerR3)
2316 {
2317
2318 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2319 + (iVirtPage << PAGE_SHIFT)
2320 + (GCPhys & PAGE_OFFSET_MASK);
2321 STAM_PROFILE_START(&pVirt->Stat, h2);
2322 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2323 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2324 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2325 rc = VINF_SUCCESS;
2326 else
2327 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2328 }
2329 pPhys = NULL;
2330 pVirt = NULL;
2331#else
2332 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2333 NOREF(cbRange);
2334 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2335 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2336#endif
2337 }
2338 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2339 memcpy(pvDst, pvBuf, cbRange);
2340
2341 /*
2342 * Advance if we've got more stuff to do.
2343 */
2344 if (cbRange >= cbWrite)
2345 return VINF_SUCCESS;
2346
2347 cbWrite -= cbRange;
2348 GCPhys += cbRange;
2349 pvBuf = (uint8_t *)pvBuf + cbRange;
2350 pvDst = (uint8_t *)pvDst + cbRange;
2351
2352 offPhys -= cbRange;
2353 offPhysLast -= cbRange;
2354 offVirt -= cbRange;
2355 offVirtLast -= cbRange;
2356 }
2357}
2358
2359
2360/**
2361 * Write to physical memory.
2362 *
2363 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2364 * want to ignore those.
2365 *
2366 * @returns VBox status code. Can be ignored in ring-3.
2367 * @retval VINF_SUCCESS.
2368 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2369 *
2370 * @param pVM VM Handle.
2371 * @param GCPhys Physical address to write to.
2372 * @param pvBuf What to write.
2373 * @param cbWrite How many bytes to write.
2374 */
2375VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2376{
2377 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2378 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2379 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2380
2381 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2382 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2383
2384 pgmLock(pVM);
2385
2386 /*
2387 * Copy loop on ram ranges.
2388 */
2389 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2390 for (;;)
2391 {
2392 /* Find range. */
2393 while (pRam && GCPhys > pRam->GCPhysLast)
2394 pRam = pRam->CTX_SUFF(pNext);
2395 /* Inside range or not? */
2396 if (pRam && GCPhys >= pRam->GCPhys)
2397 {
2398 /*
2399 * Must work our way thru this page by page.
2400 */
2401 RTGCPTR off = GCPhys - pRam->GCPhys;
2402 while (off < pRam->cb)
2403 {
2404 RTGCPTR iPage = off >> PAGE_SHIFT;
2405 PPGMPAGE pPage = &pRam->aPages[iPage];
2406 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2407 if (cb > cbWrite)
2408 cb = cbWrite;
2409
2410 /*
2411 * Any active WRITE or ALL access handlers?
2412 */
2413 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2414 {
2415 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2416 if (RT_FAILURE(rc))
2417 {
2418 pgmUnlock(pVM);
2419 return rc;
2420 }
2421 }
2422 else
2423 {
2424 /*
2425 * Get the pointer to the page.
2426 */
2427 void *pvDst;
2428 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2429 if (RT_SUCCESS(rc))
2430 {
2431 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2432 memcpy(pvDst, pvBuf, cb);
2433 }
2434 else
2435 /* Ignore writes to ballooned pages. */
2436 if (!PGM_PAGE_IS_BALLOONED(pPage))
2437 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2438 pRam->GCPhys + off, pPage, rc));
2439 }
2440
2441 /* next page */
2442 if (cb >= cbWrite)
2443 {
2444 pgmUnlock(pVM);
2445 return VINF_SUCCESS;
2446 }
2447
2448 cbWrite -= cb;
2449 off += cb;
2450 pvBuf = (const char *)pvBuf + cb;
2451 } /* walk pages in ram range */
2452
2453 GCPhys = pRam->GCPhysLast + 1;
2454 }
2455 else
2456 {
2457 /*
2458 * Unassigned address space, skip it.
2459 */
2460 if (!pRam)
2461 break;
2462 size_t cb = pRam->GCPhys - GCPhys;
2463 if (cb >= cbWrite)
2464 break;
2465 cbWrite -= cb;
2466 pvBuf = (const char *)pvBuf + cb;
2467 GCPhys += cb;
2468 }
2469 } /* Ram range walk */
2470
2471 pgmUnlock(pVM);
2472 return VINF_SUCCESS;
2473}
2474
2475
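/*
 * Handler-respecting write sketch; GCPhysDev and the written value are
 * hypothetical.  Writes hitting a WRITE/ALL handler are routed through
 * pgmPhysWriteHandler above; in R0/RC that can yield
 * VERR_PGM_PHYS_WR_HIT_HANDLER, which the caller must treat as "retry in
 * ring-3".
 *
 *      uint32_t const u32Value = UINT32_C(0xdeadbeef);
 *      int rc = PGMPhysWrite(pVM, GCPhysDev, &u32Value, sizeof(u32Value));
 *      if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *          return rc; // retry in ring-3
 */
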
2476/**
2477 * Read from guest physical memory by GC physical address, bypassing
2478 * MMIO and access handlers.
2479 *
2480 * @returns VBox status.
2481 * @param pVM VM handle.
2482 * @param pvDst The destination address.
2483 * @param GCPhysSrc The source address (GC physical address).
2484 * @param cb The number of bytes to read.
2485 */
2486VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2487{
2488 /*
2489 * Treat the first page as a special case.
2490 */
2491 if (!cb)
2492 return VINF_SUCCESS;
2493
2494 /* map the 1st page */
2495 void const *pvSrc;
2496 PGMPAGEMAPLOCK Lock;
2497 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2498 if (RT_FAILURE(rc))
2499 return rc;
2500
2501 /* optimize for the case where access is completely within the first page. */
2502 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2503 if (RT_LIKELY(cb <= cbPage))
2504 {
2505 memcpy(pvDst, pvSrc, cb);
2506 PGMPhysReleasePageMappingLock(pVM, &Lock);
2507 return VINF_SUCCESS;
2508 }
2509
2510 /* copy to the end of the page. */
2511 memcpy(pvDst, pvSrc, cbPage);
2512 PGMPhysReleasePageMappingLock(pVM, &Lock);
2513 GCPhysSrc += cbPage;
2514 pvDst = (uint8_t *)pvDst + cbPage;
2515 cb -= cbPage;
2516
2517 /*
2518 * Page by page.
2519 */
2520 for (;;)
2521 {
2522 /* map the page */
2523 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2524 if (RT_FAILURE(rc))
2525 return rc;
2526
2527 /* last page? */
2528 if (cb <= PAGE_SIZE)
2529 {
2530 memcpy(pvDst, pvSrc, cb);
2531 PGMPhysReleasePageMappingLock(pVM, &Lock);
2532 return VINF_SUCCESS;
2533 }
2534
2535 /* copy the entire page and advance */
2536 memcpy(pvDst, pvSrc, PAGE_SIZE);
2537 PGMPhysReleasePageMappingLock(pVM, &Lock);
2538 GCPhysSrc += PAGE_SIZE;
2539 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2540 cb -= PAGE_SIZE;
2541 }
2542 /* won't ever get here. */
2543}
2544
2545
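/*
 * Bypass-read sketch; GCPhysTable and the table layout are hypothetical.
 * Unlike PGMPhysRead, this ignores access handlers and MMIO, but it still
 * copes with reads that cross page boundaries.
 *
 *      struct { uint32_t au32[32]; } Tbl;
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, &Tbl, GCPhysTable, sizeof(Tbl));
 *      AssertRCReturn(rc, rc);
 */
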
2546/**
2547 * Write to guest physical memory by GC physical address.
2548 * This is the write counterpart of PGMPhysSimpleReadGCPhys.
2549 *
2550 * This will bypass MMIO and access handlers.
2551 *
2552 * @returns VBox status.
2553 * @param pVM VM handle.
2554 * @param GCPhysDst The GC physical address of the destination.
2555 * @param pvSrc The source buffer.
2556 * @param cb The number of bytes to write.
2557 */
2558VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2559{
2560 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2561
2562 /*
2563 * Treat the first page as a special case.
2564 */
2565 if (!cb)
2566 return VINF_SUCCESS;
2567
2568 /* map the 1st page */
2569 void *pvDst;
2570 PGMPAGEMAPLOCK Lock;
2571 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2572 if (RT_FAILURE(rc))
2573 return rc;
2574
2575 /* optimize for the case where access is completely within the first page. */
2576 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2577 if (RT_LIKELY(cb <= cbPage))
2578 {
2579 memcpy(pvDst, pvSrc, cb);
2580 PGMPhysReleasePageMappingLock(pVM, &Lock);
2581 return VINF_SUCCESS;
2582 }
2583
2584 /* copy to the end of the page. */
2585 memcpy(pvDst, pvSrc, cbPage);
2586 PGMPhysReleasePageMappingLock(pVM, &Lock);
2587 GCPhysDst += cbPage;
2588 pvSrc = (const uint8_t *)pvSrc + cbPage;
2589 cb -= cbPage;
2590
2591 /*
2592 * Page by page.
2593 */
2594 for (;;)
2595 {
2596 /* map the page */
2597 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2598 if (RT_FAILURE(rc))
2599 return rc;
2600
2601 /* last page? */
2602 if (cb <= PAGE_SIZE)
2603 {
2604 memcpy(pvDst, pvSrc, cb);
2605 PGMPhysReleasePageMappingLock(pVM, &Lock);
2606 return VINF_SUCCESS;
2607 }
2608
2609 /* copy the entire page and advance */
2610 memcpy(pvDst, pvSrc, PAGE_SIZE);
2611 PGMPhysReleasePageMappingLock(pVM, &Lock);
2612 GCPhysDst += PAGE_SIZE;
2613 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2614 cb -= PAGE_SIZE;
2615 }
2616 /* won't ever get here. */
2617}
2618
2619
2620/**
2621 * Read from guest physical memory referenced by GC pointer.
2622 *
2623 * This function uses the current CR3/CR0/CR4 of the guest and will
2624 * bypass access handlers and not set any accessed bits.
2625 *
2626 * @returns VBox status.
2627 * @param pVCpu The VMCPU handle.
2628 * @param pvDst The destination address.
2629 * @param GCPtrSrc The source address (GC pointer).
2630 * @param cb The number of bytes to read.
2631 */
2632VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2633{
2634 PVM pVM = pVCpu->CTX_SUFF(pVM);
2635
2636 /*
2637 * Treat the first page as a special case.
2638 */
2639 if (!cb)
2640 return VINF_SUCCESS;
2641
2642 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
2643 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2644
2645 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2646 * when many VCPUs are fighting for the lock.
2647 */
2648 pgmLock(pVM);
2649
2650 /* map the 1st page */
2651 void const *pvSrc;
2652 PGMPAGEMAPLOCK Lock;
2653 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2654 if (RT_FAILURE(rc))
2655 {
2656 pgmUnlock(pVM);
2657 return rc;
2658 }
2659
2660 /* optimize for the case where access is completely within the first page. */
2661 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2662 if (RT_LIKELY(cb <= cbPage))
2663 {
2664 memcpy(pvDst, pvSrc, cb);
2665 PGMPhysReleasePageMappingLock(pVM, &Lock);
2666 pgmUnlock(pVM);
2667 return VINF_SUCCESS;
2668 }
2669
2670 /* copy to the end of the page. */
2671 memcpy(pvDst, pvSrc, cbPage);
2672 PGMPhysReleasePageMappingLock(pVM, &Lock);
2673 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2674 pvDst = (uint8_t *)pvDst + cbPage;
2675 cb -= cbPage;
2676
2677 /*
2678 * Page by page.
2679 */
2680 for (;;)
2681 {
2682 /* map the page */
2683 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2684 if (RT_FAILURE(rc))
2685 {
2686 pgmUnlock(pVM);
2687 return rc;
2688 }
2689
2690 /* last page? */
2691 if (cb <= PAGE_SIZE)
2692 {
2693 memcpy(pvDst, pvSrc, cb);
2694 PGMPhysReleasePageMappingLock(pVM, &Lock);
2695 pgmUnlock(pVM);
2696 return VINF_SUCCESS;
2697 }
2698
2699 /* copy the entire page and advance */
2700 memcpy(pvDst, pvSrc, PAGE_SIZE);
2701 PGMPhysReleasePageMappingLock(pVM, &Lock);
2702 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2703 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2704 cb -= PAGE_SIZE;
2705 }
2706 /* won't ever get here. */
2707}
2708
2709
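/*
 * Guest-virtual bypass-read sketch; GCPtrGuest and the 128 byte buffer are
 * hypothetical.  The translation uses the current CR3/CR0/CR4, no access
 * handlers are invoked and no accessed bits are set.
 *
 *      uint8_t abBuf[128];
 *      int rc = PGMPhysSimpleReadGCPtr(pVCpu, abBuf, GCPtrGuest, sizeof(abBuf));
 *      if (RT_FAILURE(rc))
 *          return rc; // e.g. the page is not present
 */
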
2710/**
2711 * Write to guest physical memory referenced by GC pointer.
2712 *
2713 * This function uses the current CR3/CR0/CR4 of the guest and will
2714 * bypass access handlers and not set dirty or accessed bits.
2715 *
2716 * @returns VBox status.
2717 * @param pVCpu The VMCPU handle.
2718 * @param GCPtrDst The destination address (GC pointer).
2719 * @param pvSrc The source address.
2720 * @param cb The number of bytes to write.
2721 */
2722VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2723{
2724 PVM pVM = pVCpu->CTX_SUFF(pVM);
2725
2726 /*
2727 * Treat the first page as a special case.
2728 */
2729 if (!cb)
2730 return VINF_SUCCESS;
2731
2732 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
2733 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2734
2735 /* map the 1st page */
2736 void *pvDst;
2737 PGMPAGEMAPLOCK Lock;
2738 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2739 if (RT_FAILURE(rc))
2740 return rc;
2741
2742 /* optimize for the case where access is completely within the first page. */
2743 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2744 if (RT_LIKELY(cb <= cbPage))
2745 {
2746 memcpy(pvDst, pvSrc, cb);
2747 PGMPhysReleasePageMappingLock(pVM, &Lock);
2748 return VINF_SUCCESS;
2749 }
2750
2751 /* copy to the end of the page. */
2752 memcpy(pvDst, pvSrc, cbPage);
2753 PGMPhysReleasePageMappingLock(pVM, &Lock);
2754 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2755 pvSrc = (const uint8_t *)pvSrc + cbPage;
2756 cb -= cbPage;
2757
2758 /*
2759 * Page by page.
2760 */
2761 for (;;)
2762 {
2763 /* map the page */
2764 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2765 if (RT_FAILURE(rc))
2766 return rc;
2767
2768 /* last page? */
2769 if (cb <= PAGE_SIZE)
2770 {
2771 memcpy(pvDst, pvSrc, cb);
2772 PGMPhysReleasePageMappingLock(pVM, &Lock);
2773 return VINF_SUCCESS;
2774 }
2775
2776 /* copy the entire page and advance */
2777 memcpy(pvDst, pvSrc, PAGE_SIZE);
2778 PGMPhysReleasePageMappingLock(pVM, &Lock);
2779 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2780 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2781 cb -= PAGE_SIZE;
2782 }
2783 /* won't ever get here. */
2784}
2785
2786
2787/**
2788 * Write to guest physical memory referenced by GC pointer and update the PTE.
2789 *
2790 * This function uses the current CR3/CR0/CR4 of the guest and will
2791 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2792 *
2793 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2794 *
2795 * @returns VBox status.
2796 * @param pVCpu The VMCPU handle.
2797 * @param GCPtrDst The destination address (GC pointer).
2798 * @param pvSrc The source address.
2799 * @param cb The number of bytes to write.
2800 */
2801VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2802{
2803 PVM pVM = pVCpu->CTX_SUFF(pVM);
2804
2805 /*
2806 * Treat the first page as a special case.
2807 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2808 */
2809 if (!cb)
2810 return VINF_SUCCESS;
2811
2812 /* map the 1st page */
2813 void *pvDst;
2814 PGMPAGEMAPLOCK Lock;
2815 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2816 if (RT_FAILURE(rc))
2817 return rc;
2818
2819 /* optimize for the case where access is completely within the first page. */
2820 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2821 if (RT_LIKELY(cb <= cbPage))
2822 {
2823 memcpy(pvDst, pvSrc, cb);
2824 PGMPhysReleasePageMappingLock(pVM, &Lock);
2825 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2826 return VINF_SUCCESS;
2827 }
2828
2829 /* copy to the end of the page. */
2830 memcpy(pvDst, pvSrc, cbPage);
2831 PGMPhysReleasePageMappingLock(pVM, &Lock);
2832 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2833 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2834 pvSrc = (const uint8_t *)pvSrc + cbPage;
2835 cb -= cbPage;
2836
2837 /*
2838 * Page by page.
2839 */
2840 for (;;)
2841 {
2842 /* map the page */
2843 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2844 if (RT_FAILURE(rc))
2845 return rc;
2846
2847 /* last page? */
2848 if (cb <= PAGE_SIZE)
2849 {
2850 memcpy(pvDst, pvSrc, cb);
2851 PGMPhysReleasePageMappingLock(pVM, &Lock);
2852 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2853 return VINF_SUCCESS;
2854 }
2855
2856 /* copy the entire page and advance */
2857 memcpy(pvDst, pvSrc, PAGE_SIZE);
2858 PGMPhysReleasePageMappingLock(pVM, &Lock);
2859 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2860 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2861 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2862 cb -= PAGE_SIZE;
2863 }
2864 /* won't ever get here. */
2865}
2866
2867
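/*
 * Sketch contrasting the two simple GCPtr write APIs above; GCPtrGuest and
 * the value are hypothetical.  The "Dirty" variant additionally sets the
 * accessed and dirty bits in the guest PTE, which matters when writing on
 * the guest's behalf during instruction emulation.
 *
 *      int            rc;
 *      uint64_t const u64 = 0;
 *      // plain write, guest PTE left untouched:
 *      rc = PGMPhysSimpleWriteGCPtr(pVCpu, GCPtrGuest, &u64, sizeof(u64));
 *      // write on behalf of the guest, A/D bits updated:
 *      rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrGuest, &u64, sizeof(u64));
 */
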
2868/**
2869 * Read from guest physical memory referenced by GC pointer.
2870 *
2871 * This function uses the current CR3/CR0/CR4 of the guest and will
2872 * respect access handlers and set accessed bits.
2873 *
2874 * @returns VBox status.
2875 * @param pVCpu The VMCPU handle.
2876 * @param pvDst The destination address.
2877 * @param GCPtrSrc The source address (GC pointer).
2878 * @param cb The number of bytes to read.
2879 * @thread The vCPU EMT.
2880 */
2881VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2882{
2883 RTGCPHYS GCPhys;
2884 uint64_t fFlags;
2885 int rc;
2886 PVM pVM = pVCpu->CTX_SUFF(pVM);
2887
2888 /*
2889 * Anything to do?
2890 */
2891 if (!cb)
2892 return VINF_SUCCESS;
2893
2894 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2895
2896 /*
2897 * Optimize reads within a single page.
2898 */
2899 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2900 {
2901 /* Convert virtual to physical address + flags */
2902 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2903 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2904 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2905
2906 /* mark the guest page as accessed. */
2907 if (!(fFlags & X86_PTE_A))
2908 {
2909 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2910 AssertRC(rc);
2911 }
2912
2913 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2914 }
2915
2916 /*
2917 * Page by page.
2918 */
2919 for (;;)
2920 {
2921 /* Convert virtual to physical address + flags */
2922 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2923 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2924 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2925
2926 /* mark the guest page as accessed. */
2927 if (!(fFlags & X86_PTE_A))
2928 {
2929 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2930 AssertRC(rc);
2931 }
2932
2933 /* copy */
2934 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2935 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2936 if (cbRead >= cb || RT_FAILURE(rc))
2937 return rc;
2938
2939 /* next */
2940 cb -= cbRead;
2941 pvDst = (uint8_t *)pvDst + cbRead;
2942 GCPtrSrc += cbRead;
2943 }
2944}
2945
2946
2947/**
2948 * Write to guest physical memory referenced by GC pointer.
2949 *
2950 * This function uses the current CR3/CR0/CR4 of the guest and will
2951 * respect access handlers and set dirty and accessed bits.
2952 *
2953 * @returns VBox status.
2954 * @retval VINF_SUCCESS.
2955 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2956 *
2957 * @param pVCpu The VMCPU handle.
2958 * @param GCPtrDst The destination address (GC pointer).
2959 * @param pvSrc The source address.
2960 * @param cb The number of bytes to write.
2961 */
2962VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2963{
2964 RTGCPHYS GCPhys;
2965 uint64_t fFlags;
2966 int rc;
2967 PVM pVM = pVCpu->CTX_SUFF(pVM);
2968
2969 /*
2970 * Anything to do?
2971 */
2972 if (!cb)
2973 return VINF_SUCCESS;
2974
2975 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2976
2977 /*
2978 * Optimize writes within a single page.
2979 */
2980 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2981 {
2982 /* Convert virtual to physical address + flags */
2983 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2984 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2985 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2986
2987 /* Mention when we ignore X86_PTE_RW... */
2988 if (!(fFlags & X86_PTE_RW))
2989        Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2990
2991 /* Mark the guest page as accessed and dirty if necessary. */
2992 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2993 {
2994 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2995 AssertRC(rc);
2996 }
2997
2998 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2999 }
3000
3001 /*
3002 * Page by page.
3003 */
3004 for (;;)
3005 {
3006 /* Convert virtual to physical address + flags */
3007 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3008 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3009 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3010
3011 /* Mention when we ignore X86_PTE_RW... */
3012 if (!(fFlags & X86_PTE_RW))
3013        Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3014
3015 /* Mark the guest page as accessed and dirty if necessary. */
3016 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3017 {
3018 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3019 AssertRC(rc);
3020 }
3021
3022 /* copy */
3023 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3024 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
3025 if (cbWrite >= cb || RT_FAILURE(rc))
3026 return rc;
3027
3028 /* next */
3029 cb -= cbWrite;
3030 pvSrc = (uint8_t *)pvSrc + cbWrite;
3031 GCPtrDst += cbWrite;
3032 }
3033}
3034
3035
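/*
 * Handler-respecting guest-virtual write sketch; GCPtrGuest and the value are
 * hypothetical.  The address is translated page by page, accessed and dirty
 * bits are set, and the actual write goes through PGMPhysWrite so access
 * handlers are honoured.
 *
 *      uint16_t const u16 = 0x1234;
 *      int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrGuest, &u16, sizeof(u16));
 *      if (RT_FAILURE(rc))
 *          return rc;
 */
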
3036/**
3037 * Performs a read of guest virtual memory for instruction emulation.
3038 *
3039 * This will check permissions, raise exceptions and update the access bits.
3040 *
3041 * The current implementation will bypass all access handlers. It may later be
3042 * changed to at least respect MMIO.
3043 *
3044 *
3045 * @returns VBox status code suitable to scheduling.
3046 * @retval VINF_SUCCESS if the read was performed successfully.
3047 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3048 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3049 *
3050 * @param pVCpu The VMCPU handle.
3051 * @param pCtxCore The context core.
3052 * @param pvDst Where to put the bytes we've read.
3053 * @param GCPtrSrc The source address.
3054 * @param cb The number of bytes to read. Not more than a page.
3055 *
3056 * @remark This function will dynamically map physical pages in GC. This may unmap
3057 * mappings done by the caller. Be careful!
3058 */
3059VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3060{
3061 PVM pVM = pVCpu->CTX_SUFF(pVM);
3062 Assert(cb <= PAGE_SIZE);
3063
3064/** @todo r=bird: This isn't perfect!
3065 * -# It's not checking for reserved bits being 1.
3066 * -# It's not correctly dealing with the access bit.
3067 * -# It's not respecting MMIO memory or any other access handlers.
3068 */
3069 /*
3070 * 1. Translate virtual to physical. This may fault.
3071 * 2. Map the physical address.
3072 * 3. Do the read operation.
3073 * 4. Set access bits if required.
3074 */
3075 int rc;
3076 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3077 if (cb <= cb1)
3078 {
3079 /*
3080 * Not crossing pages.
3081 */
3082 RTGCPHYS GCPhys;
3083 uint64_t fFlags;
3084 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3085 if (RT_SUCCESS(rc))
3086 {
3087 /** @todo we should check reserved bits ... */
3088 void *pvSrc;
3089 rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
3090 switch (rc)
3091 {
3092 case VINF_SUCCESS:
3093 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3094 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3095 break;
3096 case VERR_PGM_PHYS_PAGE_RESERVED:
3097 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3098 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
3099 break;
3100 default:
3101 return rc;
3102 }
3103
3104 /** @todo access bit emulation isn't 100% correct. */
3105 if (!(fFlags & X86_PTE_A))
3106 {
3107 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3108 AssertRC(rc);
3109 }
3110 return VINF_SUCCESS;
3111 }
3112 }
3113 else
3114 {
3115 /*
3116 * Crosses pages.
3117 */
3118 size_t cb2 = cb - cb1;
3119 uint64_t fFlags1;
3120 RTGCPHYS GCPhys1;
3121 uint64_t fFlags2;
3122 RTGCPHYS GCPhys2;
3123 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3124 if (RT_SUCCESS(rc))
3125 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3126 if (RT_SUCCESS(rc))
3127 {
3128 /** @todo we should check reserved bits ... */
3129 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3130 void *pvSrc1;
3131 rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
3132 switch (rc)
3133 {
3134 case VINF_SUCCESS:
3135 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3136 break;
3137 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3138 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
3139 break;
3140 default:
3141 return rc;
3142 }
3143
3144 void *pvSrc2;
3145 rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
3146 switch (rc)
3147 {
3148 case VINF_SUCCESS:
3149 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3150 break;
3151 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3152 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
3153 break;
3154 default:
3155 return rc;
3156 }
3157
3158 if (!(fFlags1 & X86_PTE_A))
3159 {
3160 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3161 AssertRC(rc);
3162 }
3163 if (!(fFlags2 & X86_PTE_A))
3164 {
3165 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3166 AssertRC(rc);
3167 }
3168 return VINF_SUCCESS;
3169 }
3170 }
3171
3172 /*
3173 * Raise a #PF.
3174 */
3175 uint32_t uErr;
3176
3177 /* Get the current privilege level. */
3178 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3179 switch (rc)
3180 {
3181 case VINF_SUCCESS:
3182 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3183 break;
3184
3185 case VERR_PAGE_NOT_PRESENT:
3186 case VERR_PAGE_TABLE_NOT_PRESENT:
3187 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3188 break;
3189
3190 default:
3191 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3192 return rc;
3193 }
3194 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3195 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3196}
3197
3198
3199/**
3200 * Performs a read of guest virtual memory for instruction emulation.
3201 *
3202 * This will check permissions, raise exceptions and update the access bits.
3203 *
3204 * The current implementation will bypass all access handlers. It may later be
3205 * changed to at least respect MMIO.
3206 *
3207 *
3208 * @returns VBox status code suitable to scheduling.
3209 * @retval VINF_SUCCESS if the read was performed successfully.
3210 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3211 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3212 *
3213 * @param pVCpu The VMCPU handle.
3214 * @param pCtxCore The context core.
3215 * @param pvDst Where to put the bytes we've read.
3216 * @param GCPtrSrc The source address.
3217 * @param cb The number of bytes to read. Not more than a page.
3218 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3219 * an appropriate error status will be returned (no
3220 * informational status at all).
3221 *
3222 *
3223 * @remarks Takes the PGM lock.
3224 * @remarks A page fault on the 2nd page of the access will be raised without
3225 * writing the bits on the first page since we're ASSUMING that the
3226 * caller is emulating an instruction access.
3227 * @remarks This function will dynamically map physical pages in GC. This may
3228 * unmap mappings done by the caller. Be careful!
3229 */
3230VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3231{
3232 PVM pVM = pVCpu->CTX_SUFF(pVM);
3233 Assert(cb <= PAGE_SIZE);
3234
3235 /*
3236 * 1. Translate virtual to physical. This may fault.
3237 * 2. Map the physical address.
3238 * 3. Do the read operation.
3239 * 4. Set access bits if required.
3240 */
3241 int rc;
3242 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3243 if (cb <= cb1)
3244 {
3245 /*
3246 * Not crossing pages.
3247 */
3248 RTGCPHYS GCPhys;
3249 uint64_t fFlags;
3250 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3251 if (RT_SUCCESS(rc))
3252 {
3253 if (1) /** @todo we should check reserved bits ... */
3254 {
3255 const void *pvSrc;
3256 PGMPAGEMAPLOCK Lock;
3257 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3258 switch (rc)
3259 {
3260 case VINF_SUCCESS:
3261 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3262 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3263 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3264 break;
3265 case VERR_PGM_PHYS_PAGE_RESERVED:
3266 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3267 memset(pvDst, 0xff, cb);
3268 break;
3269 default:
3270 AssertMsgFailed(("%Rrc\n", rc));
3271 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3272 return rc;
3273 }
3274 PGMPhysReleasePageMappingLock(pVM, &Lock);
3275
3276 if (!(fFlags & X86_PTE_A))
3277 {
3278 /** @todo access bit emulation isn't 100% correct. */
3279 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3280 AssertRC(rc);
3281 }
3282 return VINF_SUCCESS;
3283 }
3284 }
3285 }
3286 else
3287 {
3288 /*
3289 * Crosses pages.
3290 */
3291 size_t cb2 = cb - cb1;
3292 uint64_t fFlags1;
3293 RTGCPHYS GCPhys1;
3294 uint64_t fFlags2;
3295 RTGCPHYS GCPhys2;
3296 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3297 if (RT_SUCCESS(rc))
3298 {
3299 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3300 if (RT_SUCCESS(rc))
3301 {
3302 if (1) /** @todo we should check reserved bits ... */
3303 {
3304 const void *pvSrc;
3305 PGMPAGEMAPLOCK Lock;
3306 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3307 switch (rc)
3308 {
3309 case VINF_SUCCESS:
3310 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3311 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3312 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3313 PGMPhysReleasePageMappingLock(pVM, &Lock);
3314 break;
3315 case VERR_PGM_PHYS_PAGE_RESERVED:
3316 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3317 memset(pvDst, 0xff, cb1);
3318 break;
3319 default:
3320 AssertMsgFailed(("%Rrc\n", rc));
3321 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3322 return rc;
3323 }
3324
3325 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3326 switch (rc)
3327 {
3328 case VINF_SUCCESS:
3329 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3330 PGMPhysReleasePageMappingLock(pVM, &Lock);
3331 break;
3332 case VERR_PGM_PHYS_PAGE_RESERVED:
3333 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3334 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3335 break;
3336 default:
3337 AssertMsgFailed(("%Rrc\n", rc));
3338 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3339 return rc;
3340 }
3341
3342 if (!(fFlags1 & X86_PTE_A))
3343 {
3344 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3345 AssertRC(rc);
3346 }
3347 if (!(fFlags2 & X86_PTE_A))
3348 {
3349 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3350 AssertRC(rc);
3351 }
3352 return VINF_SUCCESS;
3353 }
3354 /* sort out which page */
3355 }
3356 else
3357 GCPtrSrc += cb1; /* fault on 2nd page */
3358 }
3359 }
3360
3361 /*
3362 * Raise a #PF if we're allowed to do that.
3363 */
3364 /* Calc the error bits. */
3365 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3366 uint32_t uErr;
3367 switch (rc)
3368 {
3369 case VINF_SUCCESS:
3370 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3371 rc = VERR_ACCESS_DENIED;
3372 break;
3373
3374 case VERR_PAGE_NOT_PRESENT:
3375 case VERR_PAGE_TABLE_NOT_PRESENT:
3376 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3377 break;
3378
3379 default:
3380 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3381 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3382 return rc;
3383 }
3384 if (fRaiseTrap)
3385 {
3386 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3387 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3388 }
3389 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3390 return rc;
3391}
3392
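[Editor's addition, not part of PGMAllPhys.cpp.] The #PF error code synthesised at the end of PGMPhysInterpretedReadNoHandlers above depends only on why the guest page walk failed and on the current CPL. The standalone sketch below reproduces that mapping with locally defined constants that mirror the x86 page-fault error-code bits behind X86_TRAP_PF_US (bit 2) and X86_TRAP_PF_RSVD (bit 3); the helper calcReadPfErr and the main() driver are hypothetical and exist only so the mapping can be checked in isolation.

/* Standalone sketch of the read-path #PF error-code mapping (editor's addition). */
#include <stdint.h>
#include <stdio.h>

#define MY_TRAP_PF_US   UINT32_C(0x04)  /* user-mode access; mirrors X86_TRAP_PF_US */
#define MY_TRAP_PF_RSVD UINT32_C(0x08)  /* reserved bit violation; mirrors X86_TRAP_PF_RSVD */

/* fTranslationOk != 0: the guest page walk succeeded but the backing page was
   reserved/invalid (the VINF_SUCCESS case above); 0: the page or page table
   was not present.  cpl is compared against 2 exactly as in the code above. */
static uint32_t calcReadPfErr(int fTranslationOk, uint32_t cpl)
{
    if (fTranslationOk)
        return cpl >= 2 ? MY_TRAP_PF_RSVD | MY_TRAP_PF_US : MY_TRAP_PF_RSVD;
    return cpl >= 2 ? MY_TRAP_PF_US : 0;
}

int main(void)
{
    printf("CPL3 read, page not present -> uErr=%#x\n", (unsigned)calcReadPfErr(0, 3)); /* 0x4 */
    printf("CPL0 read, reserved/invalid -> uErr=%#x\n", (unsigned)calcReadPfErr(1, 0)); /* 0x8 */
    return 0;
}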
3393
3394/**
3395 * Performs a write to guest virtual memory for instruction emulation.
3396 *
3397 * This will check permissions, raise exceptions and update the dirty and access
3398 * bits.
3399 *
3400 * @returns VBox status code suitable for scheduling.
3401 * @retval VINF_SUCCESS if the write was performed successfully.
3402 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3403 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3404 *
3405 * @param pVCpu The VMCPU handle.
3406 * @param pCtxCore The context core.
3407 * @param GCPtrDst The destination address.
3408 * @param pvSrc What to write.
3409 * @param cb The number of bytes to write. Not more than a page.
3410 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3411 * an appropriate error status will be returned instead (no
3412 * informational status at all).
3413 *
3414 * @remarks Takes the PGM lock.
3415 * @remarks A page fault on the 2nd page of the access will be raised without
3416 * writing the bits on the first page since we're ASSUMING that the
3417 * caller is emulating an instruction access.
3418 * @remarks This function will dynamically map physical pages in GC. This may
3419 * unmap mappings done by the caller. Be careful!
3420 */
3421VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3422{
3423 Assert(cb <= PAGE_SIZE);
3424 PVM pVM = pVCpu->CTX_SUFF(pVM);
3425
3426 /*
3427 * 1. Translate virtual to physical. This may fault.
3428 * 2. Map the physical address.
3429 * 3. Do the write operation.
3430 * 4. Set access bits if required.
3431 */
3432 /** @todo Since this method is frequently used by EMInterpret or IOM
3433 * upon a write fault to a write-access monitored page, we can
3434 * reuse the guest page table walking from the \#PF code. */
3435 int rc;
3436 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3437 if (cb <= cb1)
3438 {
3439 /*
3440 * Not crossing pages.
3441 */
3442 RTGCPHYS GCPhys;
3443 uint64_t fFlags;
3444 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3445 if (RT_SUCCESS(rc))
3446 {
3447 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3448 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3449 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3450 {
3451 void *pvDst;
3452 PGMPAGEMAPLOCK Lock;
3453 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3454 switch (rc)
3455 {
3456 case VINF_SUCCESS:
3457 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3458 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3459 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3460 PGMPhysReleasePageMappingLock(pVM, &Lock);
3461 break;
3462 case VERR_PGM_PHYS_PAGE_RESERVED:
3463 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3464 /* bit bucket */
3465 break;
3466 default:
3467 AssertMsgFailed(("%Rrc\n", rc));
3468 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3469 return rc;
3470 }
3471
3472 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3473 {
3474 /** @todo dirty & access bit emulation isn't 100% correct. */
3475 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3476 AssertRC(rc);
3477 }
3478 return VINF_SUCCESS;
3479 }
3480 rc = VERR_ACCESS_DENIED;
3481 }
3482 }
3483 else
3484 {
3485 /*
3486 * Crosses pages.
3487 */
3488 size_t cb2 = cb - cb1;
3489 uint64_t fFlags1;
3490 RTGCPHYS GCPhys1;
3491 uint64_t fFlags2;
3492 RTGCPHYS GCPhys2;
3493 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3494 if (RT_SUCCESS(rc))
3495 {
3496 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3497 if (RT_SUCCESS(rc))
3498 {
3499 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3500 && (fFlags2 & X86_PTE_RW))
3501 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3502 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3503 {
3504 void *pvDst;
3505 PGMPAGEMAPLOCK Lock;
3506 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3507 switch (rc)
3508 {
3509 case VINF_SUCCESS:
3510 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3511 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3512 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3513 PGMPhysReleasePageMappingLock(pVM, &Lock);
3514 break;
3515 case VERR_PGM_PHYS_PAGE_RESERVED:
3516 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3517 /* bit bucket */
3518 break;
3519 default:
3520 AssertMsgFailed(("%Rrc\n", rc));
3521 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3522 return rc;
3523 }
3524
3525 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3526 switch (rc)
3527 {
3528 case VINF_SUCCESS:
3529 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3530 PGMPhysReleasePageMappingLock(pVM, &Lock);
3531 break;
3532 case VERR_PGM_PHYS_PAGE_RESERVED:
3533 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3534 /* bit bucket */
3535 break;
3536 default:
3537 AssertMsgFailed(("%Rrc\n", rc));
3538 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3539 return rc;
3540 }
3541
3542 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3543 {
3544 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3545 AssertRC(rc);
3546 }
3547 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3548 {
3549 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3550 AssertRC(rc);
3551 }
3552 return VINF_SUCCESS;
3553 }
3554 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3555 GCPtrDst += cb1; /* fault on the 2nd page. */
3556 rc = VERR_ACCESS_DENIED;
3557 }
3558 else
3559 GCPtrDst += cb1; /* fault on the 2nd page. */
3560 }
3561 }
3562
3563 /*
3564 * Raise a #PF if we're allowed to do that.
3565 */
3566 /* Calc the error bits. */
3567 uint32_t uErr;
3568 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3569 switch (rc)
3570 {
3571 case VINF_SUCCESS:
3572 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3573 rc = VERR_ACCESS_DENIED;
3574 break;
3575
3576 case VERR_ACCESS_DENIED:
3577 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3578 break;
3579
3580 case VERR_PAGE_NOT_PRESENT:
3581 case VERR_PAGE_TABLE_NOT_PRESENT:
3582 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3583 break;
3584
3585 default:
3586 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3587 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3588 return rc;
3589 }
3590 if (fRaiseTrap)
3591 {
3592 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3593 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3594 }
3595 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3596 return rc;
3597}
3598
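[Editor's addition, not part of PGMAllPhys.cpp.] A minimal sketch of how an instruction-emulation helper might drive PGMPhysInterpretedWriteNoHandlers, based only on the status codes documented above: with fRaiseTrap set, a failed access comes back as VINF_EM_RAW_GUEST_TRAP or VINF_TRPM_XCPT_DISPATCHED and merely needs forwarding to the execution loop. The helper name emuCommitStore is hypothetical, and the sketch assumes the VMM build environment and the headers this file already includes.

/* Hypothetical caller sketch (editor's addition); assumes the VMM include
   environment of this file (VBox/pgm.h, VBox/err.h, iprt/assert.h). */
static int emuCommitStore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore,
                          RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    Assert(cb <= PAGE_SIZE); /* the API handles at most one page per call */

    /* Let PGM raise the #PF itself (fRaiseTrap = true); we only forward the
       resulting scheduling status to the caller. */
    int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb,
                                               true /*fRaiseTrap*/);
    if (rc == VINF_SUCCESS)
        return VINF_SUCCESS;                /* bits written, A/D bits updated */
    if (   rc == VINF_EM_RAW_GUEST_TRAP
        || rc == VINF_TRPM_XCPT_DISPATCHED)
        return rc;                          /* #PF pending/dispatched; let the guest resume */
    AssertMsgFailed(("unexpected rc=%Rrc\n", rc));
    return rc;
}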