VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@24647

Last change on this file was r24647, checked in by vboxsync on 2009-11-13:

Fix physical page TLB.

1/* $Id: PGMAllPhys.cpp 24647 2009-11-13 17:04:26Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 PVMCPU pVCpu = VMMGetCpu(pVM);
66
67 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
68 switch (pRom->aPages[iPage].enmProt)
69 {
70 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
71 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
72 {
73 /*
74 * If it's a simple instruction which doesn't change the cpu state
75 * we will simply skip it. Otherwise we'll have to defer it to REM.
76 */
77 uint32_t cbOp;
78 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
79 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
80 if ( RT_SUCCESS(rc)
81 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
82 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
83 {
84 switch (pDis->opcode)
85 {
86 /** @todo Find other instructions we can safely skip, possibly
87 * adding this kind of detection to DIS or EM. */
88 case OP_MOV:
89 pRegFrame->rip += cbOp;
90 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
91 return VINF_SUCCESS;
92 }
93 }
94 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
95 return rc;
96 break;
97 }
98
99 case PGMROMPROT_READ_RAM_WRITE_RAM:
100 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
101 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
102 AssertRC(rc);
103 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
104
105 case PGMROMPROT_READ_ROM_WRITE_RAM:
106 /* Handle it in ring-3 because it's *way* easier there. */
107 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
108 break;
109
110 default:
111 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
112 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
113 VERR_INTERNAL_ERROR);
114 }
115
116 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
117 return VINF_EM_RAW_EMULATE_INSTR;
118}
119
120#endif /* !IN_RING3 */
121
122/**
123 * Checks if Address Gate 20 is enabled or not.
124 *
125 * @returns true if enabled.
126 * @returns false if disabled.
127 * @param pVCpu VMCPU handle.
128 */
129VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
130{
131 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
132 return pVCpu->pgm.s.fA20Enabled;
133}
134
135
136/**
137 * Validates a GC physical address.
138 *
139 * @returns true if valid.
140 * @returns false if invalid.
141 * @param pVM The VM handle.
142 * @param GCPhys The physical address to validate.
143 */
144VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
145{
146 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
147 return pPage != NULL;
148}
149
150
151/**
152 * Checks if a GC physical address is a normal page,
153 * i.e. not ROM, MMIO or reserved.
154 *
155 * @returns true if normal.
156 * @returns false if invalid, ROM, MMIO or reserved page.
157 * @param pVM The VM handle.
158 * @param GCPhys The physical address to check.
159 */
160VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
161{
162 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
163 return pPage
164 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
165}
166
167
168/**
169 * Converts a GC physical address to a HC physical address.
170 *
171 * @returns VINF_SUCCESS on success.
172 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
173 * page but has no physical backing.
174 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
175 * GC physical address.
176 *
177 * @param pVM The VM handle.
178 * @param GCPhys The GC physical address to convert.
179 * @param pHCPhys Where to store the HC physical address on success.
180 */
181VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
182{
183 pgmLock(pVM);
184 PPGMPAGE pPage;
185 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
186 if (RT_SUCCESS(rc))
187 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
188 pgmUnlock(pVM);
189 return rc;
190}
191
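/*
 * Editor's sketch (not part of the original source): combining the query
 * APIs above.  The guest physical address is made up for illustration and
 * all names prefixed "example" are hypothetical.
 */
#if 0 /* illustrative only, never compiled */
static int examplePokeAtGuestPhys(PVM pVM)
{
    RTGCPHYS const GCPhys = UINT32_C(0x00100000);   /* hypothetical guest physical address */
    if (!PGMPhysIsGCPhysNormal(pVM, GCPhys))        /* filters out ROM, MMIO and reserved pages */
        return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;

    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGp is backed by host physical page %RHp\n", GCPhys, HCPhys));
    return rc;
}
#endif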
192
193/**
194 * Invalidates the GC page mapping TLB.
195 *
196 * @param pVM The VM handle.
197 */
198VMMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
199{
200 /* later */
201 NOREF(pVM);
202}
203
204#ifndef IN_RC
205/**
206 * Invalidates the ring-0 page mapping TLB.
207 *
208 * @param pVM The VM handle.
209 */
210VMMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
211{
212 PGMPhysInvalidatePageR3MapTLB(pVM);
213}
214
215
216/**
217 * Invalidates the ring-3 page mapping TLB.
218 *
219 * @param pVM The VM handle.
220 */
221VMMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
222{
223 pgmLock(pVM);
224 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
225 {
226 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
227 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
228 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
229 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
230 }
231 pgmUnlock(pVM);
232}
233#endif /* ! IN_RC */
234
235/**
236 * Makes sure that there is at least one handy page ready for use.
237 *
238 * This will also take the appropriate actions when reaching water-marks.
239 *
240 * @returns VBox status code.
241 * @retval VINF_SUCCESS on success.
242 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
243 *
244 * @param pVM The VM handle.
245 *
246 * @remarks Must be called from within the PGM critical section. It may
247 * nip back to ring-3/0 in some cases.
248 */
249static int pgmPhysEnsureHandyPage(PVM pVM)
250{
251 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
252
253 /*
254 * Do we need to do anything special?
255 */
256#ifdef IN_RING3
257 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
258#else
259 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
260#endif
261 {
262 /*
263 * Allocate pages only if we're out of them, or in ring-3, almost out.
264 */
265#ifdef IN_RING3
266 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
267#else
268 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
269#endif
270 {
271 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
272 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
273#ifdef IN_RING3
274 int rc = PGMR3PhysAllocateHandyPages(pVM);
275#else
276 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
277#endif
278 if (RT_UNLIKELY(rc != VINF_SUCCESS))
279 {
280 if (RT_FAILURE(rc))
281 return rc;
282 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
283 if (!pVM->pgm.s.cHandyPages)
284 {
285 LogRel(("PGM: no more handy pages!\n"));
286 return VERR_EM_NO_MEMORY;
287 }
288 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
289 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
290#ifdef IN_RING3
291 REMR3NotifyFF(pVM);
292#else
293 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
294#endif
295 }
296 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
297 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
298 ("%u\n", pVM->pgm.s.cHandyPages),
299 VERR_INTERNAL_ERROR);
300 }
301 else
302 {
303 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
304 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
305#ifndef IN_RING3
306 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
307 {
308 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
309 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
310 }
311#endif
312 }
313 }
314
315 return VINF_SUCCESS;
316}
317
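/*
 * Editor's sketch (not part of the original source): how the force-action
 * flag raised above is usually serviced.  When pgmPhysEnsureHandyPage sets
 * VM_FF_PGM_NEED_HANDY_PAGES, ring-3 force-action processing (in EM/VMM,
 * outside this file) is expected to top the handy-page set up again along
 * these lines.
 */
#if 0 /* illustrative only, never compiled */
    if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);  /* ring-3 only */
        AssertRC(rc);
    }
#endif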
318
319/**
320 * Replace a zero or shared page with a new page that we can write to.
321 *
322 * @returns The following VBox status codes.
323 * @retval VINF_SUCCESS on success, pPage is modified.
324 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
325 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
326 *
327 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
328 *
329 * @param pVM The VM address.
330 * @param pPage The physical page tracking structure. This will
331 * be modified on success.
332 * @param GCPhys The address of the page.
333 *
334 * @remarks Must be called from within the PGM critical section. It may
335 * nip back to ring-3/0 in some cases.
336 *
337 * @remarks This function shouldn't really fail, however if it does
338 * it probably means we've screwed up the size of handy pages and/or
339 * the low-water mark. Or, that some device I/O is causing a lot of
340 * pages to be allocated while the host is in a low-memory
341 * condition. This latter should be handled elsewhere and in a more
342 * controlled manner, it's on the @bugref{3170} todo list...
343 */
344int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
345{
346 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
347
348 /*
349 * Prereqs.
350 */
351 Assert(PGMIsLocked(pVM));
352 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
353 Assert(!PGM_PAGE_IS_MMIO(pPage));
354
355
356 /*
357 * Flush any shadow page table mappings of the page.
358 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
359 */
360 bool fFlushTLBs = false;
361 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
362 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
363
364 /*
365 * Ensure that we've got a page handy, take it and use it.
366 */
367 int rc2 = pgmPhysEnsureHandyPage(pVM);
368 if (RT_FAILURE(rc2))
369 {
370 if (fFlushTLBs)
371 PGM_INVL_ALL_VCPU_TLBS(pVM);
372 Assert(rc2 == VERR_EM_NO_MEMORY);
373 return rc2;
374 }
375 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
376 Assert(PGMIsLocked(pVM));
377 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
378 Assert(!PGM_PAGE_IS_MMIO(pPage));
379
380 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
381 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
382 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
383 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
384 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
385 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
386
387 /*
388 * There are one or two actions to be taken the next time we allocate handy pages:
389 * - Tell the GMM (global memory manager) what the page is being used for.
390 * (Speeds up replacement operations - sharing and defragmenting.)
391 * - If the current backing is shared, it must be freed.
392 */
393 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
394 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
395
396 if (PGM_PAGE_IS_SHARED(pPage))
397 {
398 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
399 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
400 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
401
402 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
403 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
404 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
405 pVM->pgm.s.cSharedPages--;
406 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
407 }
408 else
409 {
410 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
411 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
412 pVM->pgm.s.cZeroPages--;
413 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
414 }
415
416 /*
417 * Do the PGMPAGE modifications.
418 */
419 pVM->pgm.s.cPrivatePages++;
420 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
421 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
422 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
423
424 if ( fFlushTLBs
425 && rc != VINF_PGM_GCPHYS_ALIASED)
426 PGM_INVL_ALL_VCPU_TLBS(pVM);
427 return rc;
428}
429
430
431/**
432 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
433 *
434 * @returns VBox strict status code.
435 * @retval VINF_SUCCESS on success.
436 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
437 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
438 *
439 * @param pVM The VM address.
440 * @param pPage The physical page tracking structure.
441 * @param GCPhys The address of the page.
442 *
443 * @remarks Called from within the PGM critical section.
444 */
445int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
446{
447 switch (PGM_PAGE_GET_STATE(pPage))
448 {
449 case PGM_PAGE_STATE_WRITE_MONITORED:
450 PGM_PAGE_SET_WRITTEN_TO(pPage);
451 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
452 Assert(pVM->pgm.s.cMonitoredPages > 0);
453 pVM->pgm.s.cMonitoredPages--;
454 pVM->pgm.s.cWrittenToPages++;
455 /* fall thru */
456 default: /* to shut up GCC */
457 case PGM_PAGE_STATE_ALLOCATED:
458 return VINF_SUCCESS;
459
460 /*
461 * Zero pages can be dummy pages for MMIO or reserved memory,
462 * so we need to check the flags before joining cause with
463 * shared page replacement.
464 */
465 case PGM_PAGE_STATE_ZERO:
466 if (PGM_PAGE_IS_MMIO(pPage))
467 return VERR_PGM_PHYS_PAGE_RESERVED;
468 /* fall thru */
469 case PGM_PAGE_STATE_SHARED:
470 return pgmPhysAllocPage(pVM, pPage, GCPhys);
471 }
472}
473
474
475/**
476 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
477 *
478 * @returns VBox strict status code.
479 * @retval VINF_SUCCESS on success.
480 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
481 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
482 *
483 * @param pVM The VM address.
484 * @param pPage The physical page tracking structure.
485 * @param GCPhys The address of the page.
486 */
487int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
488{
489 int rc = pgmLock(pVM);
490 if (RT_SUCCESS(rc))
491 {
492 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
493 pgmUnlock(pVM);
494 }
495 return rc;
496}
497
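/*
 * Editor's sketch (not part of the original source): the caller contract of
 * the two functions above.  VINF_PGM_SYNC_CR3 is a *success* status telling
 * the caller that a page pool flush is still pending, so it must be merged
 * into the caller's own status rather than discarded.
 */
#if 0 /* illustrative only, never compiled */
    int rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, GCPhys);
    if (RT_FAILURE(rc))
        return rc;                              /* e.g. VERR_PGM_PHYS_PAGE_RESERVED for MMIO */
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
    /* ... modify the page, then propagate rc so VINF_PGM_SYNC_CR3 isn't lost ... */
#endif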
498
499/**
500 * Internal usage: Map the page specified by its GMM ID.
501 *
502 * This is similar to pgmPhysPageMap.
503 *
504 * @returns VBox status code.
505 *
506 * @param pVM The VM handle.
507 * @param idPage The Page ID.
508 * @param HCPhys The physical address (for RC).
509 * @param ppv Where to store the mapping address.
510 *
511 * @remarks Called from within the PGM critical section. The mapping is only
512 * valid while you're inside this section.
513 */
514int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
515{
516 /*
517 * Validation.
518 */
519 Assert(PGMIsLocked(pVM));
520 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
521 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
522 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
523
524#ifdef IN_RC
525 /*
526 * Map it by HCPhys.
527 */
528 return PGMDynMapHCPage(pVM, HCPhys, ppv);
529
530#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
531 /*
532 * Map it by HCPhys.
533 */
534 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
535
536#else
537 /*
538 * Find/make Chunk TLB entry for the mapping chunk.
539 */
540 PPGMCHUNKR3MAP pMap;
541 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
542 if (pTlbe->idChunk == idChunk)
543 {
544 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
545 pMap = pTlbe->pChunk;
546 }
547 else
548 {
549 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
550
551 /*
552 * Find the chunk, map it if necessary.
553 */
554 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
555 if (!pMap)
556 {
557# ifdef IN_RING0
558 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
559 AssertRCReturn(rc, rc);
560 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
561 Assert(pMap);
562# else
563 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
564 if (RT_FAILURE(rc))
565 return rc;
566# endif
567 }
568
569 /*
570 * Enter it into the Chunk TLB.
571 */
572 pTlbe->idChunk = idChunk;
573 pTlbe->pChunk = pMap;
574 pMap->iAge = 0;
575 }
576
577 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
578 return VINF_SUCCESS;
579#endif
580}
581
582
583/**
584 * Maps a page into the current virtual address space so it can be accessed.
585 *
586 * @returns VBox status code.
587 * @retval VINF_SUCCESS on success.
588 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
589 *
590 * @param pVM The VM address.
591 * @param pPage The physical page tracking structure.
592 * @param GCPhys The address of the page.
593 * @param ppMap Where to store the address of the mapping tracking structure.
594 * @param ppv Where to store the mapping address of the page. The page
595 * offset is masked off!
596 *
597 * @remarks Called from within the PGM critical section.
598 */
599static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
600{
601 Assert(PGMIsLocked(pVM));
602
603#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
604 /*
605 * Just some sketchy GC/R0-darwin code.
606 */
607 *ppMap = NULL;
608 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
609 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
610# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
611 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
612# else
613 PGMDynMapHCPage(pVM, HCPhys, ppv);
614# endif
615 return VINF_SUCCESS;
616
617#else /* IN_RING3 || IN_RING0 */
618
619
620 /*
621 * Special case: ZERO and MMIO2 pages.
622 */
623 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
624 if (idChunk == NIL_GMM_CHUNKID)
625 {
626 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
627 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
628 {
629 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
630 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
631 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
632 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
633 }
634 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
635 {
636 /** @todo deal with aliased MMIO2 pages somehow...
637 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
638 * them, that would also avoid this mess. It would actually be kind of
639 * elegant... */
640 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
641 }
642 else
643 {
644 /** @todo handle MMIO2 */
645 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
646 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
647 ("pPage=%R[pgmpage]\n", pPage),
648 VERR_INTERNAL_ERROR_2);
649 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
650 }
651 *ppMap = NULL;
652 return VINF_SUCCESS;
653 }
654
655 /*
656 * Find/make Chunk TLB entry for the mapping chunk.
657 */
658 PPGMCHUNKR3MAP pMap;
659 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
660 if (pTlbe->idChunk == idChunk)
661 {
662 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
663 pMap = pTlbe->pChunk;
664 }
665 else
666 {
667 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
668
669 /*
670 * Find the chunk, map it if necessary.
671 */
672 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
673 if (!pMap)
674 {
675#ifdef IN_RING0
676 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
677 AssertRCReturn(rc, rc);
678 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
679 Assert(pMap);
680#else
681 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
682 if (RT_FAILURE(rc))
683 return rc;
684#endif
685 }
686
687 /*
688 * Enter it into the Chunk TLB.
689 */
690 pTlbe->idChunk = idChunk;
691 pTlbe->pChunk = pMap;
692 pMap->iAge = 0;
693 }
694
695 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
696 *ppMap = pMap;
697 return VINF_SUCCESS;
698#endif /* IN_RING3 */
699}
700
701
702/**
703 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
704 *
705 * This is typically used in paths where we cannot use the TLB methods (like ROM
706 * pages) or where there is no point in using them since we won't get many hits.
707 *
708 * @returns VBox strict status code.
709 * @retval VINF_SUCCESS on success.
710 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
711 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
712 *
713 * @param pVM The VM address.
714 * @param pPage The physical page tracking structure.
715 * @param GCPhys The address of the page.
716 * @param ppv Where to store the mapping address of the page. The page
717 * offset is masked off!
718 *
719 * @remarks Called from within the PGM critical section. The mapping is only
720 * valid while you're inside this section.
721 */
722int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
723{
724 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
725 if (RT_SUCCESS(rc))
726 {
727 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
728 PPGMPAGEMAP pMapIgnore;
729 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
730 if (RT_FAILURE(rc2)) /* preserve rc */
731 rc = rc2;
732 }
733 return rc;
734}
735
736
737/**
738 * Maps a page into the current virtual address space so it can be accessed for
739 * both writing and reading.
740 *
741 * This is typically used in paths where we cannot use the TLB methods (like ROM
742 * pages) or where there is no point in using them since we won't get many hits.
743 *
744 * @returns VBox status code.
745 * @retval VINF_SUCCESS on success.
746 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
747 *
748 * @param pVM The VM address.
749 * @param pPage The physical page tracking structure. Must be in the
750 * allocated state.
751 * @param GCPhys The address of the page.
752 * @param ppv Where to store the mapping address of the page. The page
753 * offset is masked off!
754 *
755 * @remarks Called from within the PGM critical section. The mapping is only
756 * valid while you're inside this section.
757 */
758int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
759{
760 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
761 PPGMPAGEMAP pMapIgnore;
762 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
763}
764
765
766/**
767 * Maps a page into the current virtual address space so it can be accessed for
768 * reading.
769 *
770 * This is typically used in paths where we cannot use the TLB methods (like ROM
771 * pages) or where there is no point in using them since we won't get many hits.
772 *
773 * @returns VBox status code.
774 * @retval VINF_SUCCESS on success.
775 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
776 *
777 * @param pVM The VM address.
778 * @param pPage The physical page tracking structure.
779 * @param GCPhys The address of the page.
780 * @param ppv Where to store the mapping address of the page. The page
781 * offset is masked off!
782 *
783 * @remarks Called from within the PGM critical section. The mapping is only
784 * valid while you're inside this section.
785 */
786int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
787{
788 PPGMPAGEMAP pMapIgnore;
789 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
790}
791
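/*
 * Editor's sketch (not part of the original source): the short-term mapping
 * helpers above in use.  Both mappings are only valid while the PGM critical
 * section is held, and both return the start of the page, so the byte offset
 * has to be re-applied by the caller.  The patch value is made up.
 */
#if 0 /* illustrative only, never compiled */
    void *pv;
    int rc = pgmPhysPageMakeWritableAndMap(pVM, pPage, GCPhys, &pv);
    if (RT_SUCCESS(rc))
        ((uint8_t *)pv)[GCPhys & PAGE_OFFSET_MASK] = 0x90;      /* hypothetical patch byte */

    void const *pvRO;
    rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvRO);     /* no zero/shared replacement */
    if (RT_SUCCESS(rc))
        Log(("example: first byte of %RGp is %#x\n", GCPhys, ((uint8_t const *)pvRO)[0]));
#endif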
792
793#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
794/**
795 * Load a guest page into the ring-3 physical TLB.
796 *
797 * @returns VBox status code.
798 * @retval VINF_SUCCESS on success
799 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
800 * @param pPGM The PGM instance pointer.
801 * @param GCPhys The guest physical address in question.
802 */
803int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
804{
805 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
806
807 /*
808 * Find the ram range.
809 * 99.8% of requests are expected to be in the first range.
810 */
811 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
812 RTGCPHYS off = GCPhys - pRam->GCPhys;
813 if (RT_UNLIKELY(off >= pRam->cb))
814 {
815 do
816 {
817 pRam = pRam->CTX_SUFF(pNext);
818 if (!pRam)
819 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
820 off = GCPhys - pRam->GCPhys;
821 } while (off >= pRam->cb);
822 }
823
824 /*
825 * Map the page.
826 * Make a special case for the zero page as it is kind of special.
827 */
828 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
829 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
830 if (!PGM_PAGE_IS_ZERO(pPage))
831 {
832 void *pv;
833 PPGMPAGEMAP pMap;
834 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
835 if (RT_FAILURE(rc))
836 return rc;
837 pTlbe->pMap = pMap;
838 pTlbe->pv = pv;
839 }
840 else
841 {
842 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
843 pTlbe->pMap = NULL;
844 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
845 }
846 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
847 pTlbe->pPage = pPage;
848 return VINF_SUCCESS;
849}
850
851
852/**
853 * Load a guest page into the ring-3 physical TLB.
854 *
855 * @returns VBox status code.
856 * @retval VINF_SUCCESS on success
857 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
858 *
859 * @param pPGM The PGM instance pointer.
860 * @param pPage Pointer to the PGMPAGE structure corresponding to
861 * GCPhys.
862 * @param GCPhys The guest physical address in question.
863 */
864int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
865{
866 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
867
868 /*
869 * Map the page.
870 * Make a special case for the zero page as it is kind of special.
871 */
872 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
873 if (!PGM_PAGE_IS_ZERO(pPage))
874 {
875 void *pv;
876 PPGMPAGEMAP pMap;
877 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
878 if (RT_FAILURE(rc))
879 return rc;
880 pTlbe->pMap = pMap;
881 pTlbe->pv = pv;
882 }
883 else
884 {
885 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
886 pTlbe->pMap = NULL;
887 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
888 }
889 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
890 pTlbe->pPage = pPage;
891 return VINF_SUCCESS;
892}
893#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
894
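/*
 * Editor's sketch (not part of the original source): how the TLB loaders
 * above are typically consumed.  The query helpers (pgmPhysPageQueryTlbe and
 * friends, declared in PGMInternal.h but not shown in this file) are assumed
 * to index the TLB by page address and fall back to the loader on a miss,
 * roughly like this.
 */
#if 0 /* illustrative only, never compiled */
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys != (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        int rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);          /* miss: (re)load the entry */
        if (RT_FAILURE(rc))
            return rc;
    }
    /* pTlbe->pv and pTlbe->pPage are now valid for this GCPhys. */
#endif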
895
896/**
897 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
898 * own the PGM lock and therefore does not need to lock the mapped page.
899 *
900 * @returns VBox status code.
901 * @retval VINF_SUCCESS on success.
902 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
903 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
904 *
905 * @param pVM The VM handle.
906 * @param GCPhys The guest physical address of the page that should be mapped.
907 * @param pPage Pointer to the PGMPAGE structure for the page.
908 * @param ppv Where to store the address corresponding to GCPhys.
909 *
910 * @internal
911 */
912int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
913{
914 int rc;
915 AssertReturn(pPage, VERR_INTERNAL_ERROR);
916 Assert(PGMIsLocked(pVM));
917
918 /*
919 * Make sure the page is writable.
920 */
921 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
922 {
923 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
924 if (RT_FAILURE(rc))
925 return rc;
926 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
927 }
928 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
929
930 /*
931 * Get the mapping address.
932 */
933#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
934 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
935#else
936 PPGMPAGEMAPTLBE pTlbe;
937 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
938 if (RT_FAILURE(rc))
939 return rc;
940 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
941#endif
942 return VINF_SUCCESS;
943}
944
945
946/**
947 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
948 * own the PGM lock and therefore does not need to lock the mapped page.
949 *
950 * @returns VBox status code.
951 * @retval VINF_SUCCESS on success.
952 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
953 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
954 *
955 * @param pVM The VM handle.
956 * @param GCPhys The guest physical address of the page that should be mapped.
957 * @param pPage Pointer to the PGMPAGE structure for the page.
958 * @param ppv Where to store the address corresponding to GCPhys.
959 *
960 * @internal
961 */
962int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
963{
964 AssertReturn(pPage, VERR_INTERNAL_ERROR);
965 Assert(PGMIsLocked(pVM));
966 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
967
968 /*
969 * Get the mapping address.
970 */
971#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
972 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
973#else
974 PPGMPAGEMAPTLBE pTlbe;
975 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
976 if (RT_FAILURE(rc))
977 return rc;
978 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
979#endif
980 return VINF_SUCCESS;
981}
982
983
984/**
985 * Requests the mapping of a guest page into the current context.
986 *
987 * This API should only be used for very short-term access, as it will consume
988 * scarce resources (R0 and GC) in the mapping cache. When you're done
989 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
990 *
991 * This API will assume your intention is to write to the page, and will
992 * therefore replace shared and zero pages. If you do not intend to modify
993 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
994 *
995 * @returns VBox status code.
996 * @retval VINF_SUCCESS on success.
997 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
998 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
999 *
1000 * @param pVM The VM handle.
1001 * @param GCPhys The guest physical address of the page that should be mapped.
1002 * @param ppv Where to store the address corresponding to GCPhys.
1003 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1004 *
1005 * @remarks The caller is responsible for dealing with access handlers.
1006 * @todo Add an informational return code for pages with access handlers?
1007 *
1008 * @remark Avoid calling this API from within critical sections (other than the
1009 * PGM one) because of the deadlock risk. External threads may need to
1010 * delegate jobs to the EMTs.
1011 * @thread Any thread.
1012 */
1013VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1014{
1015#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1016
1017 /*
1018 * Find the page and make sure it's writable.
1019 */
1020 PPGMPAGE pPage;
1021 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1022 if (RT_SUCCESS(rc))
1023 {
1024 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1025 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1026 if (RT_SUCCESS(rc))
1027 {
1028 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1029# if 0
1030 pLock->pvMap = 0;
1031 pLock->pvPage = pPage;
1032# else
1033 pLock->u32Dummy = UINT32_MAX;
1034# endif
1035 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1036 rc = VINF_SUCCESS;
1037 }
1038 }
1039
1040#else /* IN_RING3 || IN_RING0 */
1041 int rc = pgmLock(pVM);
1042 AssertRCReturn(rc, rc);
1043
1044 /*
1045 * Query the Physical TLB entry for the page (may fail).
1046 */
1047 PPGMPAGEMAPTLBE pTlbe;
1048 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1049 if (RT_SUCCESS(rc))
1050 {
1051 /*
1052 * If the page is shared, the zero page, or being write monitored
1053 * it must be converted to a page that's writable if possible.
1054 */
1055 PPGMPAGE pPage = pTlbe->pPage;
1056 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1057 {
1058 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1059 if (RT_SUCCESS(rc))
1060 {
1061 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1062 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1063 }
1064 }
1065 if (RT_SUCCESS(rc))
1066 {
1067 /*
1068 * Now, just perform the locking and calculate the return address.
1069 */
1070 PPGMPAGEMAP pMap = pTlbe->pMap;
1071 if (pMap)
1072 pMap->cRefs++;
1073
1074 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1075 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1076 {
1077 if (cLocks == 0)
1078 pVM->pgm.s.cWriteLockedPages++;
1079 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1080 }
1081 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1082 {
1083 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1084 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1085 if (pMap)
1086 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1087 }
1088
1089 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1090 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1091 pLock->pvMap = pMap;
1092 }
1093 }
1094
1095 pgmUnlock(pVM);
1096#endif /* IN_RING3 || IN_RING0 */
1097 return rc;
1098}
1099
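/*
 * Editor's sketch (not part of the original source): the intended
 * map / modify / release pattern for the API above.  The helper name and the
 * assumption that GCPhys is 4-byte aligned within one page are made up;
 * PGMPhysReleasePageMappingLock is defined further down in this file.
 */
#if 0 /* illustrative only, never compiled */
static int exampleWriteGuestU32(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint32_t *)pv = u32Value;                     /* pv already includes the page offset */
        PGMPhysReleasePageMappingLock(pVM, &Lock);      /* release ASAP */
    }
    return rc;
}
#endif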
1100
1101/**
1102 * Requests the mapping of a guest page into the current context.
1103 *
1104 * This API should only be used for very short-term access, as it will consume
1105 * scarce resources (R0 and GC) in the mapping cache. When you're done
1106 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1107 *
1108 * @returns VBox status code.
1109 * @retval VINF_SUCCESS on success.
1110 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1111 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1112 *
1113 * @param pVM The VM handle.
1114 * @param GCPhys The guest physical address of the page that should be mapped.
1115 * @param ppv Where to store the address corresponding to GCPhys.
1116 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1117 *
1118 * @remarks The caller is responsible for dealing with access handlers.
1119 * @todo Add an informational return code for pages with access handlers?
1120 *
1121 * @remark Avoid calling this API from within critical sections (other than
1122 * the PGM one) because of the deadlock risk.
1123 * @thread Any thread.
1124 */
1125VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1126{
1127#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1128
1129 /*
1130 * Find the page and make sure it's readable.
1131 */
1132 PPGMPAGE pPage;
1133 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1134 if (RT_SUCCESS(rc))
1135 {
1136 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1137 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1138 else
1139 {
1140 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1141# if 0
1142 pLock->pvMap = 0;
1143 pLock->pvPage = pPage;
1144# else
1145 pLock->u32Dummy = UINT32_MAX;
1146# endif
1147 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1148 rc = VINF_SUCCESS;
1149 }
1150 }
1151
1152#else /* IN_RING3 || IN_RING0 */
1153 int rc = pgmLock(pVM);
1154 AssertRCReturn(rc, rc);
1155
1156 /*
1157 * Query the Physical TLB entry for the page (may fail).
1158 */
1159 PPGMPAGEMAPTLBE pTlbe;
1160 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1161 if (RT_SUCCESS(rc))
1162 {
1163 /* MMIO pages don't have any readable backing. */
1164 PPGMPAGE pPage = pTlbe->pPage;
1165 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1166 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1167 else
1168 {
1169 /*
1170 * Now, just perform the locking and calculate the return address.
1171 */
1172 PPGMPAGEMAP pMap = pTlbe->pMap;
1173 if (pMap)
1174 pMap->cRefs++;
1175
1176 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1177 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1178 {
1179 if (cLocks == 0)
1180 pVM->pgm.s.cReadLockedPages++;
1181 PGM_PAGE_INC_READ_LOCKS(pPage);
1182 }
1183 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1184 {
1185 PGM_PAGE_INC_READ_LOCKS(pPage);
1186 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1187 if (pMap)
1188 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1189 }
1190
1191 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1192 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1193 pLock->pvMap = pMap;
1194 }
1195 }
1196
1197 pgmUnlock(pVM);
1198#endif /* IN_RING3 || IN_RING0 */
1199 return rc;
1200}
1201
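/*
 * Editor's sketch (not part of the original source): the read-only
 * counterpart of the previous example.  Only a read lock is taken, so zero
 * and shared pages are left as they are.
 */
#if 0 /* illustrative only, never compiled */
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        uint32_t const u32Value = *(uint32_t const *)pv;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
        Log(("example: read %#x at %RGp\n", u32Value, GCPhys));
    }
#endif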
1202
1203/**
1204 * Requests the mapping of a guest page given by virtual address into the current context.
1205 *
1206 * This API should only be used for very short-term access, as it will consume
1207 * scarce resources (R0 and GC) in the mapping cache. When you're done
1208 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1209 *
1210 * This API will assume your intention is to write to the page, and will
1211 * therefore replace shared and zero pages. If you do not intend to modify
1212 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1213 *
1214 * @returns VBox status code.
1215 * @retval VINF_SUCCESS on success.
1216 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1217 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1218 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1219 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1220 *
1221 * @param pVCpu VMCPU handle.
1222 * @param GCPtr The guest virtual address of the page that should be mapped.
1223 * @param ppv Where to store the address corresponding to GCPhys.
1224 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1225 *
1226 * @remark Avoid calling this API from within critical sections (other than
1227 * the PGM one) because of the deadlock risk.
1228 * @thread EMT
1229 */
1230VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1231{
1232 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1233 RTGCPHYS GCPhys;
1234 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1235 if (RT_SUCCESS(rc))
1236 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1237 return rc;
1238}
1239
1240
1241/**
1242 * Requests the mapping of a guest page given by virtual address into the current context.
1243 *
1244 * This API should only be used for very short-term access, as it will consume
1245 * scarce resources (R0 and GC) in the mapping cache. When you're done
1246 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1247 *
1248 * @returns VBox status code.
1249 * @retval VINF_SUCCESS on success.
1250 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1251 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1252 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1253 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1254 *
1255 * @param pVCpu VMCPU handle.
1256 * @param GCPtr The guest virtual address of the page that should be mapped.
1257 * @param ppv Where to store the address corresponding to GCPhys.
1258 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1259 *
1260 * @remark Avoid calling this API from within critical sections (other than
1261 * the PGM one) because of the deadlock risk.
1262 * @thread EMT
1263 */
1264VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1265{
1266 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1267 RTGCPHYS GCPhys;
1268 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1269 if (RT_SUCCESS(rc))
1270 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1271 return rc;
1272}
1273
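/*
 * Editor's sketch (not part of the original source): mapping a page by guest
 * virtual address with the APIs above.  This has to run on the EMT because
 * the current paging mode and CR3 of pVCpu are consulted; the pointer value
 * is hypothetical.
 */
#if 0 /* illustrative only, never compiled */
    RTGCPTR const   GCPtr = 0x00401000;     /* hypothetical guest virtual address */
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* ... inspect at most up to the end of the page starting at pv ... */
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
#endif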
1274
1275/**
1276 * Release the mapping of a guest page.
1277 *
1278 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1279 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1280 *
1281 * @param pVM The VM handle.
1282 * @param pLock The lock structure initialized by the mapping function.
1283 */
1284VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1285{
1286#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1287 /* currently nothing to do here. */
1288 Assert(pLock->u32Dummy == UINT32_MAX);
1289 pLock->u32Dummy = 0;
1290
1291#else /* IN_RING3 || IN_RING0 */
1292 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1293 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1294 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1295
1296 pLock->uPageAndType = 0;
1297 pLock->pvMap = NULL;
1298
1299 pgmLock(pVM);
1300 if (fWriteLock)
1301 {
1302 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1303 Assert(cLocks > 0);
1304 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1305 {
1306 if (cLocks == 1)
1307 {
1308 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1309 pVM->pgm.s.cWriteLockedPages--;
1310 }
1311 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1312 }
1313
1314 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1315 {
1316 PGM_PAGE_SET_WRITTEN_TO(pPage);
1317 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1318 Assert(pVM->pgm.s.cMonitoredPages > 0);
1319 pVM->pgm.s.cMonitoredPages--;
1320 pVM->pgm.s.cWrittenToPages++;
1321 }
1322 }
1323 else
1324 {
1325 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1326 Assert(cLocks > 0);
1327 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1328 {
1329 if (cLocks == 1)
1330 {
1331 Assert(pVM->pgm.s.cReadLockedPages > 0);
1332 pVM->pgm.s.cReadLockedPages--;
1333 }
1334 PGM_PAGE_DEC_READ_LOCKS(pPage);
1335 }
1336 }
1337
1338 if (pMap)
1339 {
1340 Assert(pMap->cRefs >= 1);
1341 pMap->cRefs--;
1342 pMap->iAge = 0;
1343 }
1344 pgmUnlock(pVM);
1345#endif /* IN_RING3 || IN_RING0 */
1346}
1347
1348
1349/**
1350 * Converts a GC physical address to a HC ring-3 pointer.
1351 *
1352 * @returns VINF_SUCCESS on success.
1353 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1354 * page but has no physical backing.
1355 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1356 * GC physical address.
1357 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1358 * a dynamic ram chunk boundary.
1359 *
1360 * @param pVM The VM handle.
1361 * @param GCPhys The GC physical address to convert.
1362 * @param cbRange Physical range
1363 * @param pR3Ptr Where to store the R3 pointer on success.
1364 *
1365 * @deprecated Avoid when possible!
1366 */
1367VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1368{
1369/** @todo this is kind of hacky and needs some more work. */
1370#ifndef DEBUG_sandervl
1371 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1372#endif
1373
1374 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): don't use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1375#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1376 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1377#else
1378 pgmLock(pVM);
1379
1380 PPGMRAMRANGE pRam;
1381 PPGMPAGE pPage;
1382 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1383 if (RT_SUCCESS(rc))
1384 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1385
1386 pgmUnlock(pVM);
1387 Assert(rc <= VINF_SUCCESS);
1388 return rc;
1389#endif
1390}
1391
1392
1393#ifdef VBOX_STRICT
1394/**
1395 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1396 *
1397 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1398 * @param pVM The VM handle.
1399 * @param GCPhys The GC physical address.
1400 * @param cbRange Physical range.
1401 *
1402 * @deprecated Avoid when possible.
1403 */
1404VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1405{
1406 RTR3PTR R3Ptr;
1407 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1408 if (RT_SUCCESS(rc))
1409 return R3Ptr;
1410 return NIL_RTR3PTR;
1411}
1412#endif /* VBOX_STRICT */
1413
1414
1415/**
1416 * Converts a guest pointer to a GC physical address.
1417 *
1418 * This uses the current CR3/CR0/CR4 of the guest.
1419 *
1420 * @returns VBox status code.
1421 * @param pVCpu The VMCPU Handle
1422 * @param GCPtr The guest pointer to convert.
1423 * @param pGCPhys Where to store the GC physical address.
1424 */
1425VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1426{
1427 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1428 if (pGCPhys && RT_SUCCESS(rc))
1429 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1430 return rc;
1431}
1432
1433
1434/**
1435 * Converts a guest pointer to a HC physical address.
1436 *
1437 * This uses the current CR3/CR0/CR4 of the guest.
1438 *
1439 * @returns VBox status code.
1440 * @param pVCpu The VMCPU Handle
1441 * @param GCPtr The guest pointer to convert.
1442 * @param pHCPhys Where to store the HC physical address.
1443 */
1444VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1445{
1446 PVM pVM = pVCpu->CTX_SUFF(pVM);
1447 RTGCPHYS GCPhys;
1448 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1449 if (RT_SUCCESS(rc))
1450 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1451 return rc;
1452}
1453
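/*
 * Editor's sketch (not part of the original source): chaining the two
 * translation steps above by hand, which is essentially what
 * PGMPhysGCPtr2HCPhys does internally.  Shown only to make the address
 * composition explicit; the page offset travels along from GCPtr.
 */
#if 0 /* illustrative only, never compiled */
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);        /* guest virtual -> guest physical */
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS HCPhys;
        rc = PGMPhysGCPhys2HCPhys(pVCpu->CTX_SUFF(pVM), GCPhys, &HCPhys);   /* -> host physical */
        if (RT_SUCCESS(rc))
            Log(("example: %RGv -> %RGp -> %RHp\n", GCPtr, GCPhys, HCPhys));
    }
#endif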
1454
1455/**
1456 * Converts a guest pointer to a R3 pointer.
1457 *
1458 * This uses the current CR3/CR0/CR4 of the guest.
1459 *
1460 * @returns VBox status code.
1461 * @param pVCpu The VMCPU Handle
1462 * @param GCPtr The guest pointer to convert.
1463 * @param pR3Ptr Where to store the R3 virtual address.
1464 *
1465 * @deprecated Don't use this.
1466 */
1467VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1468{
1469 PVM pVM = pVCpu->CTX_SUFF(pVM);
1470 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1471 RTGCPHYS GCPhys;
1472 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1473 if (RT_SUCCESS(rc))
1474 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1475 return rc;
1476}
1477
1478
1479
1480#undef LOG_GROUP
1481#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1482
1483
1484#ifdef IN_RING3
1485/**
1486 * Cache PGMPhys memory access
1487 *
1488 * @param pVM VM Handle.
1489 * @param pCache Cache structure pointer
1490 * @param GCPhys GC physical address
1491 * @param pbR3 R3 pointer corresponding to the physical page
1492 *
1493 * @thread EMT.
1494 */
1495static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1496{
1497 uint32_t iCacheIndex;
1498
1499 Assert(VM_IS_EMT(pVM));
1500
1501 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1502 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1503
1504 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1505
1506 ASMBitSet(&pCache->aEntries, iCacheIndex);
1507
1508 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1509 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1510}
1511#endif /* IN_RING3 */
1512
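/*
 * Editor's sketch (not part of the original source): how a lookup against
 * the cache filled by pgmPhysCacheAdd might look.  The bitmap marks valid
 * entries and the page-aligned GCPhys acts as the tag.  The real lookup
 * helper is not part of this file, so its exact shape is an assumption.
 */
#if 0 /* illustrative only, never compiled */
    uint32_t const iCacheIndex = (PHYS_PAGE_ADDRESS(GCPhys) >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK;
    if (    ASMBitTest(&pCache->aEntries, iCacheIndex)
        &&  pCache->Entry[iCacheIndex].GCPhys == PHYS_PAGE_ADDRESS(GCPhys))
    {
        uint8_t *pb = pCache->Entry[iCacheIndex].pbR3 + (GCPhys & PAGE_OFFSET_MASK);
        /* ... cache hit, use pb ... */
    }
#endif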
1513
1514/**
1515 * Deals with reading from a page with one or more ALL access handlers.
1516 *
1517 * @returns VBox status code. Can be ignored in ring-3.
1518 * @retval VINF_SUCCESS.
1519 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1520 *
1521 * @param pVM The VM handle.
1522 * @param pPage The page descriptor.
1523 * @param GCPhys The physical address to start reading at.
1524 * @param pvBuf Where to put the bits we read.
1525 * @param cb How much to read - less or equal to a page.
1526 */
1527static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1528{
1529 /*
1530 * The most frequent access here is MMIO and shadowed ROM.
1531 * The current code ASSUMES all these access handlers cover full pages!
1532 */
1533
1534 /*
1535 * Whatever we do we need the source page, map it first.
1536 */
1537 const void *pvSrc = NULL;
1538 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1539 if (RT_FAILURE(rc))
1540 {
1541 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1542 GCPhys, pPage, rc));
1543 memset(pvBuf, 0xff, cb);
1544 return VINF_SUCCESS;
1545 }
1546 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1547
1548 /*
1549 * Deal with any physical handlers.
1550 */
1551 PPGMPHYSHANDLER pPhys = NULL;
1552 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1553 {
1554#ifdef IN_RING3
1555 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1556 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1557 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1558 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1559 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1560 Assert(pPhys->CTX_SUFF(pfnHandler));
1561
1562 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1563 void *pvUser = pPhys->CTX_SUFF(pvUser);
1564
1565 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1566 STAM_PROFILE_START(&pPhys->Stat, h);
1567 Assert(PGMIsLockOwner(pVM));
1568 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1569 pgmUnlock(pVM);
1570 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1571 pgmLock(pVM);
1572# ifdef VBOX_WITH_STATISTICS
1573 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1574 if (pPhys)
1575 STAM_PROFILE_STOP(&pPhys->Stat, h);
1576# else
1577 pPhys = NULL; /* might not be valid anymore. */
1578# endif
1579 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1580#else
1581 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1582 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1583 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1584#endif
1585 }
1586
1587 /*
1588 * Deal with any virtual handlers.
1589 */
1590 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1591 {
1592 unsigned iPage;
1593 PPGMVIRTHANDLER pVirt;
1594
1595 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1596 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1597 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1598 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1599 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1600
1601#ifdef IN_RING3
1602 if (pVirt->pfnHandlerR3)
1603 {
1604 if (!pPhys)
1605 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1606 else
1607 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1608 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1609 + (iPage << PAGE_SHIFT)
1610 + (GCPhys & PAGE_OFFSET_MASK);
1611
1612 STAM_PROFILE_START(&pVirt->Stat, h);
1613 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1614 STAM_PROFILE_STOP(&pVirt->Stat, h);
1615 if (rc2 == VINF_SUCCESS)
1616 rc = VINF_SUCCESS;
1617 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1618 }
1619 else
1620 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1621#else
1622 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1623 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1624 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1625#endif
1626 }
1627
1628 /*
1629 * Take the default action.
1630 */
1631 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1632 memcpy(pvBuf, pvSrc, cb);
1633 return rc;
1634}
1635
1636
1637/**
1638 * Read physical memory.
1639 *
1640 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1641 * want to ignore those.
1642 *
1643 * @returns VBox status code. Can be ignored in ring-3.
1644 * @retval VINF_SUCCESS.
1645 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1646 *
1647 * @param pVM VM Handle.
1648 * @param GCPhys Physical address start reading from.
1649 * @param pvBuf Where to put the read bits.
1650 * @param cbRead How many bytes to read.
1651 */
1652VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1653{
1654 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1655 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1656
1657 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1658 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1659
1660 pgmLock(pVM);
1661
1662 /*
1663 * Copy loop on ram ranges.
1664 */
1665 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1666 for (;;)
1667 {
1668 /* Find range. */
1669 while (pRam && GCPhys > pRam->GCPhysLast)
1670 pRam = pRam->CTX_SUFF(pNext);
1671 /* Inside range or not? */
1672 if (pRam && GCPhys >= pRam->GCPhys)
1673 {
1674 /*
1675 * Must work our way thru this page by page.
1676 */
1677 RTGCPHYS off = GCPhys - pRam->GCPhys;
1678 while (off < pRam->cb)
1679 {
1680 unsigned iPage = off >> PAGE_SHIFT;
1681 PPGMPAGE pPage = &pRam->aPages[iPage];
1682 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1683 if (cb > cbRead)
1684 cb = cbRead;
1685
1686 /*
1687 * Any ALL access handlers?
1688 */
1689 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1690 {
1691 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1692 if (RT_FAILURE(rc))
1693 {
1694 pgmUnlock(pVM);
1695 return rc;
1696 }
1697 }
1698 else
1699 {
1700 /*
1701 * Get the pointer to the page.
1702 */
1703 const void *pvSrc;
1704 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1705 if (RT_SUCCESS(rc))
1706 memcpy(pvBuf, pvSrc, cb);
1707 else
1708 {
1709 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1710 pRam->GCPhys + off, pPage, rc));
1711 memset(pvBuf, 0xff, cb);
1712 }
1713 }
1714
1715 /* next page */
1716 if (cb >= cbRead)
1717 {
1718 pgmUnlock(pVM);
1719 return VINF_SUCCESS;
1720 }
1721 cbRead -= cb;
1722 off += cb;
1723 pvBuf = (char *)pvBuf + cb;
1724 } /* walk pages in ram range. */
1725
1726 GCPhys = pRam->GCPhysLast + 1;
1727 }
1728 else
1729 {
1730 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1731
1732 /*
1733 * Unassigned address space.
1734 */
1735 if (!pRam)
1736 break;
1737 size_t cb = pRam->GCPhys - GCPhys;
1738 if (cb >= cbRead)
1739 {
1740 memset(pvBuf, 0xff, cbRead);
1741 break;
1742 }
1743 memset(pvBuf, 0xff, cb);
1744
1745 cbRead -= cb;
1746 pvBuf = (char *)pvBuf + cb;
1747 GCPhys += cb;
1748 }
1749 } /* Ram range walk */
1750
1751 pgmUnlock(pVM);
1752 return VINF_SUCCESS;
1753}
1754
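/*
 * Usage sketch for PGMPhysRead (illustrative only; the physical address and
 * buffer size are hypothetical). The copy loop above dispatches any page
 * covered by an ALL access handler to pgmPhysReadHandler and fills
 * unassigned address space with 0xff.
 *
 *      uint8_t abBuf[256];
 *      int rc = PGMPhysRead(pVM, GCPhysTable, abBuf, sizeof(abBuf));
 *      if (RT_FAILURE(rc))
 *          return rc;  // only possible in R0/RC: VERR_PGM_PHYS_WR_HIT_HANDLER
 */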
1755
1756/**
1757 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1758 *
1759 * @returns VBox status code. Can be ignored in ring-3.
1760 * @retval VINF_SUCCESS.
1761 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1762 *
1763 * @param pVM The VM handle.
1764 * @param pPage The page descriptor.
1765 * @param GCPhys The physical address to start writing at.
1766 * @param pvBuf What to write.
1767 * @param cbWrite How much to write - less or equal to a page.
1768 */
1769static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1770{
1771 void *pvDst = NULL;
1772 int rc;
1773
1774 /*
1775 * Give priority to physical handlers (like #PF does).
1776 *
1777 * Hope for a lonely physical handler first that covers the whole
1778 * write area. This should be a pretty frequent case with MMIO and
1779 * the heavy usage of full page handlers in the page pool.
1780 */
1781 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1782 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1783 {
1784 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1785 if (pCur)
1786 {
1787 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1788 Assert(pCur->CTX_SUFF(pfnHandler));
1789
1790 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1791 if (cbRange > cbWrite)
1792 cbRange = cbWrite;
1793
1794#ifndef IN_RING3
1795 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1796 NOREF(cbRange);
1797 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1798 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1799
1800#else /* IN_RING3 */
1801 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1802 if (!PGM_PAGE_IS_MMIO(pPage))
1803 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1804 else
1805 rc = VINF_SUCCESS;
1806 if (RT_SUCCESS(rc))
1807 {
1808 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1809 void *pvUser = pCur->CTX_SUFF(pvUser);
1810
1811 STAM_PROFILE_START(&pCur->Stat, h);
1812 Assert(PGMIsLockOwner(pVM));
1813 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1814 pgmUnlock(pVM);
1815 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1816 pgmLock(pVM);
1817# ifdef VBOX_WITH_STATISTICS
1818 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1819 if (pCur)
1820 STAM_PROFILE_STOP(&pCur->Stat, h);
1821# else
1822 pCur = NULL; /* might not be valid anymore. */
1823# endif
1824 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1825 memcpy(pvDst, pvBuf, cbRange);
1826 else
1827 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1828 }
1829 else
1830 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1831 GCPhys, pPage, rc), rc);
1832 if (RT_LIKELY(cbRange == cbWrite))
1833 return VINF_SUCCESS;
1834
1835 /* more fun to be had below */
1836 cbWrite -= cbRange;
1837 GCPhys += cbRange;
1838 pvBuf = (uint8_t *)pvBuf + cbRange;
1839 pvDst = (uint8_t *)pvDst + cbRange;
1840#endif /* IN_RING3 */
1841 }
1842 /* else: the handler is somewhere else in the page, deal with it below. */
1843 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1844 }
1845 /*
1846 * A virtual handler without any interfering physical handlers.
1847 * Hopefully it'll cover the whole write.
1848 */
1849 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1850 {
1851 unsigned iPage;
1852 PPGMVIRTHANDLER pCur;
1853 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1854 if (RT_SUCCESS(rc))
1855 {
1856 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1857 if (cbRange > cbWrite)
1858 cbRange = cbWrite;
1859
1860#ifndef IN_RING3
1861 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1862 NOREF(cbRange);
1863 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1864 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1865
1866#else /* IN_RING3 */
1867
1868 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1869 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1870 if (RT_SUCCESS(rc))
1871 {
1872 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1873 if (pCur->pfnHandlerR3)
1874 {
1875 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1876 + (iPage << PAGE_SHIFT)
1877 + (GCPhys & PAGE_OFFSET_MASK);
1878
1879 STAM_PROFILE_START(&pCur->Stat, h);
1880 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1881 STAM_PROFILE_STOP(&pCur->Stat, h);
1882 }
1883 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1884 memcpy(pvDst, pvBuf, cbRange);
1885 else
1886 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1887 }
1888 else
1889 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1890 GCPhys, pPage, rc), rc);
1891 if (RT_LIKELY(cbRange == cbWrite))
1892 return VINF_SUCCESS;
1893
1894 /* more fun to be had below */
1895 cbWrite -= cbRange;
1896 GCPhys += cbRange;
1897 pvBuf = (uint8_t *)pvBuf + cbRange;
1898 pvDst = (uint8_t *)pvDst + cbRange;
1899#endif
1900 }
1901 /* else: the handler is somewhere else in the page, deal with it below. */
1902 }
1903
1904 /*
1905 * Deal with all the odd ends.
1906 */
1907
1908 /* We need a writable destination page. */
1909 if (!pvDst)
1910 {
1911 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1912 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1913 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1914 GCPhys, pPage, rc), rc);
1915 }
1916
1917 /* The loop state (big + ugly). */
1918 unsigned iVirtPage = 0;
1919 PPGMVIRTHANDLER pVirt = NULL;
1920 uint32_t offVirt = PAGE_SIZE;
1921 uint32_t offVirtLast = PAGE_SIZE;
1922 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1923
1924 PPGMPHYSHANDLER pPhys = NULL;
1925 uint32_t offPhys = PAGE_SIZE;
1926 uint32_t offPhysLast = PAGE_SIZE;
1927 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1928
1929 /* The loop. */
1930 for (;;)
1931 {
1932 /*
1933 * Find the closest handler at or above GCPhys.
1934 */
1935 if (fMoreVirt && !pVirt)
1936 {
1937 int rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1938 if (RT_SUCCESS(rc))
1939 {
1940 offVirt = 0;
1941 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1942 }
1943 else
1944 {
1945 PPGMPHYS2VIRTHANDLER pVirtPhys;
1946 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1947 GCPhys, true /* fAbove */);
1948 if ( pVirtPhys
1949 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1950 {
1951 /* ASSUME that pVirtPhys only covers one page. */
1952 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1953 Assert(pVirtPhys->Core.Key > GCPhys);
1954
1955 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1956 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1957 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1958 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1959 }
1960 else
1961 {
1962 pVirt = NULL;
1963 fMoreVirt = false;
1964 offVirt = offVirtLast = PAGE_SIZE;
1965 }
1966 }
1967 }
1968
1969 if (fMorePhys && !pPhys)
1970 {
1971 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1972 if (pPhys)
1973 {
1974 offPhys = 0;
1975 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1976 }
1977 else
1978 {
1979 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
1980 GCPhys, true /* fAbove */);
1981 if ( pPhys
1982 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
1983 {
1984 offPhys = pPhys->Core.Key - GCPhys;
1985 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
1986 }
1987 else
1988 {
1989 pPhys = NULL;
1990 fMorePhys = false;
1991 offPhys = offPhysLast = PAGE_SIZE;
1992 }
1993 }
1994 }
1995
1996 /*
1997 * Handle access to space without handlers (that's easy).
1998 */
1999 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2000 uint32_t cbRange = (uint32_t)cbWrite;
2001 if (offPhys && offVirt)
2002 {
2003 if (cbRange > offPhys)
2004 cbRange = offPhys;
2005 if (cbRange > offVirt)
2006 cbRange = offVirt;
2007 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2008 }
2009 /*
2010 * Physical handler.
2011 */
2012 else if (!offPhys && offVirt)
2013 {
2014 if (cbRange > offPhysLast + 1)
2015 cbRange = offPhysLast + 1;
2016 if (cbRange > offVirt)
2017 cbRange = offVirt;
2018#ifdef IN_RING3
2019 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2020 void *pvUser = pPhys->CTX_SUFF(pvUser);
2021
2022 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2023 STAM_PROFILE_START(&pPhys->Stat, h);
2024 Assert(PGMIsLockOwner(pVM));
2025 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2026 pgmUnlock(pVM);
2027 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2028 pgmLock(pVM);
2029# ifdef VBOX_WITH_STATISTICS
2030 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2031 if (pPhys)
2032 STAM_PROFILE_STOP(&pPhys->Stat, h);
2033# else
2034 pPhys = NULL; /* might not be valid anymore. */
2035# endif
2036 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2037#else
2038 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2039 NOREF(cbRange);
2040 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2041 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2042#endif
2043 }
2044 /*
2045 * Virtual handler.
2046 */
2047 else if (offPhys && !offVirt)
2048 {
2049 if (cbRange > offVirtLast + 1)
2050 cbRange = offVirtLast + 1;
2051 if (cbRange > offPhys)
2052 cbRange = offPhys;
2053#ifdef IN_RING3
2054 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2055 if (pVirt->pfnHandlerR3)
2056 {
2057 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2058 + (iVirtPage << PAGE_SHIFT)
2059 + (GCPhys & PAGE_OFFSET_MASK);
2060 STAM_PROFILE_START(&pVirt->Stat, h);
2061 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2062 STAM_PROFILE_STOP(&pVirt->Stat, h);
2063 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2064 }
2065 pVirt = NULL;
2066#else
2067 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2068 NOREF(cbRange);
2069 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2070 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2071#endif
2072 }
2073 /*
2074 * Both... give the physical one priority.
2075 */
2076 else
2077 {
2078 Assert(!offPhys && !offVirt);
2079 if (cbRange > offVirtLast + 1)
2080 cbRange = offVirtLast + 1;
2081 if (cbRange > offPhysLast + 1)
2082 cbRange = offPhysLast + 1;
2083
2084#ifdef IN_RING3
2085 if (pVirt->pfnHandlerR3)
2086 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2087 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2088
2089 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2090 void *pvUser = pPhys->CTX_SUFF(pvUser);
2091
2092 STAM_PROFILE_START(&pPhys->Stat, h);
2093 Assert(PGMIsLockOwner(pVM));
2094 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2095 pgmUnlock(pVM);
2096 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2097 pgmLock(pVM);
2098# ifdef VBOX_WITH_STATISTICS
2099 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2100 if (pPhys)
2101 STAM_PROFILE_STOP(&pPhys->Stat, h);
2102# else
2103 pPhys = NULL; /* might not be valid anymore. */
2104# endif
2105 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2106 if (pVirt->pfnHandlerR3)
2107 {
2108
2109 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2110 + (iVirtPage << PAGE_SHIFT)
2111 + (GCPhys & PAGE_OFFSET_MASK);
2112 STAM_PROFILE_START(&pVirt->Stat, h);
2113 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
2114 STAM_PROFILE_STOP(&pVirt->Stat, h);
2115 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2116 rc = VINF_SUCCESS;
2117 else
2118 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2119 }
2120 pPhys = NULL;
2121 pVirt = NULL;
2122#else
2123 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2124 NOREF(cbRange);
2125 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2126 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2127#endif
2128 }
2129 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2130 memcpy(pvDst, pvBuf, cbRange);
2131
2132 /*
2133 * Advance if we've got more stuff to do.
2134 */
2135 if (cbRange >= cbWrite)
2136 return VINF_SUCCESS;
2137
2138 cbWrite -= cbRange;
2139 GCPhys += cbRange;
2140 pvBuf = (uint8_t *)pvBuf + cbRange;
2141 pvDst = (uint8_t *)pvDst + cbRange;
2142
2143 offPhys -= cbRange;
2144 offPhysLast -= cbRange;
2145 offVirt -= cbRange;
2146 offVirtLast -= cbRange;
2147 }
2148}
2149
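/*
 * Illustrative walk-through of the chunk loop above (hypothetical layout):
 * a 0x30 byte write starting at page offset 0x00, with a physical handler
 * covering offsets 0x10..0x1f and no virtual handler, is carved into three
 * chunks. 0x00..0x0f is copied straight into pvDst (offPhys=0x10), 0x10..0x1f
 * is passed to the physical handler and copied afterwards if it returns
 * VINF_PGM_HANDLER_DO_DEFAULT, and 0x20..0x2f is copied straight again.
 * cbWrite, GCPhys, pvBuf, pvDst and the off* trackers all advance by cbRange
 * after each chunk until the request is exhausted.
 */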
2150
2151/**
2152 * Write to physical memory.
2153 *
2154 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2155 * want to ignore those.
2156 *
2157 * @returns VBox status code. Can be ignored in ring-3.
2158 * @retval VINF_SUCCESS.
2159 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2160 *
2161 * @param pVM VM Handle.
2162 * @param GCPhys Physical address to write to.
2163 * @param pvBuf What to write.
2164 * @param cbWrite How many bytes to write.
2165 */
2166VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2167{
2168 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2169 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2170 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2171
2172 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2173 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2174
2175 pgmLock(pVM);
2176
2177 /*
2178 * Copy loop on ram ranges.
2179 */
2180 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2181 for (;;)
2182 {
2183 /* Find range. */
2184 while (pRam && GCPhys > pRam->GCPhysLast)
2185 pRam = pRam->CTX_SUFF(pNext);
2186 /* Inside range or not? */
2187 if (pRam && GCPhys >= pRam->GCPhys)
2188 {
2189 /*
2190 * Must work our way thru this page by page.
2191 */
2192 RTGCPHYS off = GCPhys - pRam->GCPhys;
2193 while (off < pRam->cb)
2194 {
2195 unsigned iPage = off >> PAGE_SHIFT;
2196 PPGMPAGE pPage = &pRam->aPages[iPage];
2197 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2198 if (cb > cbWrite)
2199 cb = cbWrite;
2200
2201 /*
2202 * Any active WRITE or ALL access handlers?
2203 */
2204 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2205 {
2206 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2207 if (RT_FAILURE(rc))
2208 {
2209 pgmUnlock(pVM);
2210 return rc;
2211 }
2212 }
2213 else
2214 {
2215 /*
2216 * Get the pointer to the page.
2217 */
2218 void *pvDst;
2219 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2220 if (RT_SUCCESS(rc))
2221 memcpy(pvDst, pvBuf, cb);
2222 else
2223 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2224 pRam->GCPhys + off, pPage, rc));
2225 }
2226
2227 /* next page */
2228 if (cb >= cbWrite)
2229 {
2230 pgmUnlock(pVM);
2231 return VINF_SUCCESS;
2232 }
2233
2234 cbWrite -= cb;
2235 off += cb;
2236 pvBuf = (const char *)pvBuf + cb;
2237 } /* walk pages in ram range */
2238
2239 GCPhys = pRam->GCPhysLast + 1;
2240 }
2241 else
2242 {
2243 /*
2244 * Unassigned address space, skip it.
2245 */
2246 if (!pRam)
2247 break;
2248 size_t cb = pRam->GCPhys - GCPhys;
2249 if (cb >= cbWrite)
2250 break;
2251 cbWrite -= cb;
2252 pvBuf = (const char *)pvBuf + cb;
2253 GCPhys += cb;
2254 }
2255 } /* Ram range walk */
2256
2257 pgmUnlock(pVM);
2258 return VINF_SUCCESS;
2259}
2260
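/*
 * Usage sketch (illustrative only; GCPhysReg is hypothetical): a caller doing
 * a read-modify-write of a guest dword through the handler-aware APIs.
 *
 *      uint32_t u32;
 *      int rc = PGMPhysRead(pVM, GCPhysReg, &u32, sizeof(u32));
 *      if (RT_SUCCESS(rc))
 *      {
 *          u32 |= RT_BIT_32(0);
 *          rc = PGMPhysWrite(pVM, GCPhysReg, &u32, sizeof(u32));
 *      }
 *
 * In R0/RC either call may return VERR_PGM_PHYS_WR_HIT_HANDLER; callers
 * typically fall back to ring-3 in that case.
 */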
2261
2262/**
2263 * Read from guest physical memory by GC physical address, bypassing
2264 * MMIO and access handlers.
2265 *
2266 * @returns VBox status.
2267 * @param pVM VM handle.
2268 * @param pvDst The destination address.
2269 * @param GCPhysSrc The source address (GC physical address).
2270 * @param cb The number of bytes to read.
2271 */
2272VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2273{
2274 /*
2275 * Treat the first page as a special case.
2276 */
2277 if (!cb)
2278 return VINF_SUCCESS;
2279
2280 /* map the 1st page */
2281 void const *pvSrc;
2282 PGMPAGEMAPLOCK Lock;
2283 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2284 if (RT_FAILURE(rc))
2285 return rc;
2286
2287 /* optimize for the case where access is completely within the first page. */
2288 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2289 if (RT_LIKELY(cb <= cbPage))
2290 {
2291 memcpy(pvDst, pvSrc, cb);
2292 PGMPhysReleasePageMappingLock(pVM, &Lock);
2293 return VINF_SUCCESS;
2294 }
2295
2296 /* copy to the end of the page. */
2297 memcpy(pvDst, pvSrc, cbPage);
2298 PGMPhysReleasePageMappingLock(pVM, &Lock);
2299 GCPhysSrc += cbPage;
2300 pvDst = (uint8_t *)pvDst + cbPage;
2301 cb -= cbPage;
2302
2303 /*
2304 * Page by page.
2305 */
2306 for (;;)
2307 {
2308 /* map the page */
2309 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2310 if (RT_FAILURE(rc))
2311 return rc;
2312
2313 /* last page? */
2314 if (cb <= PAGE_SIZE)
2315 {
2316 memcpy(pvDst, pvSrc, cb);
2317 PGMPhysReleasePageMappingLock(pVM, &Lock);
2318 return VINF_SUCCESS;
2319 }
2320
2321 /* copy the entire page and advance */
2322 memcpy(pvDst, pvSrc, PAGE_SIZE);
2323 PGMPhysReleasePageMappingLock(pVM, &Lock);
2324 GCPhysSrc += PAGE_SIZE;
2325 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2326 cb -= PAGE_SIZE;
2327 }
2328 /* won't ever get here. */
2329}
2330
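/*
 * Usage sketch (illustrative only; GCPhysGdtEntry is hypothetical). Unlike
 * PGMPhysRead, this variant maps each page with PGMPhysGCPhys2CCPtrReadOnly
 * and copies directly, so access handlers and MMIO emulation never run.
 *
 *      uint64_t u64Desc;
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, &u64Desc, GCPhysGdtEntry, sizeof(u64Desc));
 */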
2331
2332/**
2333 * Write to guest physical memory by GC physical address.
2335 *
2336 * This will bypass MMIO and access handlers.
2337 *
2338 * @returns VBox status.
2339 * @param pVM VM handle.
2340 * @param GCPhysDst The GC physical address of the destination.
2341 * @param pvSrc The source buffer.
2342 * @param cb The number of bytes to write.
2343 */
2344VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2345{
2346 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2347
2348 /*
2349 * Treat the first page as a special case.
2350 */
2351 if (!cb)
2352 return VINF_SUCCESS;
2353
2354 /* map the 1st page */
2355 void *pvDst;
2356 PGMPAGEMAPLOCK Lock;
2357 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2358 if (RT_FAILURE(rc))
2359 return rc;
2360
2361 /* optimize for the case where access is completely within the first page. */
2362 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2363 if (RT_LIKELY(cb <= cbPage))
2364 {
2365 memcpy(pvDst, pvSrc, cb);
2366 PGMPhysReleasePageMappingLock(pVM, &Lock);
2367 return VINF_SUCCESS;
2368 }
2369
2370 /* copy to the end of the page. */
2371 memcpy(pvDst, pvSrc, cbPage);
2372 PGMPhysReleasePageMappingLock(pVM, &Lock);
2373 GCPhysDst += cbPage;
2374 pvSrc = (const uint8_t *)pvSrc + cbPage;
2375 cb -= cbPage;
2376
2377 /*
2378 * Page by page.
2379 */
2380 for (;;)
2381 {
2382 /* map the page */
2383 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2384 if (RT_FAILURE(rc))
2385 return rc;
2386
2387 /* last page? */
2388 if (cb <= PAGE_SIZE)
2389 {
2390 memcpy(pvDst, pvSrc, cb);
2391 PGMPhysReleasePageMappingLock(pVM, &Lock);
2392 return VINF_SUCCESS;
2393 }
2394
2395 /* copy the entire page and advance */
2396 memcpy(pvDst, pvSrc, PAGE_SIZE);
2397 PGMPhysReleasePageMappingLock(pVM, &Lock);
2398 GCPhysDst += PAGE_SIZE;
2399 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2400 cb -= PAGE_SIZE;
2401 }
2402 /* won't ever get here. */
2403}
2404
2405
2406/**
2407 * Read from guest physical memory referenced by GC pointer.
2408 *
2409 * This function uses the current CR3/CR0/CR4 of the guest and will
2410 * bypass access handlers and not set any accessed bits.
2411 *
2412 * @returns VBox status.
2413 * @param pVCpu The VMCPU handle.
2414 * @param pvDst The destination address.
2415 * @param GCPtrSrc The source address (GC pointer).
2416 * @param cb The number of bytes to read.
2417 */
2418VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2419{
2420 PVM pVM = pVCpu->CTX_SUFF(pVM);
2421
2422 /*
2423 * Treat the first page as a special case.
2424 */
2425 if (!cb)
2426 return VINF_SUCCESS;
2427
2428 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2429 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2430
2431 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2432 * when many VCPUs are fighting for the lock.
2433 */
2434 pgmLock(pVM);
2435
2436 /* map the 1st page */
2437 void const *pvSrc;
2438 PGMPAGEMAPLOCK Lock;
2439 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2440 if (RT_FAILURE(rc))
2441 {
2442 pgmUnlock(pVM);
2443 return rc;
2444 }
2445
2446 /* optimize for the case where access is completely within the first page. */
2447 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2448 if (RT_LIKELY(cb <= cbPage))
2449 {
2450 memcpy(pvDst, pvSrc, cb);
2451 PGMPhysReleasePageMappingLock(pVM, &Lock);
2452 pgmUnlock(pVM);
2453 return VINF_SUCCESS;
2454 }
2455
2456 /* copy to the end of the page. */
2457 memcpy(pvDst, pvSrc, cbPage);
2458 PGMPhysReleasePageMappingLock(pVM, &Lock);
2459 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2460 pvDst = (uint8_t *)pvDst + cbPage;
2461 cb -= cbPage;
2462
2463 /*
2464 * Page by page.
2465 */
2466 for (;;)
2467 {
2468 /* map the page */
2469 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2470 if (RT_FAILURE(rc))
2471 {
2472 pgmUnlock(pVM);
2473 return rc;
2474 }
2475
2476 /* last page? */
2477 if (cb <= PAGE_SIZE)
2478 {
2479 memcpy(pvDst, pvSrc, cb);
2480 PGMPhysReleasePageMappingLock(pVM, &Lock);
2481 pgmUnlock(pVM);
2482 return VINF_SUCCESS;
2483 }
2484
2485 /* copy the entire page and advance */
2486 memcpy(pvDst, pvSrc, PAGE_SIZE);
2487 PGMPhysReleasePageMappingLock(pVM, &Lock);
2488 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2489 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2490 cb -= PAGE_SIZE;
2491 }
2492 /* won't ever get here. */
2493}
2494
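/*
 * Usage sketch (illustrative only; GCPtrStack is a hypothetical guest virtual
 * address). The translation uses the current CR3/CR0/CR4 of the VCPU, access
 * handlers are bypassed and no accessed bits are set.
 *
 *      uint32_t au32Frame[2];
 *      int rc = PGMPhysSimpleReadGCPtr(pVCpu, au32Frame, GCPtrStack, sizeof(au32Frame));
 */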
2495
2496/**
2497 * Write to guest physical memory referenced by GC pointer.
2498 *
2499 * This function uses the current CR3/CR0/CR4 of the guest and will
2500 * bypass access handlers and not set dirty or accessed bits.
2501 *
2502 * @returns VBox status.
2503 * @param pVCpu The VMCPU handle.
2504 * @param GCPtrDst The destination address (GC pointer).
2505 * @param pvSrc The source address.
2506 * @param cb The number of bytes to write.
2507 */
2508VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2509{
2510 PVM pVM = pVCpu->CTX_SUFF(pVM);
2511
2512 /*
2513 * Treat the first page as a special case.
2514 */
2515 if (!cb)
2516 return VINF_SUCCESS;
2517
2518 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2519 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2520
2521 /* map the 1st page */
2522 void *pvDst;
2523 PGMPAGEMAPLOCK Lock;
2524 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2525 if (RT_FAILURE(rc))
2526 return rc;
2527
2528 /* optimize for the case where access is completely within the first page. */
2529 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2530 if (RT_LIKELY(cb <= cbPage))
2531 {
2532 memcpy(pvDst, pvSrc, cb);
2533 PGMPhysReleasePageMappingLock(pVM, &Lock);
2534 return VINF_SUCCESS;
2535 }
2536
2537 /* copy to the end of the page. */
2538 memcpy(pvDst, pvSrc, cbPage);
2539 PGMPhysReleasePageMappingLock(pVM, &Lock);
2540 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2541 pvSrc = (const uint8_t *)pvSrc + cbPage;
2542 cb -= cbPage;
2543
2544 /*
2545 * Page by page.
2546 */
2547 for (;;)
2548 {
2549 /* map the page */
2550 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2551 if (RT_FAILURE(rc))
2552 return rc;
2553
2554 /* last page? */
2555 if (cb <= PAGE_SIZE)
2556 {
2557 memcpy(pvDst, pvSrc, cb);
2558 PGMPhysReleasePageMappingLock(pVM, &Lock);
2559 return VINF_SUCCESS;
2560 }
2561
2562 /* copy the entire page and advance */
2563 memcpy(pvDst, pvSrc, PAGE_SIZE);
2564 PGMPhysReleasePageMappingLock(pVM, &Lock);
2565 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2566 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2567 cb -= PAGE_SIZE;
2568 }
2569 /* won't ever get here. */
2570}
2571
2572
2573/**
2574 * Write to guest physical memory referenced by GC pointer and update the PTE.
2575 *
2576 * This function uses the current CR3/CR0/CR4 of the guest and will
2577 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2578 *
2579 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2580 *
2581 * @returns VBox status.
2582 * @param pVCpu The VMCPU handle.
2583 * @param GCPtrDst The destination address (GC pointer).
2584 * @param pvSrc The source address.
2585 * @param cb The number of bytes to write.
2586 */
2587VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2588{
2589 PVM pVM = pVCpu->CTX_SUFF(pVM);
2590
2591 /*
2592 * Treat the first page as a special case.
2593 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2594 */
2595 if (!cb)
2596 return VINF_SUCCESS;
2597
2598 /* map the 1st page */
2599 void *pvDst;
2600 PGMPAGEMAPLOCK Lock;
2601 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2602 if (RT_FAILURE(rc))
2603 return rc;
2604
2605 /* optimize for the case where access is completely within the first page. */
2606 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2607 if (RT_LIKELY(cb <= cbPage))
2608 {
2609 memcpy(pvDst, pvSrc, cb);
2610 PGMPhysReleasePageMappingLock(pVM, &Lock);
2611 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2612 return VINF_SUCCESS;
2613 }
2614
2615 /* copy to the end of the page. */
2616 memcpy(pvDst, pvSrc, cbPage);
2617 PGMPhysReleasePageMappingLock(pVM, &Lock);
2618 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2619 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2620 pvSrc = (const uint8_t *)pvSrc + cbPage;
2621 cb -= cbPage;
2622
2623 /*
2624 * Page by page.
2625 */
2626 for (;;)
2627 {
2628 /* map the page */
2629 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2630 if (RT_FAILURE(rc))
2631 return rc;
2632
2633 /* last page? */
2634 if (cb <= PAGE_SIZE)
2635 {
2636 memcpy(pvDst, pvSrc, cb);
2637 PGMPhysReleasePageMappingLock(pVM, &Lock);
2638 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2639 return VINF_SUCCESS;
2640 }
2641
2642 /* copy the entire page and advance */
2643 memcpy(pvDst, pvSrc, PAGE_SIZE);
2644 PGMPhysReleasePageMappingLock(pVM, &Lock);
2645 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2646 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2647 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2648 cb -= PAGE_SIZE;
2649 }
2650 /* won't ever get here. */
2651}
2652
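/*
 * Usage sketch (illustrative only; GCPtrStackTop and the value are
 * hypothetical). This dirty variant is for writes where the guest's accessed
 * and dirty bits should be updated as a side effect, e.g. an emulated push:
 *
 *      uint32_t const u32Val = UINT32_C(0xdeadbeef);
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStackTop, &u32Val, sizeof(u32Val));
 *
 * PGMPhysSimpleWriteGCPtr above performs the same copy but leaves the PTE
 * untouched.
 */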
2653
2654/**
2655 * Read from guest physical memory referenced by GC pointer.
2656 *
2657 * This function uses the current CR3/CR0/CR4 of the guest and will
2658 * respect access handlers and set accessed bits.
2659 *
2660 * @returns VBox status.
2661 * @param pVCpu The VMCPU handle.
2662 * @param pvDst The destination address.
2663 * @param GCPtrSrc The source address (GC pointer).
2664 * @param cb The number of bytes to read.
2665 * @thread The vCPU EMT.
2666 */
2667VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2668{
2669 RTGCPHYS GCPhys;
2670 uint64_t fFlags;
2671 int rc;
2672 PVM pVM = pVCpu->CTX_SUFF(pVM);
2673
2674 /*
2675 * Anything to do?
2676 */
2677 if (!cb)
2678 return VINF_SUCCESS;
2679
2680 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2681
2682 /*
2683 * Optimize reads within a single page.
2684 */
2685 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2686 {
2687 /* Convert virtual to physical address + flags */
2688 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2689 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2690 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2691
2692 /* mark the guest page as accessed. */
2693 if (!(fFlags & X86_PTE_A))
2694 {
2695 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2696 AssertRC(rc);
2697 }
2698
2699 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2700 }
2701
2702 /*
2703 * Page by page.
2704 */
2705 for (;;)
2706 {
2707 /* Convert virtual to physical address + flags */
2708 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2709 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2710 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2711
2712 /* mark the guest page as accessed. */
2713 if (!(fFlags & X86_PTE_A))
2714 {
2715 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2716 AssertRC(rc);
2717 }
2718
2719 /* copy; clamp to the requested size so the last page doesn't overrun pvDst */
2720 size_t cbRead = RT_MIN(PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK), cb);
2721 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2722 if (cbRead >= cb || RT_FAILURE(rc))
2723 return rc;
2724
2725 /* next */
2726 cb -= cbRead;
2727 pvDst = (uint8_t *)pvDst + cbRead;
2728 GCPtrSrc += cbRead;
2729 }
2730}
2731
2732
2733/**
2734 * Write to guest physical memory referenced by GC pointer.
2735 *
2736 * This function uses the current CR3/CR0/CR4 of the guest and will
2737 * respect access handlers and set dirty and accessed bits.
2738 *
2739 * @returns VBox status.
2740 * @retval VINF_SUCCESS.
2741 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2742 *
2743 * @param pVCpu The VMCPU handle.
2744 * @param GCPtrDst The destination address (GC pointer).
2745 * @param pvSrc The source address.
2746 * @param cb The number of bytes to write.
2747 */
2748VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2749{
2750 RTGCPHYS GCPhys;
2751 uint64_t fFlags;
2752 int rc;
2753 PVM pVM = pVCpu->CTX_SUFF(pVM);
2754
2755 /*
2756 * Anything to do?
2757 */
2758 if (!cb)
2759 return VINF_SUCCESS;
2760
2761 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2762
2763 /*
2764 * Optimize writes within a single page.
2765 */
2766 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2767 {
2768 /* Convert virtual to physical address + flags */
2769 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2770 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2771 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2772
2773 /* Mention when we ignore X86_PTE_RW... */
2774 if (!(fFlags & X86_PTE_RW))
2775 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2776
2777 /* Mark the guest page as accessed and dirty if necessary. */
2778 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2779 {
2780 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2781 AssertRC(rc);
2782 }
2783
2784 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2785 }
2786
2787 /*
2788 * Page by page.
2789 */
2790 for (;;)
2791 {
2792 /* Convert virtual to physical address + flags */
2793 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2794 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2795 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2796
2797 /* Mention when we ignore X86_PTE_RW... */
2798 if (!(fFlags & X86_PTE_RW))
2799 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2800
2801 /* Mark the guest page as accessed and dirty if necessary. */
2802 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2803 {
2804 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2805 AssertRC(rc);
2806 }
2807
2808 /* copy; clamp to the requested size so the last page doesn't read beyond the source buffer */
2809 size_t cbWrite = RT_MIN(PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK), cb);
2810 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2811 if (cbWrite >= cb || RT_FAILURE(rc))
2812 return rc;
2813
2814 /* next */
2815 cb -= cbWrite;
2816 pvSrc = (const uint8_t *)pvSrc + cbWrite;
2817 GCPtrDst += cbWrite;
2818 }
2819}
2820
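/*
 * Usage sketch (illustrative only; GCPtrOperand is hypothetical): the
 * handler-aware GC-pointer write. Unlike the Simple variants this goes
 * through PGMPhysWrite, so MMIO and access handlers are respected, and the
 * PTE is marked accessed and dirty first.
 *
 *      uint16_t const u16 = 0x1234;
 *      int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrOperand, &u16, sizeof(u16));
 */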
2821
2822/**
2823 * Performs a read of guest virtual memory for instruction emulation.
2824 *
2825 * This will check permissions, raise exceptions and update the access bits.
2826 *
2827 * The current implementation will bypass all access handlers. It may later be
2828 * changed to at least respect MMIO.
2829 *
2830 *
2831 * @returns VBox status code suitable to scheduling.
2832 * @retval VINF_SUCCESS if the read was performed successfully.
2833 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2834 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2835 *
2836 * @param pVCpu The VMCPU handle.
2837 * @param pCtxCore The context core.
2838 * @param pvDst Where to put the bytes we've read.
2839 * @param GCPtrSrc The source address.
2840 * @param cb The number of bytes to read. Not more than a page.
2841 *
2842 * @remark This function will dynamically map physical pages in GC. This may unmap
2843 * mappings done by the caller. Be careful!
2844 */
2845VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2846{
2847 PVM pVM = pVCpu->CTX_SUFF(pVM);
2848 Assert(cb <= PAGE_SIZE);
2849
2850/** @todo r=bird: This isn't perfect!
2851 * -# It's not checking for reserved bits being 1.
2852 * -# It's not correctly dealing with the access bit.
2853 * -# It's not respecting MMIO memory or any other access handlers.
2854 */
2855 /*
2856 * 1. Translate virtual to physical. This may fault.
2857 * 2. Map the physical address.
2858 * 3. Do the read operation.
2859 * 4. Set access bits if required.
2860 */
2861 int rc;
2862 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2863 if (cb <= cb1)
2864 {
2865 /*
2866 * Not crossing pages.
2867 */
2868 RTGCPHYS GCPhys;
2869 uint64_t fFlags;
2870 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
2871 if (RT_SUCCESS(rc))
2872 {
2873 /** @todo we should check reserved bits ... */
2874 void *pvSrc;
2875 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2876 switch (rc)
2877 {
2878 case VINF_SUCCESS:
2879 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2880 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2881 break;
2882 case VERR_PGM_PHYS_PAGE_RESERVED:
2883 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2884 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2885 break;
2886 default:
2887 return rc;
2888 }
2889
2890 /** @todo access bit emulation isn't 100% correct. */
2891 if (!(fFlags & X86_PTE_A))
2892 {
2893 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2894 AssertRC(rc);
2895 }
2896 return VINF_SUCCESS;
2897 }
2898 }
2899 else
2900 {
2901 /*
2902 * Crosses pages.
2903 */
2904 size_t cb2 = cb - cb1;
2905 uint64_t fFlags1;
2906 RTGCPHYS GCPhys1;
2907 uint64_t fFlags2;
2908 RTGCPHYS GCPhys2;
2909 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
2910 if (RT_SUCCESS(rc))
2911 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2912 if (RT_SUCCESS(rc))
2913 {
2914 /** @todo we should check reserved bits ... */
2915 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2916 void *pvSrc1;
2917 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2918 switch (rc)
2919 {
2920 case VINF_SUCCESS:
2921 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2922 break;
2923 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2924 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2925 break;
2926 default:
2927 return rc;
2928 }
2929
2930 void *pvSrc2;
2931 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2932 switch (rc)
2933 {
2934 case VINF_SUCCESS:
2935 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2936 break;
2937 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2938 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2939 break;
2940 default:
2941 return rc;
2942 }
2943
2944 if (!(fFlags1 & X86_PTE_A))
2945 {
2946 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2947 AssertRC(rc);
2948 }
2949 if (!(fFlags2 & X86_PTE_A))
2950 {
2951 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2952 AssertRC(rc);
2953 }
2954 return VINF_SUCCESS;
2955 }
2956 }
2957
2958 /*
2959 * Raise a #PF.
2960 */
2961 uint32_t uErr;
2962
2963 /* Get the current privilege level. */
2964 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
2965 switch (rc)
2966 {
2967 case VINF_SUCCESS:
2968 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2969 break;
2970
2971 case VERR_PAGE_NOT_PRESENT:
2972 case VERR_PAGE_TABLE_NOT_PRESENT:
2973 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2974 break;
2975
2976 default:
2977 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
2978 return rc;
2979 }
2980 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2981 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2982}
2983
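/*
 * Usage sketch (illustrative only; pRegFrame and GCPtrPar stand for the
 * caller's context core and a decoded operand address): an instruction
 * interpreter fetching an operand and feeding the status back to the
 * scheduler.
 *
 *      uint32_t u32Src;
 *      int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u32Src, GCPtrPar, sizeof(u32Src));
 *      if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
 *          return rc;  // a #PF was raised for the guest
 */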
2984
2985/**
2986 * Performs a read of guest virtual memory for instruction emulation.
2987 *
2988 * This will check permissions, raise exceptions and update the access bits.
2989 *
2990 * The current implementation will bypass all access handlers. It may later be
2991 * changed to at least respect MMIO.
2992 *
2993 *
2994 * @returns VBox status code suitable to scheduling.
2995 * @retval VINF_SUCCESS if the read was performed successfully.
2996 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2997 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2998 *
2999 * @param pVCpu The VMCPU handle.
3000 * @param pCtxCore The context core.
3001 * @param pvDst Where to put the bytes we've read.
3002 * @param GCPtrSrc The source address.
3003 * @param cb The number of bytes to read. Not more than a page.
3004 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3005 * an appropriate error status will be returned (no
3006 * informational status at all).
3007 *
3008 *
3009 * @remarks Takes the PGM lock.
3010 * @remarks A page fault on the 2nd page of the access will be raised without
3011 * writing the bits on the first page since we're ASSUMING that the
3012 * caller is emulating an instruction access.
3013 * @remarks This function will dynamically map physical pages in GC. This may
3014 * unmap mappings done by the caller. Be careful!
3015 */
3016VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3017{
3018 PVM pVM = pVCpu->CTX_SUFF(pVM);
3019 Assert(cb <= PAGE_SIZE);
3020
3021 /*
3022 * 1. Translate virtual to physical. This may fault.
3023 * 2. Map the physical address.
3024 * 3. Do the read operation.
3025 * 4. Set access bits if required.
3026 */
3027 int rc;
3028 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3029 if (cb <= cb1)
3030 {
3031 /*
3032 * Not crossing pages.
3033 */
3034 RTGCPHYS GCPhys;
3035 uint64_t fFlags;
3036 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3037 if (RT_SUCCESS(rc))
3038 {
3039 if (1) /** @todo we should check reserved bits ... */
3040 {
3041 const void *pvSrc;
3042 PGMPAGEMAPLOCK Lock;
3043 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3044 switch (rc)
3045 {
3046 case VINF_SUCCESS:
3047 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3048 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3049 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3050 break;
3051 case VERR_PGM_PHYS_PAGE_RESERVED:
3052 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3053 memset(pvDst, 0xff, cb);
3054 break;
3055 default:
3056 AssertMsgFailed(("%Rrc\n", rc));
3057 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3058 return rc;
3059 }
3060 PGMPhysReleasePageMappingLock(pVM, &Lock);
3061
3062 if (!(fFlags & X86_PTE_A))
3063 {
3064 /** @todo access bit emulation isn't 100% correct. */
3065 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3066 AssertRC(rc);
3067 }
3068 return VINF_SUCCESS;
3069 }
3070 }
3071 }
3072 else
3073 {
3074 /*
3075 * Crosses pages.
3076 */
3077 size_t cb2 = cb - cb1;
3078 uint64_t fFlags1;
3079 RTGCPHYS GCPhys1;
3080 uint64_t fFlags2;
3081 RTGCPHYS GCPhys2;
3082 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3083 if (RT_SUCCESS(rc))
3084 {
3085 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3086 if (RT_SUCCESS(rc))
3087 {
3088 if (1) /** @todo we should check reserved bits ... */
3089 {
3090 const void *pvSrc;
3091 PGMPAGEMAPLOCK Lock;
3092 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3093 switch (rc)
3094 {
3095 case VINF_SUCCESS:
3096 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3097 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3098 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3099 PGMPhysReleasePageMappingLock(pVM, &Lock);
3100 break;
3101 case VERR_PGM_PHYS_PAGE_RESERVED:
3102 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3103 memset(pvDst, 0xff, cb1);
3104 break;
3105 default:
3106 AssertMsgFailed(("%Rrc\n", rc));
3107 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3108 return rc;
3109 }
3110
3111 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3112 switch (rc)
3113 {
3114 case VINF_SUCCESS:
3115 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3116 PGMPhysReleasePageMappingLock(pVM, &Lock);
3117 break;
3118 case VERR_PGM_PHYS_PAGE_RESERVED:
3119 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3120 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3121 break;
3122 default:
3123 AssertMsgFailed(("%Rrc\n", rc));
3124 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3125 return rc;
3126 }
3127
3128 if (!(fFlags1 & X86_PTE_A))
3129 {
3130 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3131 AssertRC(rc);
3132 }
3133 if (!(fFlags2 & X86_PTE_A))
3134 {
3135 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3136 AssertRC(rc);
3137 }
3138 return VINF_SUCCESS;
3139 }
3140 /* sort out which page */
3141 }
3142 else
3143 GCPtrSrc += cb1; /* fault on 2nd page */
3144 }
3145 }
3146
3147 /*
3148 * Raise a #PF if we're allowed to do that.
3149 */
3150 /* Calc the error bits. */
3151 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3152 uint32_t uErr;
3153 switch (rc)
3154 {
3155 case VINF_SUCCESS:
3156 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3157 rc = VERR_ACCESS_DENIED;
3158 break;
3159
3160 case VERR_PAGE_NOT_PRESENT:
3161 case VERR_PAGE_TABLE_NOT_PRESENT:
3162 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3163 break;
3164
3165 default:
3166 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3167 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3168 return rc;
3169 }
3170 if (fRaiseTrap)
3171 {
3172 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3173 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3174 }
3175 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3176 return rc;
3177}
3178
3179
3180/**
3181 * Performs a write to guest virtual memory for instruction emulation.
3182 *
3183 * This will check permissions, raise exceptions and update the dirty and access
3184 * bits.
3185 *
3186 * @returns VBox status code suitable to scheduling.
3187 * @retval VINF_SUCCESS if the read was performed successfully.
3188 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3189 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3190 *
3191 * @param pVCpu The VMCPU handle.
3192 * @param pCtxCore The context core.
3193 * @param GCPtrDst The destination address.
3194 * @param pvSrc What to write.
3195 * @param cb The number of bytes to write. Not more than a page.
3196 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3197 * an appropriate error status will be returned (no
3198 * informational status at all).
3199 *
3200 * @remarks Takes the PGM lock.
3201 * @remarks A page fault on the 2nd page of the access will be raised without
3202 * writing the bits on the first page since we're ASSUMING that the
3203 * caller is emulating an instruction access.
3204 * @remarks This function will dynamically map physical pages in GC. This may
3205 * unmap mappings done by the caller. Be careful!
3206 */
3207VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3208{
3209 Assert(cb <= PAGE_SIZE);
3210 PVM pVM = pVCpu->CTX_SUFF(pVM);
3211
3212 /*
3213 * 1. Translate virtual to physical. This may fault.
3214 * 2. Map the physical address.
3215 * 3. Do the write operation.
3216 * 4. Set access bits if required.
3217 */
3218 int rc;
3219 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3220 if (cb <= cb1)
3221 {
3222 /*
3223 * Not crossing pages.
3224 */
3225 RTGCPHYS GCPhys;
3226 uint64_t fFlags;
3227 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3228 if (RT_SUCCESS(rc))
3229 {
3230 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3231 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3232 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3233 {
3234 void *pvDst;
3235 PGMPAGEMAPLOCK Lock;
3236 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3237 switch (rc)
3238 {
3239 case VINF_SUCCESS:
3240 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3241 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3242 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3243 PGMPhysReleasePageMappingLock(pVM, &Lock);
3244 break;
3245 case VERR_PGM_PHYS_PAGE_RESERVED:
3246 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3247 /* bit bucket */
3248 break;
3249 default:
3250 AssertMsgFailed(("%Rrc\n", rc));
3251 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3252 return rc;
3253 }
3254
3255 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3256 {
3257 /** @todo dirty & access bit emulation isn't 100% correct. */
3258 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3259 AssertRC(rc);
3260 }
3261 return VINF_SUCCESS;
3262 }
3263 rc = VERR_ACCESS_DENIED;
3264 }
3265 }
3266 else
3267 {
3268 /*
3269 * Crosses pages.
3270 */
3271 size_t cb2 = cb - cb1;
3272 uint64_t fFlags1;
3273 RTGCPHYS GCPhys1;
3274 uint64_t fFlags2;
3275 RTGCPHYS GCPhys2;
3276 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3277 if (RT_SUCCESS(rc))
3278 {
3279 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3280 if (RT_SUCCESS(rc))
3281 {
3282 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3283 && (fFlags2 & X86_PTE_RW))
3284 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3285 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3286 {
3287 void *pvDst;
3288 PGMPAGEMAPLOCK Lock;
3289 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3290 switch (rc)
3291 {
3292 case VINF_SUCCESS:
3293 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3294 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3295 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3296 PGMPhysReleasePageMappingLock(pVM, &Lock);
3297 break;
3298 case VERR_PGM_PHYS_PAGE_RESERVED:
3299 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3300 /* bit bucket */
3301 break;
3302 default:
3303 AssertMsgFailed(("%Rrc\n", rc));
3304 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3305 return rc;
3306 }
3307
3308 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3309 switch (rc)
3310 {
3311 case VINF_SUCCESS:
3312 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3313 PGMPhysReleasePageMappingLock(pVM, &Lock);
3314 break;
3315 case VERR_PGM_PHYS_PAGE_RESERVED:
3316 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3317 /* bit bucket */
3318 break;
3319 default:
3320 AssertMsgFailed(("%Rrc\n", rc));
3321 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3322 return rc;
3323 }
3324
3325 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3326 {
3327 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3328 AssertRC(rc);
3329 }
3330 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3331 {
3332 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3333 AssertRC(rc);
3334 }
3335 return VINF_SUCCESS;
3336 }
3337 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3338 GCPtrDst += cb1; /* fault on the 2nd page. */
3339 rc = VERR_ACCESS_DENIED;
3340 }
3341 else
3342 GCPtrDst += cb1; /* fault on the 2nd page. */
3343 }
3344 }
3345
3346 /*
3347 * Raise a #PF if we're allowed to do that.
3348 */
3349 /* Calc the error bits. */
3350 uint32_t uErr;
3351 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3352 switch (rc)
3353 {
3354 case VINF_SUCCESS:
3355 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3356 rc = VERR_ACCESS_DENIED;
3357 break;
3358
3359 case VERR_ACCESS_DENIED:
3360 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3361 break;
3362
3363 case VERR_PAGE_NOT_PRESENT:
3364 case VERR_PAGE_TABLE_NOT_PRESENT:
3365 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3366 break;
3367
3368 default:
3369 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3370 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3371 return rc;
3372 }
3373 if (fRaiseTrap)
3374 {
3375 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3376 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3377 }
3378 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3379 return rc;
3380}
3381
3382