VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@5285

Last change on this file since 5285 was 5040, checked in by vboxsync, 17 years ago

GC phys/virt to HC virt functions are no longer accessible in our PDM interface.
Rewrote disassembly functions to use the mapping functions.

Code that runs in EMT (like CSAM/PATM) can still use the old conversion functions, which are easier to use
and have (far) less overhead.

1/* $Id: PGMAllPhys.cpp 5040 2007-09-26 09:03:00Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @def PGM_IGNORE_RAM_FLAGS_RESERVED
19 * Don't respect the MM_RAM_FLAGS_RESERVED flag when converting to HC addresses.
20 *
21 * Since this flag is currently incorrectly kept set for ROM regions we will
22 * have to ignore it for now so we don't break stuff.
23 *
24 * @todo this has been fixed now I believe, remove this hack.
25 */
26#define PGM_IGNORE_RAM_FLAGS_RESERVED
27
28
29/*******************************************************************************
30* Header Files *
31*******************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#include <VBox/pgm.h>
34#include <VBox/trpm.h>
35#include <VBox/vmm.h>
36#include <VBox/iom.h>
37#include <VBox/rem.h>
38#include "PGMInternal.h"
39#include <VBox/vm.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42#include <iprt/assert.h>
43#include <iprt/string.h>
44#include <iprt/asm.h>
45#include <VBox/log.h>
46#ifdef IN_RING3
47# include <iprt/thread.h>
48#endif
49
50
51
52/**
53 * Checks if Address Gate 20 is enabled or not.
54 *
55 * @returns true if enabled.
56 * @returns false if disabled.
57 * @param pVM VM handle.
58 */
59PGMDECL(bool) PGMPhysIsA20Enabled(PVM pVM)
60{
61 LogFlow(("PGMPhysIsA20Enabled %d\n", pVM->pgm.s.fA20Enabled));
62 return !!pVM->pgm.s.fA20Enabled ; /* stupid MS compiler doesn't trust me. */
63}
64
65
66/**
67 * Validates a GC physical address.
68 *
69 * @returns true if valid.
70 * @returns false if invalid.
71 * @param pVM The VM handle.
72 * @param GCPhys The physical address to validate.
73 */
74PGMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
75{
76 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
77 return pPage != NULL;
78}
79
80
81/**
82 * Checks if a GC physical address is a normal page,
83 * i.e. not ROM, MMIO or reserved.
84 *
85 * @returns true if normal.
86 * @returns false if invalid, ROM, MMIO or reserved page.
87 * @param pVM The VM handle.
88 * @param GCPhys The physical address to check.
89 */
90PGMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
91{
92 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
93 return pPage
94 && !(pPage->HCPhys & (MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2));
95}
96
97
98/**
99 * Converts a GC physical address to a HC physical address.
100 *
101 * @returns VINF_SUCCESS on success.
102 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
103 * page but has no physical backing.
104 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
105 * GC physical address.
106 *
107 * @param pVM The VM handle.
108 * @param GCPhys The GC physical address to convert.
109 * @param pHCPhys Where to store the HC physical address on success.
110 */
111PGMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
112{
113 PPGMPAGE pPage;
114 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
115 if (VBOX_FAILURE(rc))
116 return rc;
117
118#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
119 if (RT_UNLIKELY(pPage->HCPhys & MM_RAM_FLAGS_RESERVED)) /** @todo PAGE FLAGS */
120 return VERR_PGM_PHYS_PAGE_RESERVED;
121#endif
122
123 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
124 return VINF_SUCCESS;
125}
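/*
 * Illustrative usage sketch (not part of the original file): how a hypothetical
 * caller might translate a guest physical address and tell the two documented
 * failure cases apart. The function name and log messages are made up.
 */
#if 0
static int exampleQueryBackingAddress(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (VBOX_SUCCESS(rc))
        Log(("example: %VGp is backed by host page %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("example: %VGp is valid but has no physical backing\n", GCPhys));
    else /* VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS */
        Log(("example: %VGp is not a valid guest physical address\n", GCPhys));
    return rc;
}
#endif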
126
127
128/**
129 * Invalidates the GC page mapping TLB.
130 *
131 * @param pVM The VM handle.
132 */
133PDMDECL(void) PGMPhysInvalidatePageGCMapTLB(PVM pVM)
134{
135 /* later */
136 NOREF(pVM);
137}
138
139
140/**
141 * Invalidates the ring-0 page mapping TLB.
142 *
143 * @param pVM The VM handle.
144 */
145PDMDECL(void) PGMPhysInvalidatePageR0MapTLB(PVM pVM)
146{
147 PGMPhysInvalidatePageR3MapTLB(pVM);
148}
149
150
151/**
152 * Invalidates the ring-3 page mapping TLB.
153 *
154 * @param pVM The VM handle.
155 */
156PDMDECL(void) PGMPhysInvalidatePageR3MapTLB(PVM pVM)
157{
158 pgmLock(pVM);
159 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
160 {
161 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
162 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
163 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
164 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
165 }
166 pgmUnlock(pVM);
167}
168
169
170
171/**
172 * Makes sure that there is at least one handy page ready for use.
173 *
174 * This will also take the appropriate actions when reaching water-marks.
175 *
176 * @returns The following VBox status codes.
177 * @retval VINF_SUCCESS on success.
178 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
179 *
180 * @param pVM The VM handle.
181 *
182 * @remarks Must be called from within the PGM critical section. It may
183 * nip back to ring-3/0 in some cases.
184 */
185static int pgmPhysEnsureHandyPage(PVM pVM)
186{
187 /** @remarks
188 * low-water mark logic for R0 & GC:
189 * - 75%: Set FF.
190 * - 50%: Force return to ring-3 ASAP.
191 *
192 * For ring-3 there is a little problem with respect to the recompiler, so:
193 * - 75%: Set FF.
194 * - 50%: Try to allocate pages; on failure we'll force REM to quit ASAP.
195 *
196 * The basic idea is that we should be able to get out of any situation with
197 * only 50% of handy pages remaining.
198 *
199 * At the moment we'll not adjust the number of handy pages relative to the
200 * actual VM RAM commitment; that's too much work for now. (A small illustrative sketch of these thresholds follows after this function.)
201 */
202 Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
203 if ( !pVM->pgm.s.cHandyPages
204#ifdef IN_RING3
205 || pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2 /* 50% */
206#endif
207 )
208 {
209 Log(("PGM: cHandyPages=%u out of %u -> allocate more\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
210#ifdef IN_RING3
211 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
212#elif defined(IN_RING0)
213 /** @todo call PGMR0PhysAllocateHandyPages directly - need to make sure we can call kernel code first and deal with the seeding fallback. */
214 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
215#else
216 int rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES, 0);
217#endif
218 if (RT_UNLIKELY(rc != VINF_SUCCESS))
219 {
220 Assert(rc == VINF_EM_NO_MEMORY);
221 if (!pVM->pgm.s.cHandyPages)
222 {
223 LogRel(("PGM: no more handy pages!\n"));
224 return VERR_EM_NO_MEMORY;
225 }
226 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
227#ifdef IN_RING3
228 REMR3NotifyFF(pVM);
229#else
230 VM_FF_SET(pVM, VM_FF_TO_R3);
231#endif
232 }
233 Assert(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages));
234 }
235 else if (pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3) /* 75% */
236 {
237 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
238#ifndef IN_RING3
239 if (pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2)
240 {
241 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
242 VM_FF_SET(pVM, VM_FF_TO_R3);
243 }
244#endif
245 }
246
247 return VINF_SUCCESS;
248}
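/*
 * Hedged sketch (not part of the original file): the 75% / 50% low-water marks
 * described in the @remarks block of pgmPhysEnsureHandyPage() above, expressed
 * as stand-alone predicates. The helper names are made up for illustration only.
 */
#if 0
DECLINLINE(bool) exampleIsBelow75PercentMark(PVM pVM)
{
    /* Fewer than 3/4 of the handy page slots are still filled -> set the FF. */
    return pVM->pgm.s.cHandyPages - 1 <= (RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 4) * 3;
}

DECLINLINE(bool) exampleIsBelow50PercentMark(PVM pVM)
{
    /* Fewer than half of the slots are still filled -> force a return to ring-3 ASAP. */
    return pVM->pgm.s.cHandyPages - 1 <= RT_ELEMENTS(pVM->pgm.s.aHandyPages) / 2;
}
#endif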
249
250
251/**
252 * Replace a zero or shared page with a new page that we can write to.
253 *
254 * @returns The following VBox status codes.
255 * @retval VINF_SUCCESS on success, pPage is modified.
256 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
257 *
258 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
259 *
260 * @param pVM The VM address.
261 * @param pPage The physical page tracking structure. This will
262 * be modified on success.
263 * @param GCPhys The address of the page.
264 *
265 * @remarks Must be called from within the PGM critical section. It may
266 * nip back to ring-3/0 in some cases.
267 *
268 * @remarks This function shouldn't really fail, however if it does
269 * it probably means we've screwed up the size and/or the low-water
270 * mark of the handy page pool. Or, that some device I/O is causing
271 * a lot of pages to be allocated while the host is in a low-memory
272 * condition.
273 */
274int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
275{
276 /*
277 * Ensure that we've got a page handy, take it and use it.
278 */
279 int rc = pgmPhysEnsureHandyPage(pVM);
280 if (VBOX_FAILURE(rc))
281 {
282 Assert(rc == VERR_EM_NO_MEMORY);
283 return rc;
284 }
285 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%d %RGp\n", PGM_PAGE_GET_STATE(pPage), GCPhys));
286 Assert(!PGM_PAGE_IS_RESERVED(pPage));
287 Assert(!PGM_PAGE_IS_MMIO(pPage));
288
289 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
290 Assert(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages));
291 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
292 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
293 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
294 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
295
296 /*
297 * There are one or two actions to be taken the next time we allocate handy pages:
298 * - Tell the GMM (global memory manager) what the page is being used for.
299 * (Speeds up replacement operations - sharing and defragmenting.)
300 * - If the current backing is shared, it must be freed.
301 */
302 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
303 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys;
304
305 if (PGM_PAGE_IS_SHARED(pPage))
306 {
307 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
308 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
309 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
310
311 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
312 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
313 STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceShared);
314 pVM->pgm.s.cSharedPages--;
315 }
316 else
317 {
318 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
319 STAM_COUNTER_INC(&pVM->pgm.s.StatPageReplaceZero);
320 pVM->pgm.s.cZeroPages--;
321 }
322
323 /*
324 * Do the PGMPAGE modifications.
325 */
326 pVM->pgm.s.cPrivatePages++;
327 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
328 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
329 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
330
331 return VINF_SUCCESS;
332}
333
334
335/**
336 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
337 *
338 * @returns VBox status code.
339 * @retval VINF_SUCCESS on success.
340 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
341 *
342 * @param pVM The VM address.
343 * @param pPage The physical page tracking structure.
344 * @param GCPhys The address of the page.
345 *
346 * @remarks Called from within the PGM critical section.
347 */
348int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
349{
350 switch (pPage->u2State)
351 {
352 case PGM_PAGE_STATE_WRITE_MONITORED:
353 pPage->fWrittenTo = true;
354 pPage->u2State = PGM_PAGE_STATE_ALLOCATED;
355 /* fall thru */
356 default: /* to shut up GCC */
357 case PGM_PAGE_STATE_ALLOCATED:
358 return VINF_SUCCESS;
359
360 /*
361 * Zero pages can be dummy pages for MMIO or reserved memory,
362 * so we need to check the flags before joining cause with
363 * shared page replacement.
364 */
365 case PGM_PAGE_STATE_ZERO:
366 if ( PGM_PAGE_IS_MMIO(pPage)
367 || PGM_PAGE_IS_RESERVED(pPage))
368 return VERR_PGM_PHYS_PAGE_RESERVED;
369 /* fall thru */
370 case PGM_PAGE_STATE_SHARED:
371 return pgmPhysAllocPage(pVM, pPage, GCPhys);
372 }
373}
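/*
 * Hedged caller sketch (not part of the original file): the typical pattern is
 * to test the page state first and only call pgmPhysPageMakeWritable() when the
 * page isn't already in the ALLOCATED state, as PGMPhysGCPhys2CCPtr() does
 * further down. The wrapper name is made up.
 */
#if 0
static int exampleMakePageWritableIfNeeded(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    if (RT_UNLIKELY(pPage->u2State != PGM_PAGE_STATE_ALLOCATED))
        return pgmPhysPageMakeWritable(pVM, pPage, GCPhys); /* may replace a zero/shared page */
    return VINF_SUCCESS;
}
#endif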
374
375
376/**
377 * Maps a page into the current virtual address space so it can be accessed.
378 *
379 * @returns VBox status code.
380 * @retval VINF_SUCCESS on success.
381 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
382 *
383 * @param pVM The VM address.
384 * @param pPage The physical page tracking structure.
385 * @param GCPhys The address of the page.
386 * @param ppMap Where to store the address of the mapping tracking structure.
387 * @param ppv Where to store the mapping address of the page. The page
388 * offset is masked off!
389 *
390 * @remarks Called from within the PGM critical section.
391 */
392int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
393{
394#ifdef IN_GC
395 /*
396 * Just some sketchy GC code.
397 */
398 *ppMap = NULL;
399 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
400 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
401 return PGMGCDynMapHCPage(pVM, HCPhys, ppv);
402
403#else /* IN_RING3 || IN_RING0 */
404
405 /*
406 * Find/make Chunk TLB entry for the mapping chunk.
407 */
408 PPGMCHUNKR3MAP pMap;
409 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
410 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
411 if (pTlbe->idChunk == idChunk)
412 {
413 STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbHits);
414 pMap = pTlbe->pChunk;
415 }
416 else if (idChunk != NIL_GMM_CHUNKID)
417 {
418 STAM_COUNTER_INC(&pVM->pgm.s.StatChunkR3MapTlbMisses);
419
420 /*
421 * Find the chunk, map it if necessary.
422 */
423 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
424 if (!pMap)
425 {
426#ifdef IN_RING0
427 int rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_MAP_CHUNK, idChunk);
428 AssertRCReturn(rc, rc);
429 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
430 Assert(pMap);
431#else
432 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
433 if (VBOX_FAILURE(rc))
434 return rc;
435#endif
436 }
437
438 /*
439 * Enter it into the Chunk TLB.
440 */
441 pTlbe->idChunk = idChunk;
442 pTlbe->pChunk = pMap;
443 pMap->iAge = 0;
444 }
445 else
446 {
447 Assert(PGM_PAGE_IS_ZERO(pPage));
448 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
449 *ppMap = NULL;
450 return VINF_SUCCESS;
451 }
452
453 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
454 *ppMap = pMap;
455 return VINF_SUCCESS;
456#endif /* IN_RING3 */
457}
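/*
 * Hedged usage sketch (not part of the original file): a caller typically maps
 * the page and then re-applies the page offset itself, since pgmPhysPageMap()
 * returns a page-aligned address (the offset is masked off). The function name
 * is made up.
 */
#if 0
static int exampleMapAndOffset(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
{
    PPGMPAGEMAP pMap;
    void       *pvPage;
    int rc = pgmPhysPageMap(pVM, pPage, GCPhys, &pMap, &pvPage);
    if (VBOX_FAILURE(rc))
        return rc;
    *ppv = (uint8_t *)pvPage + (GCPhys & PAGE_OFFSET_MASK); /* re-apply the masked-off offset */
    return VINF_SUCCESS;
}
#endif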
458
459
460#ifndef IN_GC
461/**
462 * Load a guest page into the ring-3 physical TLB.
463 *
464 * @returns VBox status code.
465 * @retval VINF_SUCCESS on success
466 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
467 * @param pPGM The PGM instance pointer.
468 * @param GCPhys The guest physical address in question.
469 */
470int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
471{
472 STAM_COUNTER_INC(&pPGM->CTXMID(StatPage,MapTlbMisses));
473
474 /*
475 * Find the ram range.
476 * 99.8% of requests are expected to be in the first range.
477 */
478 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
479 RTGCPHYS off = GCPhys - pRam->GCPhys;
480 if (RT_UNLIKELY(off >= pRam->cb))
481 {
482 do
483 {
484 pRam = CTXSUFF(pRam->pNext);
485 if (!pRam)
486 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
487 off = GCPhys - pRam->GCPhys;
488 } while (off >= pRam->cb);
489 }
490
491 /*
492 * Map the page.
493 * Make a special case for the zero page as it is kind of special.
494 */
495 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
496 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
497 if (!PGM_PAGE_IS_ZERO(pPage))
498 {
499 void *pv;
500 PPGMPAGEMAP pMap;
501 int rc = pgmPhysPageMap(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
502 if (VBOX_FAILURE(rc))
503 return rc;
504 pTlbe->pMap = pMap;
505 pTlbe->pv = pv;
506 }
507 else
508 {
509 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
510 pTlbe->pMap = NULL;
511 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
512 }
513 pTlbe->pPage = pPage;
514 return VINF_SUCCESS;
515}
516#endif /* !IN_GC */
517
518
519/**
520 * Requests the mapping of a guest page into the current context.
521 *
522 * This API should only be used for very short periods of time, as it will consume
523 * scarce resources (R0 and GC) in the mapping cache. When you're done
524 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
525 *
526 * This API will assume your intention is to write to the page, and will
527 * therefore replace shared and zero pages. If you do not intend to modify
528 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
529 *
530 * @returns VBox status code.
531 * @retval VINF_SUCCESS on success.
532 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
533 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
534 *
535 * @param pVM The VM handle.
536 * @param GCPhys The guest physical address of the page that should be mapped.
537 * @param ppv Where to store the address corresponding to GCPhys.
538 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
539 *
540 * @remark Avoid calling this API from within critical sections (other than
541 * the PGM one) because of the deadlock risk.
542 * @thread Any thread.
543 */
544PGMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
545{
546#ifdef NEW_PHYS_CODE
547#ifdef IN_GC
548 /* Until a physical TLB is implemented for GC, let PGMGCDynMapGCPageEx handle it. */
549 return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
550#else
551 int rc = pgmLock(pVM);
552 AssertRCReturn(rc, rc);
553
554 /*
555 * Query the Physical TLB entry for the page (may fail).
556 */
557 PGMPHYSTLBE pTlbe;
558 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
559 if (RT_SUCCESS(rc))
560 {
561 /*
562 * If the page is shared, the zero page, or being write monitored
563 * it must be converted to a page that's writable if possible.
564 */
565 PPGMPAGE pPage = pTlbe->pPage;
566 if (RT_UNLIKELY(pPage->u2State != PGM_PAGE_STATE_ALLOCATED))
567 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
568 if (RT_SUCCESS(rc))
569 {
570 /*
571 * Now, just perform the locking and calculate the return address.
572 */
573 PPGMPAGEMAP pMap = pTlbe->pMap;
574 pMap->cRefs++;
575 if (RT_LIKELY(pPage->cLocks != PGM_PAGE_MAX_LOCKS))
576 if (RT_UNLIKELY(++pPage->cLocks == PGM_PAGE_MAX_LOCKS))
577 {
578 AssertMsgFailed(("%VGp is entering permanent locked state!\n", GCPhys));
579 pMap->cRefs++; /* Extra ref to prevent it from going away. */
580 }
581
582 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
583 pLock->pvPage = pPage;
584 pLock->pvMap = pMap;
585 }
586 }
587
588 pgmUnlock(pVM);
589 return rc;
590
591#endif /* IN_RING3 || IN_RING0 */
592
593#else
594 /*
595 * Temporary fallback code.
596 */
597# ifdef IN_GC
598 return PGMGCDynMapGCPageEx(pVM, GCPhys, ppv);
599# else
600 return PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1, ppv);
601# endif
602#endif
603}
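/*
 * Hedged usage sketch (not part of the original file): the intended pattern is
 * to map the page, touch it, and release the mapping lock again as soon as
 * possible. The function name and the value written are made up for illustration.
 */
#if 0
static int exampleWriteByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (VBOX_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* pv already includes the page offset */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP, the mapping cache is scarce */
    }
    return rc;
}
#endif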
604
605
606/**
607 * Requests the mapping of a guest page into the current context.
608 *
609 * This API should only be used for very short periods of time, as it will consume
610 * scarce resources (R0 and GC) in the mapping cache. When you're done
611 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
612 *
613 * @returns VBox status code.
614 * @retval VINF_SUCCESS on success.
615 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
616 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
617 *
618 * @param pVM The VM handle.
619 * @param GCPhys The guest physical address of the page that should be mapped.
620 * @param ppv Where to store the address corresponding to GCPhys.
621 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
622 *
623 * @remark Avoid calling this API from within critical sections (other than
624 * the PGM one) because of the deadlock risk.
625 * @thread Any thread.
626 */
627PGMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void * const *ppv, PPGMPAGEMAPLOCK pLock)
628{
629 /** @todo implement this */
630 return PGMPhysGCPhys2CCPtr(pVM, GCPhys, (void **)ppv, pLock);
631}
632
633
634/**
635 * Requests the mapping of a guest page given by virtual address into the current context.
636 *
637 * This API should only be used for very short periods of time, as it will consume
638 * scarce resources (R0 and GC) in the mapping cache. When you're done
639 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
640 *
641 * This API will assume your intention is to write to the page, and will
642 * therefore replace shared and zero pages. If you do not intend to modify
643 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
644 *
645 * @returns VBox status code.
646 * @retval VINF_SUCCESS on success.
647 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
648 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
649 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
650 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
651 *
652 * @param pVM The VM handle.
653 * @param GCPtr The guest virtual address of the page that should be mapped.
654 * @param ppv Where to store the address corresponding to GCPhys.
655 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
656 *
657 * @remark Avoid calling this API from within critical sections (other than
658 * the PGM one) because of the deadlock risk.
659 * @thread EMT
660 */
661PGMDECL(int) PGMPhysGCPtr2CCPtr(PVM pVM, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
662{
663 RTGCPHYS GCPhys;
664 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
665 if (VBOX_SUCCESS(rc))
666 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, pLock);
667 return rc;
668}
669
670
671/**
672 * Requests the mapping of a guest page given by virtual address into the current context.
673 *
674 * This API should only be used for very short periods of time, as it will consume
675 * scarce resources (R0 and GC) in the mapping cache. When you're done
676 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
677 *
678 * @returns VBox status code.
679 * @retval VINF_SUCCESS on success.
680 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
681 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
682 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
683 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
684 *
685 * @param pVM The VM handle.
686 * @param GCPtr The guest virtual address of the page that should be mapped.
687 * @param ppv Where to store the address corresponding to GCPhys.
688 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
689 *
690 * @remark Avoid calling this API from within critical sections (other than
691 * the PGM one) because of the deadlock risk.
692 * @thread EMT
693 */
694PGMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVM pVM, RTGCPTR GCPtr, void * const *ppv, PPGMPAGEMAPLOCK pLock)
695{
696 RTGCPHYS GCPhys;
697 int rc = PGMPhysGCPtr2GCPhys(pVM, GCPtr, &GCPhys);
698 if (VBOX_SUCCESS(rc))
699 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, pLock);
700 return rc;
701}
702
703
704/**
705 * Release the mapping of a guest page.
706 *
707 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
708 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
709 *
710 * @param pVM The VM handle.
711 * @param pLock The lock structure initialized by the mapping function.
712 */
713PGMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
714{
715#ifdef NEW_PHYS_CODE
716#ifdef IN_GC
717 /* currently nothing to do here. */
718/* --- postponed
719#elif defined(IN_RING0)
720*/
721
722#else /* IN_RING3 */
723 pgmLock(pVM);
724
725 PPGMPAGE pPage = (PPGMPAGE)pLock->pvPage;
726 Assert(pPage->cLocks >= 1);
727 if (pPage->cLocks != PGM_PAGE_MAX_LOCKS)
728 pPage->cLocks--;
729
730 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pLock->pvChunk;
731 Assert(pChunk->cRefs >= 1);
732 pChunk->cRefs--;
733 pChunk->iAge = 0;
734
735 pgmUnlock(pVM);
736#endif /* IN_RING3 */
737#else
738 NOREF(pVM);
739 NOREF(pLock);
740#endif
741}
742
743
744/**
745 * Converts a GC physical address to a HC pointer.
746 *
747 * @returns VINF_SUCCESS on success.
748 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
749 * page but has no physical backing.
750 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
751 * GC physical address.
752 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
753 * a dynamic RAM chunk boundary.
754 * @param pVM The VM handle.
755 * @param GCPhys The GC physical address to convert.
756 * @param cbRange Physical range in bytes.
757 * @param pHCPtr Where to store the HC pointer on success.
758 */
759PGMDECL(int) PGMPhysGCPhys2HCPtr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTHCPTR pHCPtr)
760{
761#ifdef NEW_PHYS_CODE
762 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
763#endif
764
765#ifdef PGM_DYNAMIC_RAM_ALLOC
766 if ((GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK) != ((GCPhys+cbRange-1) & PGM_DYNAMIC_CHUNK_BASE_MASK))
767 {
768 AssertMsgFailed(("%VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
769 LogRel(("PGMPhysGCPhys2HCPtr %VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys+cbRange));
770 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
771 }
772#endif
773
774 PPGMRAMRANGE pRam;
775 PPGMPAGE pPage;
776 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
777 if (VBOX_FAILURE(rc))
778 return rc;
779
780#ifndef PGM_IGNORE_RAM_FLAGS_RESERVED
781 if (RT_UNLIKELY(PGM_PAGE_IS_RESERVED(pPage)))
782 return VERR_PGM_PHYS_PAGE_RESERVED;
783#endif
784
785 RTGCPHYS off = GCPhys - pRam->GCPhys;
786 if (RT_UNLIKELY(off + cbRange > pRam->cb))
787 {
788 AssertMsgFailed(("%VGp - %VGp crosses a chunk boundary!!\n", GCPhys, GCPhys + cbRange));
789 return VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY;
790 }
791
792 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
793 {
794 unsigned iChunk = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
795 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
796 }
797 else if (RT_LIKELY(pRam->pvHC))
798 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
799 else
800 return VERR_PGM_PHYS_PAGE_RESERVED;
801 return VINF_SUCCESS;
802}
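/*
 * Hedged sketch (not part of the original file): because the conversion fails if
 * the range crosses a dynamic RAM chunk boundary, callers in this file always
 * pass a range of 1 and copy at most up to the next page boundary per iteration.
 * The helper name is made up.
 */
#if 0
static int exampleCopyFromGuest(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
{
    while (cb)
    {
        RTHCPTR HCPtr;
        int rc = PGMPhysGCPhys2HCPtr(pVM, GCPhysSrc, 1 /* stay within one page */, &HCPtr);
        if (VBOX_FAILURE(rc))
            return rc;
        size_t cbChunk = PAGE_SIZE - (size_t)(GCPhysSrc & PAGE_OFFSET_MASK);
        if (cbChunk > cb)
            cbChunk = cb;
        memcpy(pvDst, (void *)HCPtr, cbChunk);
        pvDst      = (uint8_t *)pvDst + cbChunk;
        GCPhysSrc += cbChunk;
        cb        -= cbChunk;
    }
    return VINF_SUCCESS;
}
#endif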
803
804
805/**
806 * Converts a guest pointer to a GC physical address.
807 *
808 * This uses the current CR3/CR0/CR4 of the guest.
809 *
810 * @returns VBox status code.
811 * @param pVM The VM Handle
812 * @param GCPtr The guest pointer to convert.
813 * @param pGCPhys Where to store the GC physical address.
814 */
815PGMDECL(int) PGMPhysGCPtr2GCPhys(PVM pVM, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
816{
817 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
818 if (pGCPhys && VBOX_SUCCESS(rc))
819 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
820 return rc;
821}
822
823
824/**
825 * Converts a guest pointer to a HC physical address.
826 *
827 * This uses the current CR3/CR0/CR4 of the guest.
828 *
829 * @returns VBox status code.
830 * @param pVM The VM Handle
831 * @param GCPtr The guest pointer to convert.
832 * @param pHCPhys Where to store the HC physical address.
833 */
834PGMDECL(int) PGMPhysGCPtr2HCPhys(PVM pVM, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
835{
836 RTGCPHYS GCPhys;
837 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
838 if (VBOX_SUCCESS(rc))
839 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
840 return rc;
841}
842
843
844/**
845 * Converts a guest pointer to a HC pointer.
846 *
847 * This uses the current CR3/CR0/CR4 of the guest.
848 *
849 * @returns VBox status code.
850 * @param pVM The VM Handle
851 * @param GCPtr The guest pointer to convert.
852 * @param pHCPtr Where to store the HC virtual address.
853 */
854PGMDECL(int) PGMPhysGCPtr2HCPtr(PVM pVM, RTGCPTR GCPtr, PRTHCPTR pHCPtr)
855{
856#ifdef NEW_PHYS_CODE
857 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
858#endif
859
860 RTGCPHYS GCPhys;
861 int rc = PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
862 if (VBOX_SUCCESS(rc))
863 rc = PGMPhysGCPhys2HCPtr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
864 return rc;
865}
866
867
868/**
869 * Converts a guest virtual address to a HC pointer by the specified CR3 and flags.
870 *
871 * @returns VBox status code.
872 * @param pVM The VM Handle
873 * @param GCPtr The guest pointer to convert.
874 * @param cr3 The guest CR3.
875 * @param fFlags Flags used for interpreting the PD correctly: X86_CR4_PSE and X86_CR4_PAE
876 * @param pHCPtr Where to store the HC pointer.
877 *
878 * @remark This function is used by the REM at a time when PGM could
879 * potentially not be in sync. It could also be used by a
880 * future DBGF API for CPU-state-independent conversions.
881 */
882PGMDECL(int) PGMPhysGCPtr2HCPtrByGstCR3(PVM pVM, RTGCPTR GCPtr, uint32_t cr3, unsigned fFlags, PRTHCPTR pHCPtr)
883{
884#ifdef NEW_PHYS_CODE
885 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
886#endif
887 /*
888 * PAE or 32-bit?
889 */
890 int rc;
891 if (!(fFlags & X86_CR4_PAE))
892 {
893 PX86PD pPD;
894 rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAGE_MASK, &pPD);
895 if (VBOX_SUCCESS(rc))
896 {
897 VBOXPDE Pde = pPD->a[(RTGCUINTPTR)GCPtr >> X86_PD_SHIFT];
898 if (Pde.n.u1Present)
899 {
900 if ((fFlags & X86_CR4_PSE) && Pde.b.u1Size)
901 { /* (big page) */
902 rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE4M_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
903 }
904 else
905 { /* (normal page) */
906 PVBOXPT pPT;
907 rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & X86_PDE_PG_MASK, &pPT);
908 if (VBOX_SUCCESS(rc))
909 {
910 VBOXPTE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_SHIFT) & X86_PT_MASK];
911 if (Pte.n.u1Present)
912 return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
913 rc = VERR_PAGE_NOT_PRESENT;
914 }
915 }
916 }
917 else
918 rc = VERR_PAGE_TABLE_NOT_PRESENT;
919 }
920 }
921 else
922 {
923 /** @todo long mode! */
924 PX86PDPTR pPdptr;
925 rc = PGM_GCPHYS_2_PTR(pVM, cr3 & X86_CR3_PAE_PAGE_MASK, &pPdptr);
926 if (VBOX_SUCCESS(rc))
927 {
928 X86PDPE Pdpe = pPdptr->a[((RTGCUINTPTR)GCPtr >> X86_PDPTR_SHIFT) & X86_PDPTR_MASK];
929 if (Pdpe.n.u1Present)
930 {
931 PX86PDPAE pPD;
932 rc = PGM_GCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPD);
933 if (VBOX_SUCCESS(rc))
934 {
935 X86PDEPAE Pde = pPD->a[((RTGCUINTPTR)GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
936 if (Pde.n.u1Present)
937 {
938 if ((fFlags & X86_CR4_PSE) && Pde.b.u1Size)
939 { /* (big page) */
940 rc = PGMPhysGCPhys2HCPtr(pVM, (Pde.u & X86_PDE4M_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & X86_PAGE_4M_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
941 }
942 else
943 { /* (normal page) */
944 PX86PTPAE pPT;
945 rc = PGM_GCPHYS_2_PTR(pVM, (Pde.u & X86_PDE_PAE_PG_MASK), &pPT);
946 if (VBOX_SUCCESS(rc))
947 {
948 X86PTEPAE Pte = pPT->a[((RTGCUINTPTR)GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK];
949 if (Pte.n.u1Present)
950 return PGMPhysGCPhys2HCPtr(pVM, (Pte.u & X86_PTE_PAE_PG_MASK) | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pHCPtr);
951 rc = VERR_PAGE_NOT_PRESENT;
952 }
953 }
954 }
955 else
956 rc = VERR_PAGE_TABLE_NOT_PRESENT;
957 }
958 }
959 else
960 rc = VERR_PAGE_TABLE_NOT_PRESENT;
961 }
962 }
963 return rc;
964}
965
966
967#undef LOG_GROUP
968#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
969
970
971#ifdef IN_RING3
972/**
973 * Cache PGMPhys memory access
974 *
975 * @param pVM VM Handle.
976 * @param pCache Cache structure pointer
977 * @param GCPhys GC physical address
978 * @param pbHC HC pointer corresponding to physical page
979 *
980 * @thread EMT.
981 */
982static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbHC)
983{
984 uint32_t iCacheIndex;
985
986 GCPhys = PAGE_ADDRESS(GCPhys);
987 pbHC = (uint8_t *)PAGE_ADDRESS(pbHC);
988
989 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
990
991 ASMBitSet(&pCache->aEntries, iCacheIndex);
992
993 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
994 pCache->Entry[iCacheIndex].pbHC = pbHC;
995}
996#endif
997
998/**
999 * Read physical memory.
1000 *
1001 * This API respects access handlers and MMIO. Use PGMPhysReadGCPhys() if you
1002 * want to ignore those.
1003 *
1004 * @param pVM VM Handle.
1005 * @param GCPhys Physical address start reading from.
1006 * @param pvBuf Where to put the read bits.
1007 * @param cbRead How many bytes to read.
1008 */
1009PGMDECL(void) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1010{
1011#ifdef IN_RING3
1012 bool fGrabbedLock = false;
1013#endif
1014
1015 AssertMsg(cbRead > 0, ("don't even think about reading zero bytes!\n"));
1016 if (cbRead == 0)
1017 return;
1018
1019 LogFlow(("PGMPhysRead: %VGp %d\n", GCPhys, cbRead));
1020
1021#ifdef IN_RING3
1022 if (!VM_IS_EMT(pVM))
1023 {
1024 pgmLock(pVM);
1025 fGrabbedLock = true;
1026 }
1027#endif
1028
1029 /*
1030 * Copy loop on ram ranges.
1031 */
1032 PPGMRAMRANGE pCur = CTXSUFF(pVM->pgm.s.pRamRanges);
1033 for (;;)
1034 {
1035 /* Find range. */
1036 while (pCur && GCPhys > pCur->GCPhysLast)
1037 pCur = CTXSUFF(pCur->pNext);
1038 /* Inside range or not? */
1039 if (pCur && GCPhys >= pCur->GCPhys)
1040 {
1041 /*
1042 * Must work our way thru this page by page.
1043 */
1044 RTGCPHYS off = GCPhys - pCur->GCPhys;
1045 while (off < pCur->cb)
1046 {
1047 unsigned iPage = off >> PAGE_SHIFT;
1048 PPGMPAGE pPage = &pCur->aPages[iPage];
1049 size_t cb;
1050
1051 /* Physical chunk in dynamically allocated range not present? */
1052 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
1053 {
1054 /* Treat it as reserved; return zeros */
1055 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1056 if (cb >= cbRead)
1057 {
1058 memset(pvBuf, 0, cbRead);
1059 goto end;
1060 }
1061 memset(pvBuf, 0, cb);
1062 }
1063 else
1064 {
1065 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_ROM)) /** @todo PAGE FLAGS */
1066 {
1067 /*
1068 * Normal memory or ROM.
1069 */
1070 case 0:
1071 case MM_RAM_FLAGS_ROM:
1072 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_RESERVED:
1073 //case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* = shadow */ - //MMIO2 isn't in the mask.
1074 case MM_RAM_FLAGS_PHYSICAL_WRITE:
1075 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_WRITE: // MMIO2 isn't in the mask.
1076 case MM_RAM_FLAGS_VIRTUAL_WRITE:
1077 {
1078#ifdef IN_GC
1079 void *pvSrc = NULL;
1080 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
1081 pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
1082#else
1083 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1084#endif
1085 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1086 if (cb >= cbRead)
1087 {
1088#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
1089 if (cbRead <= 4 && !fGrabbedLock /* i.e. EMT */)
1090 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphysreadcache, GCPhys, (uint8_t*)pvSrc);
1091#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
1092 memcpy(pvBuf, pvSrc, cbRead);
1093 goto end;
1094 }
1095 memcpy(pvBuf, pvSrc, cb);
1096 break;
1097 }
1098
1099 /*
1100 * All reserved, nothing there.
1101 */
1102 case MM_RAM_FLAGS_RESERVED:
1103 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1104 if (cb >= cbRead)
1105 {
1106 memset(pvBuf, 0, cbRead);
1107 goto end;
1108 }
1109 memset(pvBuf, 0, cb);
1110 break;
1111
1112 /*
1113 * Physical handler.
1114 */
1115 case MM_RAM_FLAGS_PHYSICAL_ALL:
1116 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_ALL: /** r=bird: MMIO2 isn't in the mask! */
1117 {
1118 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1119 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1120#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1121
1122 /* find and call the handler */
1123 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
1124 if (pNode && pNode->pfnHandlerR3)
1125 {
1126 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1127 if (cbRange < cb)
1128 cb = cbRange;
1129 if (cb > cbRead)
1130 cb = cbRead;
1131
1132 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1133
1134 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1135 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pNode->pvUserR3);
1136 }
1137#endif /* IN_RING3 */
1138 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1139 {
1140#ifdef IN_GC
1141 void *pvSrc = NULL;
1142 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
1143 pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
1144#else
1145 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1146#endif
1147
1148 if (cb >= cbRead)
1149 {
1150 memcpy(pvBuf, pvSrc, cbRead);
1151 goto end;
1152 }
1153 memcpy(pvBuf, pvSrc, cb);
1154 }
1155 else if (cb >= cbRead)
1156 goto end;
1157 break;
1158 }
1159
1160 case MM_RAM_FLAGS_VIRTUAL_ALL:
1161 {
1162 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1163 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1164#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1165 /* Search the whole tree for matching physical addresses (rather expensive!) */
1166 PPGMVIRTHANDLER pNode;
1167 unsigned iPage;
1168 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
1169 if (VBOX_SUCCESS(rc2) && pNode->pfnHandlerHC)
1170 {
1171 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1172 if (cbRange < cb)
1173 cb = cbRange;
1174 if (cb > cbRead)
1175 cb = cbRead;
1176 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->GCPtr & PAGE_BASE_GC_MASK)
1177 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1178
1179 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1180
1181 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1182 rc = pNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, 0);
1183 }
1184#endif /* IN_RING3 */
1185 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1186 {
1187#ifdef IN_GC
1188 void *pvSrc = NULL;
1189 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvSrc);
1190 pvSrc = (char *)pvSrc + (off & PAGE_OFFSET_MASK);
1191#else
1192 void *pvSrc = PGMRAMRANGE_GETHCPTR(pCur, off)
1193#endif
1194 if (cb >= cbRead)
1195 {
1196 memcpy(pvBuf, pvSrc, cbRead);
1197 goto end;
1198 }
1199 memcpy(pvBuf, pvSrc, cb);
1200 }
1201 else if (cb >= cbRead)
1202 goto end;
1203 break;
1204 }
1205
1206 /*
1207 * The rest needs to be taken more carefully.
1208 */
1209 default:
1210#if 1 /** @todo r=bird: Can you do this properly please. */
1211 /** @todo Try MMIO; quick hack */
1212 if (cbRead <= 4 && IOMMMIORead(pVM, GCPhys, (uint32_t *)pvBuf, cbRead) == VINF_SUCCESS)
1213 goto end;
1214#endif
1215
1216 /** @todo fix me later. */
1217 AssertReleaseMsgFailed(("Unknown read at %VGp size %d implement the complex physical reading case %x\n",
1218 GCPhys, cbRead,
1219 pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_ROM))); /** @todo PAGE FLAGS */
1220 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1221 break;
1222 }
1223 }
1224 cbRead -= cb;
1225 off += cb;
1226 pvBuf = (char *)pvBuf + cb;
1227 }
1228
1229 GCPhys = pCur->GCPhysLast + 1;
1230 }
1231 else
1232 {
1233 LogFlow(("PGMPhysRead: Unassigned %VGp size=%d\n", GCPhys, cbRead));
1234
1235 /*
1236 * Unassigned address space.
1237 */
1238 size_t cb;
1239 if ( !pCur
1240 || (cb = pCur->GCPhys - GCPhys) >= cbRead)
1241 {
1242 memset(pvBuf, 0, cbRead);
1243 goto end;
1244 }
1245
1246 memset(pvBuf, 0, cb);
1247 cbRead -= cb;
1248 pvBuf = (char *)pvBuf + cb;
1249 GCPhys += cb;
1250 }
1251 }
1252end:
1253#ifdef IN_RING3
1254 if (fGrabbedLock)
1255 pgmUnlock(pVM);
1256#endif
1257 return;
1258}
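/*
 * Hedged usage sketch (not part of the original file): PGMPhysRead() copies
 * straight into a caller-provided buffer and goes through any access handlers
 * that cover the range. The function name is made up.
 */
#if 0
static uint32_t exampleReadGuestDword(PVM pVM, RTGCPHYS GCPhys)
{
    uint32_t u32 = 0;
    PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32)); /* access handlers and MMIO are respected */
    return u32;
}
#endif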
1259
1260/**
1261 * Write to physical memory.
1262 *
1263 * This API respects access handlers and MMIO. Use PGMPhysWriteGCPhys() if you
1264 * want to ignore those.
1265 *
1266 * @param pVM VM Handle.
1267 * @param GCPhys Physical address to write to.
1268 * @param pvBuf What to write.
1269 * @param cbWrite How many bytes to write.
1270 */
1271PGMDECL(void) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
1272{
1273#ifdef IN_RING3
1274 bool fGrabbedLock = false;
1275#endif
1276
1277 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
1278 AssertMsg(cbWrite > 0, ("don't even think about writing zero bytes!\n"));
1279 if (cbWrite == 0)
1280 return;
1281
1282 LogFlow(("PGMPhysWrite: %VGp %d\n", GCPhys, cbWrite));
1283
1284#ifdef IN_RING3
1285 if (!VM_IS_EMT(pVM))
1286 {
1287 pgmLock(pVM);
1288 fGrabbedLock = true;
1289 }
1290#endif
1291 /*
1292 * Copy loop on ram ranges.
1293 */
1294 PPGMRAMRANGE pCur = CTXSUFF(pVM->pgm.s.pRamRanges);
1295 for (;;)
1296 {
1297 /* Find range. */
1298 while (pCur && GCPhys > pCur->GCPhysLast)
1299 pCur = CTXSUFF(pCur->pNext);
1300 /* Inside range or not? */
1301 if (pCur && GCPhys >= pCur->GCPhys)
1302 {
1303 /*
1304 * Must work our way thru this page by page.
1305 */
1306 unsigned off = GCPhys - pCur->GCPhys;
1307 while (off < pCur->cb)
1308 {
1309 unsigned iPage = off >> PAGE_SHIFT;
1310 PPGMPAGE pPage = &pCur->aPages[iPage];
1311
1312 /* Physical chunk in dynamically allocated range not present? */
1313 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(pPage)))
1314 {
1315 int rc;
1316#ifdef IN_RING3
1317 if (fGrabbedLock)
1318 {
1319 pgmUnlock(pVM);
1320 rc = pgmr3PhysGrowRange(pVM, GCPhys);
1321 if (rc == VINF_SUCCESS)
1322 PGMPhysWrite(pVM, GCPhys, pvBuf, cbWrite); /* try again; can't assume pCur is still valid (paranoia) */
1323 return;
1324 }
1325 rc = pgmr3PhysGrowRange(pVM, GCPhys);
1326#else
1327 rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1328#endif
1329 if (rc != VINF_SUCCESS)
1330 goto end;
1331 }
1332
1333 size_t cb;
1334 /** @todo r=bird: missing MM_RAM_FLAGS_ROM here, we shall not allow anyone to overwrite the ROM! */
1335 switch (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE)) /** @todo PAGE FLAGS */
1336 {
1337 /*
1338 * Normal memory, MMIO2 or writable shadow ROM.
1339 */
1340 case 0:
1341 case MM_RAM_FLAGS_MMIO2:
1342 case MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2: /* shadow rom */
1343 {
1344#ifdef IN_GC
1345 void *pvDst = NULL;
1346 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1347 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1348#else
1349 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1350#endif
1351 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1352 if (cb >= cbWrite)
1353 {
1354#if defined(IN_RING3) && defined(PGM_PHYSMEMACCESS_CACHING)
1355 if (cbWrite <= 4 && !fGrabbedLock /* i.e. EMT */)
1356 pgmPhysCacheAdd(pVM, &pVM->pgm.s.pgmphyswritecache, GCPhys, (uint8_t*)pvDst);
1357#endif /* IN_RING3 && PGM_PHYSMEMACCESS_CACHING */
1358 memcpy(pvDst, pvBuf, cbWrite);
1359 goto end;
1360 }
1361 memcpy(pvDst, pvBuf, cb);
1362 break;
1363 }
1364
1365 /*
1366 * All reserved, nothing there.
1367 */
1368 case MM_RAM_FLAGS_RESERVED:
1369 case MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO2:
1370 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1371 if (cb >= cbWrite)
1372 goto end;
1373 break;
1374
1375 /*
1376 * Physical handler.
1377 */
1378 case MM_RAM_FLAGS_PHYSICAL_ALL:
1379 case MM_RAM_FLAGS_PHYSICAL_WRITE:
1380 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_ALL:
1381 case MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_PHYSICAL_WRITE:
1382 {
1383 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1384 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1385#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1386 /* find and call the handler */
1387 PPGMPHYSHANDLER pNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
1388 if (pNode && pNode->pfnHandlerR3)
1389 {
1390 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1391 if (cbRange < cb)
1392 cb = cbRange;
1393 if (cb > cbWrite)
1394 cb = cbWrite;
1395
1396 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1397
1398 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1399 rc = pNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pNode->pvUserR3);
1400 }
1401#endif /* IN_RING3 */
1402 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1403 {
1404#ifdef IN_GC
1405 void *pvDst = NULL;
1406 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1407 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1408#else
1409 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1410#endif
1411 if (cb >= cbWrite)
1412 {
1413 memcpy(pvDst, pvBuf, cbWrite);
1414 goto end;
1415 }
1416 memcpy(pvDst, pvBuf, cb);
1417 }
1418 else if (cb >= cbWrite)
1419 goto end;
1420 break;
1421 }
1422
1423 case MM_RAM_FLAGS_VIRTUAL_ALL:
1424 case MM_RAM_FLAGS_VIRTUAL_WRITE:
1425 {
1426 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1427 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1428#ifdef IN_RING3
1429/** @todo deal with this in GC and R0! */
1430 /* Search the whole tree for matching physical addresses (rather expensive!) */
1431 PPGMVIRTHANDLER pNode;
1432 unsigned iPage;
1433 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pNode, &iPage);
1434 if (VBOX_SUCCESS(rc2) && pNode->pfnHandlerHC)
1435 {
1436 size_t cbRange = pNode->Core.KeyLast - GCPhys + 1;
1437 if (cbRange < cb)
1438 cb = cbRange;
1439 if (cb > cbWrite)
1440 cb = cbWrite;
1441 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pNode->GCPtr & PAGE_BASE_GC_MASK)
1442 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1443
1444 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1445
1446 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1447 rc = pNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
1448 }
1449#endif /* IN_RING3 */
1450 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1451 {
1452#ifdef IN_GC
1453 void *pvDst = NULL;
1454 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1455 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1456#else
1457 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1458#endif
1459 if (cb >= cbWrite)
1460 {
1461 memcpy(pvDst, pvBuf, cbWrite);
1462 goto end;
1463 }
1464 memcpy(pvDst, pvBuf, cb);
1465 }
1466 else if (cb >= cbWrite)
1467 goto end;
1468 break;
1469 }
1470
1471 /*
1472 * Physical write handler + virtual write handler.
1473 * Consider this a quick workaround for the CSAM + shadow caching problem.
1474 *
1475 * We hand it to the shadow caching first since it requires the unchanged
1476 * data. CSAM will have to put up with it already being changed.
1477 */
1478 case MM_RAM_FLAGS_PHYSICAL_WRITE | MM_RAM_FLAGS_VIRTUAL_WRITE:
1479 {
1480 int rc = VINF_PGM_HANDLER_DO_DEFAULT;
1481 cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1482#ifdef IN_RING3 /** @todo deal with this in GC and R0! */
1483 /* 1. The physical handler */
1484 PPGMPHYSHANDLER pPhysNode = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys);
1485 if (pPhysNode && pPhysNode->pfnHandlerR3)
1486 {
1487 size_t cbRange = pPhysNode->Core.KeyLast - GCPhys + 1;
1488 if (cbRange < cb)
1489 cb = cbRange;
1490 if (cb > cbWrite)
1491 cb = cbWrite;
1492
1493 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1494
1495 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1496 rc = pPhysNode->pfnHandlerR3(pVM, GCPhys, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, pPhysNode->pvUserR3);
1497 }
1498
1499 /* 2. The virtual handler (will see incorrect data) */
1500 PPGMVIRTHANDLER pVirtNode;
1501 unsigned iPage;
1502 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirtNode, &iPage);
1503 if (VBOX_SUCCESS(rc2) && pVirtNode->pfnHandlerHC)
1504 {
1505 size_t cbRange = pVirtNode->Core.KeyLast - GCPhys + 1;
1506 if (cbRange < cb)
1507 cb = cbRange;
1508 if (cb > cbWrite)
1509 cb = cbWrite;
1510 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirtNode->GCPtr & PAGE_BASE_GC_MASK)
1511 + (iPage << PAGE_SHIFT) + (off & PAGE_OFFSET_MASK);
1512
1513 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1514
1515 /** @note Dangerous assumption that HC handlers don't do anything that really requires an EMT lock! */
1516 rc2 = pVirtNode->pfnHandlerHC(pVM, (RTGCPTR)GCPtr, pvDst, (void *)pvBuf, cb, PGMACCESSTYPE_WRITE, 0);
1517 if ( ( rc2 != VINF_PGM_HANDLER_DO_DEFAULT
1518 && rc == VINF_PGM_HANDLER_DO_DEFAULT)
1519 || ( VBOX_FAILURE(rc2)
1520 && VBOX_SUCCESS(rc)))
1521 rc = rc2;
1522 }
1523#endif /* IN_RING3 */
1524 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1525 {
1526#ifdef IN_GC
1527 void *pvDst = NULL;
1528 PGMGCDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), &pvDst);
1529 pvDst = (char *)pvDst + (off & PAGE_OFFSET_MASK);
1530#else
1531 void *pvDst = PGMRAMRANGE_GETHCPTR(pCur, off)
1532#endif
1533 if (cb >= cbWrite)
1534 {
1535 memcpy(pvDst, pvBuf, cbWrite);
1536 goto end;
1537 }
1538 memcpy(pvDst, pvBuf, cb);
1539 }
1540 else if (cb >= cbWrite)
1541 goto end;
1542 break;
1543 }
1544
1545
1546 /*
1547 * The rest needs to be taken more carefully.
1548 */
1549 default:
1550#if 1 /** @todo r=bird: Can you do this properly please. */
1551 /** @todo Try MMIO; quick hack */
1552 if (cbWrite <= 4 && IOMMMIOWrite(pVM, GCPhys, *(uint32_t *)pvBuf, cbWrite) == VINF_SUCCESS)
1553 goto end;
1554#endif
1555
1556 /** @todo fix me later. */
1557 AssertReleaseMsgFailed(("Unknown write at %VGp size %d implement the complex physical writing case %x\n",
1558 GCPhys, cbWrite,
1559 (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE | MM_RAM_FLAGS_PHYSICAL_ALL | MM_RAM_FLAGS_PHYSICAL_WRITE)))); /** @todo PAGE FLAGS */
1560 /* skip the write */
1561 cb = cbWrite;
1562 break;
1563 }
1564
1565 cbWrite -= cb;
1566 off += cb;
1567 pvBuf = (const char *)pvBuf + cb;
1568 }
1569
1570 GCPhys = pCur->GCPhysLast + 1;
1571 }
1572 else
1573 {
1574 /*
1575 * Unassigned address space.
1576 */
1577 size_t cb;
1578 if ( !pCur
1579 || (cb = pCur->GCPhys - GCPhys) >= cbWrite)
1580 goto end;
1581
1582 cbWrite -= cb;
1583 pvBuf = (const char *)pvBuf + cb;
1584 GCPhys += cb;
1585 }
1586 }
1587end:
1588#ifdef IN_RING3
1589 if (fGrabbedLock)
1590 pgmUnlock(pVM);
1591#endif
1592 return;
1593}
1594
1595#ifndef IN_GC /* Ring 0 & 3 only */
1596
1597/**
1598 * Read from guest physical memory by GC physical address, bypassing
1599 * MMIO and access handlers.
1600 *
1601 * @returns VBox status.
1602 * @param pVM VM handle.
1603 * @param pvDst The destination address.
1604 * @param GCPhysSrc The source address (GC physical address).
1605 * @param cb The number of bytes to read.
1606 */
1607PGMDECL(int) PGMPhysReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
1608{
1609 /*
1610 * Anything to be done?
1611 */
1612 if (!cb)
1613 return VINF_SUCCESS;
1614
1615 /*
1616 * Loop ram ranges.
1617 */
1618 for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
1619 pRam;
1620 pRam = pRam->CTXSUFF(pNext))
1621 {
1622 RTGCPHYS off = GCPhysSrc - pRam->GCPhys;
1623 if (off < pRam->cb)
1624 {
1625 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1626 {
1627 /* Copy page by page as we're not dealing with a linear HC range. */
1628 for (;;)
1629 {
1630 /* convert */
1631 void *pvSrc;
1632 int rc = pgmRamGCPhys2HCPtrWithRange(pVM, pRam, GCPhysSrc, &pvSrc);
1633 if (VBOX_FAILURE(rc))
1634 return rc;
1635
1636 /* copy */
1637 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPhysSrc & PAGE_OFFSET_MASK);
1638 if (cbRead >= cb)
1639 {
1640 memcpy(pvDst, pvSrc, cb);
1641 return VINF_SUCCESS;
1642 }
1643 memcpy(pvDst, pvSrc, cbRead);
1644
1645 /* next */
1646 cb -= cbRead;
1647 pvDst = (uint8_t *)pvDst + cbRead;
1648 GCPhysSrc += cbRead;
1649 }
1650 }
1651 else if (pRam->pvHC)
1652 {
1653 /* read */
1654 size_t cbRead = pRam->cb - off;
1655 if (cbRead >= cb)
1656 {
1657 memcpy(pvDst, (uint8_t *)pRam->pvHC + off, cb);
1658 return VINF_SUCCESS;
1659 }
1660 memcpy(pvDst, (uint8_t *)pRam->pvHC + off, cbRead);
1661
1662 /* next */
1663 cb -= cbRead;
1664 pvDst = (uint8_t *)pvDst + cbRead;
1665 GCPhysSrc += cbRead;
1666 }
1667 else
1668 return VERR_PGM_PHYS_PAGE_RESERVED;
1669 }
1670 else if (GCPhysSrc < pRam->GCPhysLast)
1671 break;
1672 }
1673 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1674}
1675
1676
1677/**
1678 * Write to guest physical memory referenced by a GC physical address,
1679 * i.e. write memory to a GC physical address in guest physical memory.
1680 *
1681 * This will bypass MMIO and access handlers.
1682 *
1683 * @returns VBox status.
1684 * @param pVM VM handle.
1685 * @param GCPhysDst The GC physical address of the destination.
1686 * @param pvSrc The source buffer.
1687 * @param cb The number of bytes to write.
1688 */
1689PGMDECL(int) PGMPhysWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
1690{
1691 /*
1692 * Anything to be done?
1693 */
1694 if (!cb)
1695 return VINF_SUCCESS;
1696
1697 LogFlow(("PGMPhysWriteGCPhys: %VGp %d\n", GCPhysDst, cb));
1698
1699 /*
1700 * Loop ram ranges.
1701 */
1702 for (PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
1703 pRam;
1704 pRam = pRam->CTXSUFF(pNext))
1705 {
1706 RTGCPHYS off = GCPhysDst - pRam->GCPhys;
1707 if (off < pRam->cb)
1708 {
1709#ifdef NEW_PHYS_CODE
1710/** @todo PGMRamGCPhys2HCPtrWithRange. */
1711#endif
1712 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
1713 {
1714 /* Copy page by page as we're not dealing with a linear HC range. */
1715 for (;;)
1716 {
1717 /* convert */
1718 void *pvDst;
1719 int rc = pgmRamGCPhys2HCPtrWithRange(pVM, pRam, GCPhysDst, &pvDst);
1720 if (VBOX_FAILURE(rc))
1721 return rc;
1722
1723 /* copy */
1724 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPhysDst & PAGE_OFFSET_MASK);
1725 if (cbWrite >= cb)
1726 {
1727 memcpy(pvDst, pvSrc, cb);
1728 return VINF_SUCCESS;
1729 }
1730 memcpy(pvDst, pvSrc, cbWrite);
1731
1732 /* next */
1733 cb -= cbWrite;
1734 pvSrc = (uint8_t *)pvSrc + cbWrite;
1735 GCPhysDst += cbWrite;
1736 }
1737 }
1738 else if (pRam->pvHC)
1739 {
1740 /* write */
1741 size_t cbWrite = pRam->cb - off;
1742 if (cbWrite >= cb)
1743 {
1744 memcpy((uint8_t *)pRam->pvHC + off, pvSrc, cb);
1745 return VINF_SUCCESS;
1746 }
1747 memcpy((uint8_t *)pRam->pvHC + off, pvSrc, cbWrite);
1748
1749 /* next */
1750 cb -= cbWrite;
1751 GCPhysDst += cbWrite;
1752 pvSrc = (uint8_t *)pvSrc + cbWrite;
1753 }
1754 else
1755 return VERR_PGM_PHYS_PAGE_RESERVED;
1756 }
1757 else if (GCPhysDst < pRam->GCPhysLast)
1758 break;
1759 }
1760 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1761}
1762
1763
1764/**
1765 * Read from guest physical memory referenced by GC pointer.
1766 *
1767 * This function uses the current CR3/CR0/CR4 of the guest and will
1768 * bypass access handlers and not set any accessed bits.
1769 *
1770 * @returns VBox status.
1771 * @param pVM VM handle.
1772 * @param pvDst The destination address.
1773 * @param GCPtrSrc The source address (GC pointer).
1774 * @param cb The number of bytes to read.
1775 */
1776PGMDECL(int) PGMPhysReadGCPtr(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1777{
1778 /*
1779 * Anything to do?
1780 */
1781 if (!cb)
1782 return VINF_SUCCESS;
1783
1784 /*
1785 * Optimize reads within a single page.
1786 */
1787 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1788 {
1789 void *pvSrc;
1790 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrSrc, &pvSrc);
1791 if (VBOX_FAILURE(rc))
1792 return rc;
1793 memcpy(pvDst, pvSrc, cb);
1794 return VINF_SUCCESS;
1795 }
1796
1797 /*
1798 * Page by page.
1799 */
1800 for (;;)
1801 {
1802 /* convert */
1803 void *pvSrc;
1804 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrSrc, &pvSrc);
1805 if (VBOX_FAILURE(rc))
1806 return rc;
1807
1808 /* copy */
1809 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
1810 if (cbRead >= cb)
1811 {
1812 memcpy(pvDst, pvSrc, cb);
1813 return VINF_SUCCESS;
1814 }
1815 memcpy(pvDst, pvSrc, cbRead);
1816
1817 /* next */
1818 cb -= cbRead;
1819 pvDst = (uint8_t *)pvDst + cbRead;
1820 GCPtrSrc += cbRead;
1821 }
1822}
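
/**
 * Minimal usage sketch for PGMPhysReadGCPtr (illustrative only; the helper
 * name and the 6 byte read are examples). Reads guest memory through the
 * current guest page tables without touching accessed bits or access
 * handlers, so it is only suitable for peeking.
 */
static int pgmSketchPeekGuestBytes(PVM pVM, RTGCPTR GCPtrSrc)
{
    uint8_t abBuf[6];
    int rc = PGMPhysReadGCPtr(pVM, abBuf, GCPtrSrc, sizeof(abBuf));
    if (VBOX_FAILURE(rc))
        return rc; /* e.g. the page isn't mapped by the guest. */
    Log(("pgmSketchPeekGuestBytes: %VGv: first byte %#x\n", GCPtrSrc, abBuf[0]));
    return VINF_SUCCESS;
}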
1823
1824
1825/**
1826 * Write to guest physical memory referenced by GC pointer.
1827 *
1828 * This function uses the current CR3/CR0/CR4 of the guest and will
1829 * bypass access handlers and not set dirty or accessed bits.
1830 *
1831 * @returns VBox status.
1832 * @param pVM VM handle.
1833 * @param GCPtrDst The destination address (GC pointer).
1834 * @param pvSrc The source address.
1835 * @param cb The number of bytes to write.
1836 */
1837PGMDECL(int) PGMPhysWriteGCPtr(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
1838{
1839 /*
1840 * Anything to do?
1841 */
1842 if (!cb)
1843 return VINF_SUCCESS;
1844
1845 LogFlow(("PGMPhysWriteGCPtr: %VGv %d\n", GCPtrDst, cb));
1846
1847 /*
1848 * Optimize writes within a single page.
1849 */
1850 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1851 {
1852 void *pvDst;
1853 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
1854 if (VBOX_FAILURE(rc))
1855 return rc;
1856 memcpy(pvDst, pvSrc, cb);
1857 return VINF_SUCCESS;
1858 }
1859
1860 /*
1861 * Page by page.
1862 */
1863 for (;;)
1864 {
1865 /* convert */
1866 void *pvDst;
1867 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
1868 if (VBOX_FAILURE(rc))
1869 return rc;
1870
1871 /* copy */
1872 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
1873 if (cbWrite >= cb)
1874 {
1875 memcpy(pvDst, pvSrc, cb);
1876 return VINF_SUCCESS;
1877 }
1878 memcpy(pvDst, pvSrc, cbWrite);
1879
1880 /* next */
1881 cb -= cbWrite;
1882 pvSrc = (uint8_t *)pvSrc + cbWrite;
1883 GCPtrDst += cbWrite;
1884 }
1885}
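
/**
 * Minimal usage sketch for PGMPhysWriteGCPtr (illustrative only; the helper
 * name and the INT3 opcode are examples). Patches a byte at a guest virtual
 * address without setting dirty/accessed bits and without triggering access
 * handlers.
 */
static int pgmSketchPatchInt3(PVM pVM, RTGCPTR GCPtrDst)
{
    const uint8_t bInt3 = 0xcc;
    return PGMPhysWriteGCPtr(pVM, GCPtrDst, &bInt3, sizeof(bInt3));
}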
1886
1887/**
1888 * Read from guest physical memory referenced by GC pointer.
1889 *
1890 * This function uses the current CR3/CR0/CR4 of the guest and will
1891 * respect access handlers and set accessed bits.
1892 *
1893 * @returns VBox status.
1894 * @param pVM VM handle.
1895 * @param pvDst The destination address.
1896 * @param GCPtrSrc The source address (GC pointer).
1897 * @param cb The number of bytes to read.
1898 */
1899/** @todo use the PGMPhysReadGCPtr name and rename the unsafe one to something appropriate */
1900PGMDECL(int) PGMPhysReadGCPtrSafe(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
1901{
1902 RTGCPHYS GCPhys;
1903 int rc;
1904
1905 /*
1906 * Anything to do?
1907 */
1908 if (!cb)
1909 return VINF_SUCCESS;
1910
1911 LogFlow(("PGMPhysReadGCPtrSafe: %VGv %d\n", GCPtrSrc, cb));
1912
1913 /*
1914 * Optimize reads within a single page.
1915 */
1916 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1917 {
1918 /* Convert virtual to physical address */
1919 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrSrc, &GCPhys);
1920 AssertRCReturn(rc, rc);
1921
1922 /* mark the guest page as accessed. */
1923 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
1924 AssertRC(rc);
1925
1926 PGMPhysRead(pVM, GCPhys, pvDst, cb);
1927 return VINF_SUCCESS;
1928 }
1929
1930 /*
1931 * Page by page.
1932 */
1933 for (;;)
1934 {
1935 /* Convert virtual to physical address */
1936 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrSrc, &GCPhys);
1937 AssertRCReturn(rc, rc);
1938
1939 /* mark the guest page as accessed. */
1940 rc = PGMGstModifyPage(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
1941 AssertRC(rc);
1942
1943 /* copy */
1944 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
1945 if (cbRead >= cb)
1946 {
1947 PGMPhysRead(pVM, GCPhys, pvDst, cb);
1948 return VINF_SUCCESS;
1949 }
1950 PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
1951
1952 /* next */
1953 cb -= cbRead;
1954 pvDst = (uint8_t *)pvDst + cbRead;
1955 GCPtrSrc += cbRead;
1956 }
1957}
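
/**
 * Minimal usage sketch for PGMPhysReadGCPtrSafe (illustrative only; the
 * helper name is an example). Unlike PGMPhysReadGCPtr this goes through
 * PGMPhysRead, so physical access handlers are respected and the accessed
 * bit is set.
 */
static int pgmSketchReadGuestBuffer(PVM pVM, RTGCPTR GCPtrSrc, void *pvDst, size_t cb)
{
    int rc = PGMPhysReadGCPtrSafe(pVM, pvDst, GCPtrSrc, cb);
    AssertMsgRC(rc, ("read %VGv %d failed\n", GCPtrSrc, cb));
    return rc;
}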
1958
1959
1960/**
1961 * Write to guest physical memory referenced by GC pointer.
1962 *
1963 * This function uses the current CR3/CR0/CR4 of the guest and will
1964 * respect access handlers and set dirty and accessed bits.
1965 *
1966 * @returns VBox status.
1967 * @param pVM VM handle.
1968 * @param GCPtrDst The destination address (GC pointer).
1969 * @param pvSrc The source address.
1970 * @param cb The number of bytes to write.
1971 */
1972/** @todo use the PGMPhysWriteGCPtr name and rename the unsafe one to something appropriate */
1973PGMDECL(int) PGMPhysWriteGCPtrSafe(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
1974{
1975 RTGCPHYS GCPhys;
1976 int rc;
1977
1978 /*
1979 * Anything to do?
1980 */
1981 if (!cb)
1982 return VINF_SUCCESS;
1983
1984 LogFlow(("PGMPhysWriteGCPtrSafe: %VGv %d\n", GCPtrDst, cb));
1985
1986 /*
1987 * Optimize writes within a single page.
1988 */
1989 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
1990 {
1991 /* Convert virtual to physical address */
1992 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrDst, &GCPhys);
1993 AssertRCReturn(rc, rc);
1994
1995 /* mark the guest page as accessed and dirty. */
1996 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
1997 AssertRC(rc);
1998
1999 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2000 return VINF_SUCCESS;
2001 }
2002
2003 /*
2004 * Page by page.
2005 */
2006 for (;;)
2007 {
2008 /* Convert virtual to physical address */
2009 rc = PGMPhysGCPtr2GCPhys(pVM, GCPtrDst, &GCPhys);
2010 AssertRCReturn(rc, rc);
2011
2012 /* mark the guest page as accessed and dirty. */
2013 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2014 AssertRC(rc);
2015
2016 /* copy */
2017 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2018 if (cbWrite >= cb)
2019 {
2020 PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2021 return VINF_SUCCESS;
2022 }
2023 PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2024
2025 /* next */
2026 cb -= cbWrite;
2027 pvSrc = (uint8_t *)pvSrc + cbWrite;
2028 GCPtrDst += cbWrite;
2029 }
2030}
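
/**
 * Minimal usage sketch for PGMPhysWriteGCPtrSafe (illustrative only; the
 * helper name and the 32-bit operand are examples). The write goes through
 * PGMPhysWrite, so MMIO and other physical handlers are honoured and the
 * accessed/dirty bits are set.
 */
static int pgmSketchWriteBackResult(PVM pVM, RTGCPTR GCPtrDst, uint32_t u32Result)
{
    return PGMPhysWriteGCPtrSafe(pVM, GCPtrDst, &u32Result, sizeof(u32Result));
}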
2031
2032/**
2033 * Write to guest physical memory referenced by GC pointer and update the PTE.
2034 *
2035 * This function uses the current CR3/CR0/CR4 of the guest and will
2036 * bypass access handlers and set any dirty and accessed bits in the PTE.
2037 *
2038 * If you don't want to set the dirty bit, use PGMPhysWriteGCPtr().
2039 *
2040 * @returns VBox status.
2041 * @param pVM VM handle.
2042 * @param GCPtrDst The destination address (GC pointer).
2043 * @param pvSrc The source address.
2044 * @param cb The number of bytes to write.
2045 */
2046PGMDECL(int) PGMPhysWriteGCPtrDirty(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2047{
2048 /*
2049 * Anything to do?
2050 */
2051 if (!cb)
2052 return VINF_SUCCESS;
2053
2054 /*
2055 * Optimize writes within a single page.
2056 */
2057 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2058 {
2059 void *pvDst;
2060 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
2061 if (VBOX_FAILURE(rc))
2062 return rc;
2063 memcpy(pvDst, pvSrc, cb);
2064 rc = PGMGstModifyPage(pVM, GCPtrDst, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2065 AssertRC(rc);
2066 return VINF_SUCCESS;
2067 }
2068
2069 /*
2070 * Page by page.
2071 */
2072 for (;;)
2073 {
2074 /* convert */
2075 void *pvDst;
2076 int rc = PGMPhysGCPtr2HCPtr(pVM, GCPtrDst, &pvDst);
2077 if (VBOX_FAILURE(rc))
2078 return rc;
2079
2080 /* mark the guest page as accessed and dirty. */
2081 rc = PGMGstModifyPage(pVM, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2082 AssertRC(rc);
2083
2084 /* copy */
2085 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2086 if (cbWrite >= cb)
2087 {
2088 memcpy(pvDst, pvSrc, cb);
2089 return VINF_SUCCESS;
2090 }
2091 memcpy(pvDst, pvSrc, cbWrite);
2092
2093 /* next */
2094 cb -= cbWrite;
2095 GCPtrDst += cbWrite;
2096 pvSrc = (uint8_t *)pvSrc + cbWrite;
2097 }
2098}
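
/**
 * Minimal usage sketch for PGMPhysWriteGCPtrDirty (illustrative only; the
 * helper name is an example). Like PGMPhysWriteGCPtr it bypasses access
 * handlers, but it also marks the guest PTE accessed and dirty, which is
 * what patching code wants when the guest may page out the modified page.
 */
static int pgmSketchApplyPatch(PVM pVM, RTGCPTR GCPtrDst, const void *pvPatch, size_t cbPatch)
{
    return PGMPhysWriteGCPtrDirty(pVM, GCPtrDst, pvPatch, cbPatch);
}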
2099
2100#endif /* !IN_GC */
2101
2102
2103
2104/**
2105 * Performs a read of guest virtual memory for instruction emulation.
2106 *
2107 * This will check permissions, raise exceptions and update the access bits.
2108 *
2109 * The current implementation will bypass all access handlers. It may later be
2110 * changed to at least respect MMIO.
2111 *
2112 *
2113 * @returns VBox status code suitable to scheduling.
2114 * @retval VINF_SUCCESS if the read was performed successfully.
2115 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2116 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2117 *
2118 * @param pVM The VM handle.
2119 * @param pCtxCore The context core.
2120 * @param pvDst Where to put the bytes we've read.
2121 * @param GCPtrSrc The source address.
2122 * @param cb The number of bytes to read. Not more than a page.
2123 *
2124 * @remark This function will dynamically map physical pages in GC. This may unmap
2125 * mappings done by the caller. Be careful!
2126 */
2127PGMDECL(int) PGMPhysInterpretedRead(PVM pVM, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2128{
2129 Assert(cb <= PAGE_SIZE);
2130
2131/** @todo r=bird: This isn't perfect!
2132 * -# It's not checking for reserved bits being 1.
2133 * -# It's not correctly dealing with the access bit.
2134 * -# It's not respecting MMIO memory or any other access handlers.
2135 */
2136 /*
2137 * 1. Translate virtual to physical. This may fault.
2138 * 2. Map the physical address.
2139 * 3. Do the read operation.
2140 * 4. Set access bits if required.
2141 */
2142 int rc;
2143 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2144 if (cb <= cb1)
2145 {
2146 /*
2147 * Not crossing pages.
2148 */
2149 RTGCPHYS GCPhys;
2150 uint64_t fFlags;
2151 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags, &GCPhys);
2152 if (VBOX_SUCCESS(rc))
2153 {
2154 /** @todo we should check reserved bits ... */
2155 void *pvSrc;
2156 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2157 switch (rc)
2158 {
2159 case VINF_SUCCESS:
2160Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2161 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2162 break;
2163 case VERR_PGM_PHYS_PAGE_RESERVED:
2164 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2165 memset(pvDst, 0, cb);
2166 break;
2167 default:
2168 return rc;
2169 }
2170
2171 /** @todo access bit emulation isn't 100% correct. */
2172 if (!(fFlags & X86_PTE_A))
2173 {
2174 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2175 AssertRC(rc);
2176 }
2177 return VINF_SUCCESS;
2178 }
2179 }
2180 else
2181 {
2182 /*
2183 * Crosses pages.
2184 */
2185 unsigned cb2 = cb - cb1;
2186 uint64_t fFlags1;
2187 RTGCPHYS GCPhys1;
2188 uint64_t fFlags2;
2189 RTGCPHYS GCPhys2;
2190 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc, &fFlags1, &GCPhys1);
2191 if (VBOX_SUCCESS(rc))
2192 rc = PGM_GST_PFN(GetPage,pVM)(pVM, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2193 if (VBOX_SUCCESS(rc))
2194 {
2195 /** @todo we should check reserved bits ... */
2196AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%VGv\n", cb, cb1, cb2, GCPtrSrc));
2197 void *pvSrc1;
2198 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2199 switch (rc)
2200 {
2201 case VINF_SUCCESS:
2202 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2203 break;
2204 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2205 memset(pvDst, 0, cb1);
2206 break;
2207 default:
2208 return rc;
2209 }
2210
2211 void *pvSrc2;
2212 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2213 switch (rc)
2214 {
2215 case VINF_SUCCESS:
2216 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2217 break;
2218 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2219 memset((uint8_t *)pvDst + cb1, 0, cb2);
2220 break;
2221 default:
2222 return rc;
2223 }
2224
2225 if (!(fFlags1 & X86_PTE_A))
2226 {
2227 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2228 AssertRC(rc);
2229 }
2230 if (!(fFlags2 & X86_PTE_A))
2231 {
2232 rc = PGM_GST_PFN(ModifyPage,pVM)(pVM, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2233 AssertRC(rc);
2234 }
2235 return VINF_SUCCESS;
2236 }
2237 }
2238
2239 /*
2240 * Raise a #PF.
2241 */
2242 uint32_t uErr;
2243
2244 /* Get the current privilege level. */
2245 uint32_t cpl = CPUMGetGuestCPL(pVM, pCtxCore);
2246 switch (rc)
2247 {
2248 case VINF_SUCCESS:
2249 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
2250 break;
2251
2252 case VERR_PAGE_NOT_PRESENT:
2253 case VERR_PAGE_TABLE_NOT_PRESENT:
2254 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
2255 break;
2256
2257 default:
2258 AssertMsgFailed(("rc=%Vrc GCPtrSrc=%VGv cb=%#x\n", rc, GCPtrSrc, cb));
2259 return rc;
2260 }
2261 Log(("PGMPhysInterpretedRead: GCPtrSrc=%VGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
2262 return TRPMRaiseXcptErrCR2(pVM, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
2263}
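
/**
 * Minimal usage sketch for PGMPhysInterpretedRead (illustrative only; the
 * helper name and the 16 byte fetch are examples). Fetches instruction bytes
 * for emulation and hands any scheduling status (#PF raised or dispatched)
 * straight back to the caller.
 */
static int pgmSketchFetchInstrBytes(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrInstr, uint8_t *pabInstr)
{
    int rc = PGMPhysInterpretedRead(pVM, pCtxCore, pabInstr, GCPtrInstr, 16);
    /* VINF_EM_RAW_GUEST_TRAP and VINF_TRPM_XCPT_DISPATCHED must reach the caller for rescheduling. */
    return rc;
}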
2264
2265/// @todo PGMDECL(int) PGMPhysInterpretedWrite(PVM pVM, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2266