VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@104821

Last change on this file since 104821 was 104767, checked in by vboxsync, 6 months ago

VMM/PGM,IOM,PDM: MMIO cleanups. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision

File size: 19.2 KB
/* $Id: PGMAllGst.h 104767 2024-05-23 12:03:04Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
/** @todo Do we really need any of these forward declarations? */
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
#endif
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END


/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
}


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
{
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
}
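
/*
 * Enter/Exit are guest-mode template instantiations that a mode switch invokes
 * through per-mode method tables.  A minimal dispatch sketch, loosely modelled
 * on the guest-mode data table (the table and member names here are assumptions
 * for illustration, not definitions from this file):
 *
 * @code
 *     uintptr_t const idxGst = pVCpu->pgm.s.idxGuestModeData;
 *     AssertReturn(idxGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
 *     if (g_aPgmGuestModeData[idxGst].pfnExit)
 *         g_aPgmGuestModeData[idxGst].pfnExit(pVCpu);                  // leave the old mode
 *     ...
 *     rc = g_aPgmGuestModeData[idxGstNew].pfnEnter(pVCpu, GCPhysCR3);  // map the new CR3
 * @endcode
 */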


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel      = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}
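
/*
 * All three failure helpers return VERR_PAGE_TABLE_NOT_PRESENT; callers that need
 * the reason inspect the flags and level recorded in the walk info.  A minimal
 * caller-side sketch (illustrative only):
 *
 * @code
 *     PGMPTWALK Walk;
 *     GSTPTWALK GstWalk;
 *     int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
 *     if (rc == VERR_PAGE_TABLE_NOT_PRESENT)
 *     {
 *         if (Walk.fNotPresent)        // entry at Walk.uLevel is not present
 *             ...
 *         else if (Walk.fRsvdError)    // reserved bits set at Walk.uLevel
 *             ...
 *         else if (Walk.fBadPhysAddr)  // a table address could not be mapped
 *             ...
 *     }
 * @endcode
 */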


/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       The page walk info.
 * @param   pGstWalk    The guest mode specific page walk info.
 * @thread  EMT(pVCpu)
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
{
    int rc;

# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/** @def PGM_GST_SLAT_WALK
 * Macro to perform guest second-level address translation (EPT or Nested).
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   a_GCPtrNested   The nested-guest linear address that caused the
 *                          second-level translation.
 * @param   a_GCPhysNested  The nested-guest physical address to translate.
 * @param   a_GCPhysOut     Where to store the guest-physical address (result).
 * @param   a_pWalk         Where to store the second-level walk info on failure.
 */
# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
    do { \
        if ((a_pVCpu)->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) \
        { \
            PGMPTWALK    WalkSlat; \
            PGMPTWALKGST WalkGstSlat; \
            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &WalkSlat, \
                                           &WalkGstSlat); \
            if (RT_SUCCESS(rcX)) \
                (a_GCPhysOut) = WalkSlat.GCPhys; \
            else \
            { \
                *(a_pWalk) = WalkSlat; \
                return rcX; \
            } \
        } \
    } while (0)
# endif
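
/*
 * Note that on SLAT failure the macro above copies the second-level walk result
 * into *a_pWalk and returns from the calling function; when the guest SLAT mode
 * is not EPT it expands to a no-op and a_GCPhysOut is left untouched.
 */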

    /*
     * Init the walking structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pGstWalk);
    pWalk->GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

    uint64_t fEffective;
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4 table.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E pPml4e;
        pGstWalk->pPml4e  = pPml4e  = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E  Pml4e;
        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        fEffective = Pml4e.u & (  X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
                                | X86_PML4E_NX);
        pWalk->fEffective = fEffective;

        /*
         * The PDPT.
         */
        RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
#  endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE pPdpe;
        pGstWalk->pPdpe  = pPdpe  = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE  Pdpe;
        pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

#  if PGM_GST_TYPE == PGM_TYPE_AMD64
        fEffective &= (Pdpe.u & (  X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US
                                 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
        fEffective |= Pdpe.u & X86_PDPE_LM_NX;
#  else
        /*
         * NX in the legacy-mode PAE PDPE is reserved.  The valid check above ensures the NX bit is not set.
         * The RW, US and A bits are MBZ in PAE PDPTE entries, but they must be 1 the way we compute cumulative
         * (effective) access rights.
         */
        Assert(!(Pdpe.u & X86_PDPE_LM_NX));
        fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                   | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
#  endif
        pWalk->fEffective = fEffective;

        /*
         * The PD.
         */
        RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
#  endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);

# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
        PGSTPDE pPde;
        pGstWalk->pPde  = pPde  = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE  Pde;
        pGstWalk->Pde.u = Pde.u = pPde->u;
        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective  = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
# else
            fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
            fEffective |= Pde.u & X86_PDE2M_PAE_NX;
# endif
            fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
            fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->fEffective = fEffective;
            Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
            Assert(fEffective & PGM_PTATTRS_R_MASK);

            pWalk->fBigPage   = true;
            pWalk->fSucceeded = true;
            RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
# endif
            pWalk->GCPhys = GCPhysPde;
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }
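
        /*
         * Big-page note: the guest-physical address above combines the page frame
         * from the PDE (via the per-mode GST_GET_BIG_PDE_GCPHYS, which for 32-bit
         * guests also covers the PSE-36 high address bits) with the low GCPtr bits
         * kept by GST_BIG_PAGE_OFFSET_MASK - 22 offset bits for 4 MB pages, 21 for
         * the PAE/AMD64 2 MB case.
         */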

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective  = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
        fEffective |= Pde.u & X86_PDE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PT.
         */
        RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE pPte;
        pGstWalk->pPte  = pPte  = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE  Pte;
        pGstWalk->Pte.u = Pte.u = pPte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
        fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
        fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
# if PGM_GST_TYPE != PGM_TYPE_32BIT
        fEffective |= Pte.u & X86_PTE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;
        Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
        Assert(fEffective & PGM_PTATTRS_R_MASK);

        pWalk->fSucceeded = true;
        RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
                           | (GCPtr & GUEST_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
# endif
        pWalk->GCPhys = GCPhysPte;
        return VINF_SUCCESS;
    }
}
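
/*
 * The fEffective accumulation in the walk above follows the architectural rule
 * that the write (RW) and user (US) permissions are ANDed across all levels while
 * no-execute (NX) is ORed in.  A small worked example (values illustrative):
 *
 *      PML4E:  P RW US A           -> fEffective = P RW US A
 *      PDPTE:  P    US A           -> fEffective = P    US A   (RW dropped)
 *      PDE:    P RW US A           -> fEffective = P    US A
 *      PTE:    P RW US A D, NX set -> fEffective = P    US A D NX
 *
 * i.e. the final mapping is readable from user mode, but neither writable nor
 * executable (assuming NX is active).
 */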

#endif /* 32BIT, PAE, AMD64 */

/**
 * Gets effective Guest OS page information.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pWalk       Where to store the page walk info.
 * @thread  EMT(pVCpu)
 */
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT

# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
    {
        PGMPTWALK    WalkSlat;
        PGMPTWALKGST WalkGstSlat;
        int const rc = pgmGstSlatWalk(pVCpu, GCPtr, true /* fIsLinearAddrValid */, GCPtr, &WalkSlat, &WalkGstSlat);
        if (RT_SUCCESS(rc))
        {
            RT_ZERO(*pWalk);
            pWalk->fSucceeded = true;
            pWalk->GCPtr      = GCPtr;
            pWalk->GCPhys     = WalkSlat.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
            pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
        }
        else
            *pWalk = WalkSlat;
        return rc;
    }
# endif

    /*
     * Fake it.
     */
    RT_ZERO(*pWalk);
    pWalk->fSucceeded = true;
    pWalk->GCPtr      = GCPtr;
    pWalk->GCPhys     = GCPtr & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
   || PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK GstWalk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, pWalk, &GstWalk);
    if (RT_FAILURE(rc))
        return rc;

    Assert(pWalk->fSucceeded);
    Assert(pWalk->GCPtr == GCPtr);

    PGMPTATTRS fFlags;
    if (!pWalk->fBigPage)
        fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US))                            /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    else
        fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS))         /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;

    pWalk->GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = fFlags;
    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
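
/*
 * Callers typically reach this template through a mode-dispatching wrapper such
 * as PGMGstGetPage.  A minimal usage sketch (illustrative only):
 *
 * @code
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTGCPHYS const GCPhysPage = Walk.GCPhys;   // page aligned, see above
 *         bool const     fWritable  = RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK);
 *         ...
 *     }
 * @endcode
 */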


/**
 * Modifies page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.  Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to.  Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & GUEST_PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        PGMPTWALK Walk;
        GSTPTWALK GstWalk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
            {
                GSTPTE Pte = GstWalk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                GstWalk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= GUEST_PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += GUEST_PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *GstWalk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}
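
/*
 * The fFlags/fMask convention above means callers clear bits by masking them out
 * and set bits by ORing them in.  A minimal sketch of write-protecting a single
 * page through a mode-dispatching wrapper such as PGMGstModifyPage (illustrative
 * only):
 *
 * @code
 *     // fFlags=0, fMask=~X86_PTE_RW: clear the RW bit, keep everything else.
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 *     // fFlags=X86_PTE_RW, fMask=~0: make the page writable again.
 *     if (RT_SUCCESS(rc))
 *         rc = PGMGstModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, X86_PTE_RW, UINT64_MAX);
 * @endcode
 */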


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif