VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@ 96491

Last change on this file since 96491 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 19.4 KB
/* $Id: PGMAllGst.h 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
#endif
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);
#endif
RT_C_DECLS_END


/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
}


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
{
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
}
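
/*
 * Illustrative usage sketch (hypothetical; GCPhysNewCR3 is an assumed variable,
 * error handling elided): both Enter and Exit dispatch through the
 * g_aPgmBothModeData callback table selected by pVCpu->pgm.s.idxBothModeData,
 * so code built on these templates that needs to re-map the guest CR3 would,
 * roughly, unmap the old mapping and map the new one:
 *
 *     int rcExit  = PGM_GST_NAME(Exit)(pVCpu);
 *     int rcEnter = PGM_GST_NAME(Enter)(pVCpu, GCPhysNewCR3);
 */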


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}
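
/*
 * Note on the helpers above: each failure path records why the walk stopped
 * (fNotPresent, fBadPhysAddr or fRsvdError) together with the paging level in
 * the caller's PGMPTWALK and returns VERR_PAGE_TABLE_NOT_PRESENT, so callers
 * only check the status code and inspect pWalk when they need the details.
 */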


/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       The page walk info.
 * @param   pGstWalk    The guest mode specific page walk info.
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
{
    int rc;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/** @def PGM_GST_SLAT_WALK
 * Macro to perform guest second-level address translation (EPT or Nested).
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   a_GCPtrNested   The nested-guest linear address that caused the
 *                          second-level translation.
 * @param   a_GCPhysNested  The nested-guest physical address to translate.
 * @param   a_GCPhysOut     Where to store the guest-physical address (result).
 */
# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
    do { \
        if ((a_pVCpu)->pgm.s.enmGuestSlatMode == PGMSLAT_EPT) \
        { \
            PGMPTWALK SlatWalk; \
            PGMPTWALKGST SlatGstWalk; \
            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk, \
                                           &SlatGstWalk); \
            if (RT_SUCCESS(rcX)) \
                (a_GCPhysOut) = SlatWalk.GCPhys; \
            else \
            { \
                *(a_pWalk) = SlatWalk; \
                return rcX; \
            } \
        } \
    } while (0)
#endif

    /*
     * Init the walking structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pGstWalk);
    pWalk->GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

    uint64_t fEffective;
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4 table.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E pPml4e;
        pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E Pml4e;
        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        fEffective = Pml4e.u & (  X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
                                | X86_PML4E_NX);
        pWalk->fEffective = fEffective;

        /*
         * The PDPT.
         */
        RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
#endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
#endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE pPdpe;
        pGstWalk->pPdpe = pPdpe = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE Pdpe;
        pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

# if PGM_GST_TYPE == PGM_TYPE_AMD64
        fEffective &= (Pdpe.u & (  X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US
                                 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
        fEffective |= Pdpe.u & X86_PDPE_LM_NX;
# else
        /*
         * NX in the legacy-mode PAE PDPE is reserved. The valid check above ensures the NX bit is not set.
         * The RW, US, A bits MBZ in PAE PDPTE entries but must be 1 the way we compute cumulative (effective) access rights.
         */
        Assert(!(Pdpe.u & X86_PDPE_LM_NX));
        fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                   | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PD.
         */
        RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);

# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
        PGSTPDE pPde;
        pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE Pde;
        pGstWalk->Pde.u = Pde.u = pPde->u;
        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
# else
            fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
            fEffective |= Pde.u & X86_PDE2M_PAE_NX;
# endif
            fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
            fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->fEffective = fEffective;
            Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
            Assert(fEffective & PGM_PTATTRS_R_MASK);

            pWalk->fBigPage = true;
            pWalk->fSucceeded = true;
            RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
# endif
            pWalk->GCPhys = GCPhysPde;
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
        fEffective |= Pde.u & X86_PDE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PT.
         */
        RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE pPte;
        pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE Pte;
        pGstWalk->Pte.u = Pte.u = pPte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
        fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
        fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
# if PGM_GST_TYPE != PGM_TYPE_32BIT
        fEffective |= Pte.u & X86_PTE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;
        Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
        Assert(fEffective & PGM_PTATTRS_R_MASK);

        pWalk->fSucceeded = true;
        RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
                           | (GCPtr & GUEST_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
# endif
        pWalk->GCPhys = GCPhysPte;
        return VINF_SUCCESS;
    }
}

#endif /* 32BIT, PAE, AMD64 */
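
/*
 * Rough summary sketch of how Walk() accumulates fEffective (simplified,
 * ignoring the legacy PAE PDPTE special case): the access bits are ANDed down
 * the hierarchy while NX is ORed up, and the leaf entry contributes its own
 * attribute bits, i.e. approximately
 *
 *     fEffective = (PML4E & PDPE & PDE & PTE)   for P, RW, US, PWT, PCD, A
 *                | (PML4E | PDPE | PDE | PTE)   for the NX bit
 *                | (D, PAT, G)                  taken from the final level.
 */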

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If distinguishing between big and normal pages becomes necessary
 * at a later point, a PGMGstGetPageEx() will be created for that purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pWalk       Where to store the page walk info.
 */
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT

    RT_ZERO(*pWalk);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
    {
        PGMPTWALK SlatWalk;
        PGMPTWALKGST SlatGstWalk;
        int const rc = pgmGstSlatWalk(pVCpu, GCPtr, true /* fIsLinearAddrValid */, GCPtr, &SlatWalk, &SlatGstWalk);
        if (RT_SUCCESS(rc))
        {
            pWalk->fSucceeded = true;
            pWalk->GCPtr      = GCPtr;
            pWalk->GCPhys     = SlatWalk.GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
            pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
        }
        else
            *pWalk = SlatWalk;
        return rc;
    }
# endif

    /*
     * Fake it.
     */
    pWalk->fSucceeded = true;
    pWalk->GCPtr      = GCPtr;
    pWalk->GCPhys     = GCPtr & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
   || PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK GstWalk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, pWalk, &GstWalk);
    if (RT_FAILURE(rc))
        return rc;

    Assert(pWalk->fSucceeded);
    Assert(pWalk->GCPtr == GCPtr);

    PGMPTATTRS fFlags;
    if (!pWalk->fBigPage)
        fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    else
    {
        fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    }

    pWalk->GCPhys    &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = fFlags;
    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
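
/*
 * Minimal usage sketch (hypothetical caller; GCPtrPage is an assumed
 * variable): translate a guest linear address to a page-aligned guest
 * physical address and check the effective write permission.
 *
 *     PGMPTWALK Walk;
 *     int rc = PGM_GST_NAME(GetPage)(pVCpu, GCPtrPage, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTGCPHYS GCPhysPage = Walk.GCPhys;   // page offset bits are masked off
 *         bool fWritable = RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK);
 *         ...
 *     }
 */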


/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & GUEST_PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        PGMPTWALK Walk;
        GSTPTWALK GstWalk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
            {
                GSTPTE Pte = GstWalk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                GstWalk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= GUEST_PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += GUEST_PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *GstWalk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}
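
/*
 * Illustrative sketch (hypothetical caller; GCPtrStart and cbRange are
 * assumed, page-aligned values): since the existing flags are ANDed with
 * fMask and then ORed with fFlags, write-protecting an already-present range
 * could look roughly like this:
 *
 *     int rc = PGM_GST_NAME(ModifyPage)(pVCpu, GCPtrStart, cbRange,
 *                                       0,                       // fFlags: no bits to set
 *                                       ~(uint64_t)X86_PTE_RW);  // fMask: clear only R/W
 */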


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif