VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@ 92162

Last change on this file since 92162 was 92076, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 EPT walking bits - PTE.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 25.9 KB
/* $Id: PGMAllGst.h 92076 2021-10-26 11:30:00Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_EPT \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
#endif
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);
#endif
RT_C_DECLS_END


/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
}


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
{
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
}
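
/*
 * Illustrative note: Enter/Exit do not touch the guest tables themselves.
 * They dispatch through the combined guest+shadow mode data table
 * g_aPgmBothModeData, indexed by pVCpu->pgm.s.idxBothModeData, so the
 * pfnMapCR3/pfnUnmapCR3 implementation matching the currently selected
 * paging mode pair gets invoked; the asserts merely guard against an unset
 * or out-of-range mode index.
 */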


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_EPT \
 || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->Core.fNotPresent = true;
    pWalk->Core.uLevel      = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->Core.fBadPhysAddr = true;
    pWalk->Core.uLevel       = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->Core.fRsvdError = true;
    pWalk->Core.uLevel     = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}
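
/*
 * Illustrative note: all three helpers above deliberately collapse into
 * VERR_PAGE_TABLE_NOT_PRESENT; the caller is expected to inspect pWalk->Core
 * to learn what went wrong and at which level.  A hedged sketch of how a
 * caller might tell the cases apart:
 *
 *     GSTPTWALK Walk;
 *     int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
 *     if (rc == VERR_PAGE_TABLE_NOT_PRESENT)
 *     {
 *         if (Walk.Core.fNotPresent)        // entry not present at Walk.Core.uLevel
 *             Log(("not present, level %u\n", Walk.Core.uLevel));
 *         else if (Walk.Core.fBadPhysAddr)  // table lives at an invalid guest-physical address
 *             Log(("bad phys addr, level %u\n", Walk.Core.uLevel));
 *         else if (Walk.Core.fRsvdError)    // reserved bits set in the entry
 *             Log(("reserved bits, level %u\n", Walk.Core.uLevel));
 *     }
 */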


/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       Where to return the walk result. This is always set.
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
{
    int rc;

    /*
     * Init the walking structure.
     */
    RT_ZERO(*pWalk);
    pWalk->Core.GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

    uint32_t fEffective = X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | 1;
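    /* Illustrative note: fEffective starts out "all permissive".  The 64-bit
       modes re-assign it at the top level and then AND each lower level's
       attributes into it; bit 0 doubles as an inverted-NX accumulator fed by
       ((entry >> 63) ^ 1), which is why fEffectiveNX is later derived from
       !(fEffective & 1). */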
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4E.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E pPml4e;
        pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E Pml4e;
        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        pWalk->Core.fEffective = fEffective = ((uint32_t)Pml4e.u & (X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A))
                                            | ((uint32_t)(Pml4e.u >> 63) ^ 1) /*NX */;

        /*
         * The PDPE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);

# elif PGM_GST_TYPE == PGM_TYPE_EPT
        rc = pgmGstGetEptPML4PtrEx(pVCpu, &pWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PEPTPML4E pPml4e;
        pWalk->pPml4e = pPml4e = &pWalk->pPml4->a[(GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK];
        EPTPML4E Pml4e;
        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        Assert(!pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmxModeBasedExecuteEpt);
        uint64_t const fEptAttrs     = Pml4e.u & EPT_PML4E_ATTR_MASK;
        uint8_t const  fExecute      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const  fRead         = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_READ);
        uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
        pWalk->Core.fEffective = fEffective = RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fRead & fWrite)
                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
                                            | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
                                            | fEffectiveEpt;

        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & EPT_PML4E_PG_MASK, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);
# endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE pPdpe;
        pWalk->pPdpe = pPdpe = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE Pdpe;
        pWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

#  if PGM_GST_TYPE == PGM_TYPE_AMD64
        pWalk->Core.fEffective = fEffective &= ((uint32_t)Pdpe.u & (X86_PDPE_RW | X86_PDPE_US | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
                                             | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */;
#  else
        pWalk->Core.fEffective = fEffective  = X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                                             | ((uint32_t)Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD))
                                             | ((uint32_t)(Pdpe.u >> 63) ^ 1) /*NX */;
#  endif

        /*
         * The PDE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);

# elif PGM_GST_TYPE == PGM_TYPE_EPT
        PEPTPDPTE pPdpte;
        pWalk->pPdpte = pPdpte = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        EPTPDPTE Pdpte;
        pWalk->Pdpte.u = Pdpte.u = pPdpte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpte)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        /* The order of the following 2 "if" statements matters. */
        if (GST_IS_PDPE_VALID(pVCpu, Pdpte))
        {
            uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE_ATTR_MASK;
            uint8_t const  fExecute      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
            pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
                                                 | fEffectiveEpt;
        }
        else if (GST_IS_BIG_PDPE_VALID(pVCpu, Pdpte))
        {
            uint64_t const fEptAttrs     = Pdpte.u & EPT_PDPTE1G_ATTR_MASK;
            uint8_t const  fExecute      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const  fDirty        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
            pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,  fDirty)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
                                                 | fEffectiveEpt;
            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
            pWalk->Core.fEffectiveUS = true;
            pWalk->Core.fEffectiveNX = !fExecute;
            pWalk->Core.fGigantPage  = true;
            pWalk->Core.fSucceeded   = true;
            pWalk->Core.GCPhys       = GST_GET_BIG_PDPE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pdpte)
                                     | (GCPtr & GST_GIGANT_PAGE_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
            return VINF_SUCCESS;
        }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);
# endif
    }
    {
        PGSTPDE pPde;
        pWalk->pPde = pPde = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE Pde;
        pWalk->Pde.u = Pde.u = pPde->u;
        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_EPT
            uint64_t const fEptAttrs     = Pde.u & EPT_PDE2M_ATTR_MASK;
            uint8_t const  fExecute      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
            uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
            uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
            uint8_t const  fDirty        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
            uint32_t fEffectiveEpt       = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
            pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,  fDirty)
                                                 | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
                                                 | fEffectiveEpt;
            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
            pWalk->Core.fEffectiveUS = true;
            pWalk->Core.fEffectiveNX = !fExecute;
# else
#  if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
#  else
            fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
                        | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
#  endif
            fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
            fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->Core.fEffective = fEffective;

            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
            pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
#  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
            pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
#  else
            pWalk->Core.fEffectiveNX = false;
#  endif
# endif
            pWalk->Core.fBigPage   = true;
            pWalk->Core.fSucceeded = true;

            pWalk->Core.GCPhys     = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                                   | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
            return VINF_SUCCESS;
        }

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_EPT
        uint64_t const fEptAttrs     = Pde.u & EPT_PDE_ATTR_MASK;
        uint8_t const  fExecute      = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const  fWrite        = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fAccessed     = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint32_t const fEffectiveEpt = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
        pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
                                             | fEffectiveEpt;
# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        pWalk->Core.fEffective = fEffective &= ((uint32_t)Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
                                             | ((uint32_t)(Pde.u >> 63) ^ 1) /*NX */;
# endif

        /*
         * The PTE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE pPte;
        pWalk->pPte = pPte = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE Pte;
        pWalk->Pte.u = Pte.u = pPte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
# if PGM_GST_TYPE == PGM_TYPE_EPT
        uint64_t const fEptAttrs = Pte.u & EPT_PTE_ATTR_MASK;
        uint8_t const  fExecute  = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_EXECUTE);
        uint8_t const  fWrite    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_WRITE);
        uint8_t const  fAccessed = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_ACCESSED);
        uint8_t const  fDirty    = RT_BF_GET(fEptAttrs, VMX_BF_EPT_PT_DIRTY);
        uint32_t fEffectiveEpt   = ((uint32_t)fEptAttrs << PGMPTWALK_EFF_EPT_ATTR_SHIFT) & PGMPTWALK_EFF_EPT_ATTR_MASK;
        pWalk->Core.fEffective = fEffective &= RT_BF_MAKE(PGM_BF_PTWALK_EFF_X,  fExecute)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_RW, fWrite)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_US, 1)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_A,  fAccessed)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_D,  fDirty)
                                             | RT_BF_MAKE(PGM_BF_PTWALK_EFF_MEMTYPE, 0)
                                             | fEffectiveEpt;
        pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
        pWalk->Core.fEffectiveUS = true;
        pWalk->Core.fEffectiveNX = !fExecute;
# else
#  if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective &= Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
#  else
        fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A))
                    | ((uint32_t)(Pte.u >> 63) ^ 1) /*NX */;
#  endif
        fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
        pWalk->Core.fEffective = fEffective;

        pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
#  if PGM_GST_TYPE == PGM_TYPE_EPT
        pWalk->Core.fEffectiveUS = true;
#  else
        pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
#  endif
#  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
#  else
        pWalk->Core.fEffectiveNX = false;
#  endif
# endif
        pWalk->Core.fSucceeded = true;
        pWalk->Core.GCPhys     = GST_GET_PDE_GCPHYS(Pte) /** @todo Shouldn't this be PTE_GCPHYS? */
                               | (GCPtr & PAGE_OFFSET_MASK);
        return VINF_SUCCESS;
    }
}
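
/*
 * Illustrative usage sketch: a caller translating a guest-virtual address
 * with this walker might consume the result like this (GCPtrGuest is a
 * hypothetical input):
 *
 *     GSTPTWALK Walk;
 *     int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtrGuest, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTGCPHYS const GCPhys = Walk.Core.GCPhys;        // translated guest-physical address
 *         bool const     fWrite = Walk.Core.fEffectiveRW;  // writable along the whole chain
 *         bool const     fBig   = Walk.Core.fBigPage;      // 2/4 MB mapping (fGigantPage for 1 GB EPT)
 *         // ...
 *     }
 */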

#endif /* 32BIT, PAE, EPT, AMD64 */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. Should the need to distinguish between big and normal pages arise
 * at a later point, a PGMGstGetPageEx() will be created for that purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned!
 */
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
   || PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_EPT \
   || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK Walk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
    if (RT_FAILURE(rc))
        return rc;

    if (pGCPhys)
        *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;

    if (pfFlags)
    {
        if (!Walk.Core.fBigPage)
            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US))                     /* NX not needed */
                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
# endif
                     ;
        else
        {
            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS))  /* NX not needed */
                     | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)
                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
# endif
                     ;
        }
    }

    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
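
/*
 * Illustrative usage sketch: a minimal caller that only needs the physical
 * address and the writable bit could look like this (GCPhysAccess is a
 * hypothetical name):
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGM_GST_NAME(GetPage)(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // GCPhys is page aligned; add the page offset back in if needed.
 *         RTGCPHYS const GCPhysAccess = GCPhys | (GCPtr & PAGE_OFFSET_MASK);
 *         // ...
 *     }
 */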


/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        GSTPTWALK Walk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.Core.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(Walk.pPt->a))
            {
                GSTPTE Pte = Walk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                Walk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *Walk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}
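
/*
 * Illustrative worked example: because the existing flags are ANDed with
 * fMask and then ORed with fFlags, write-protecting a page-aligned range
 * amounts to clearing X86_PTE_RW and setting nothing:
 *
 *     rc = PGM_GST_NAME(ModifyPage)(pVCpu, GCPtr, cb,
 *                                   0,                       // fFlags: no bits to set
 *                                   ~(uint64_t)X86_PTE_RW);  // fMask:  keep everything but RW
 *
 * Conversely, making the range writable again would pass fFlags = X86_PTE_RW
 * and fMask = ~(uint64_t)0.
 */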


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif