VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@86647

Last change on this file since 86647 was 86487, checked in by vboxsync, 4 years ago

VMM/PGM: Working on eliminating page table bitfield use (PAE PTEs). bugref:9841 bugref:9746

/* $Id: PGMAllGst.h 86487 2020-10-08 08:17:31Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
#endif
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);
#endif
RT_C_DECLS_END

/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
}
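
/* Dispatch sketch (added comment, not in the original file): the per-mode
 * function tables let mode-independent code reach the active guest-mode
 * implementation without switching on the paging mode.  A hypothetical caller
 * resolving the CR3 mapper would follow the same pattern as Enter above:
 *
 *     uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
 *     if (   idxBth < RT_ELEMENTS(g_aPgmBothModeData)
 *         && g_aPgmBothModeData[idxBth].pfnMapCR3)
 *         rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
 *     else
 *         rc = VERR_PGM_MODE_IPE;
 */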


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
{
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->Core.fNotPresent = true;
    pWalk->Core.uLevel      = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->Core.fBadPhysAddr = true;
    pWalk->Core.uLevel       = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PGSTPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->Core.fRsvdError = true;
    pWalk->Core.uLevel     = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}
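
/* Caller's-eye sketch (added comment, not in the original file): all three
 * helpers return the same VERR_PAGE_TABLE_NOT_PRESENT status, so a caller
 * that needs to tell the cases apart must inspect the walk flags:
 *
 *     if (RT_FAILURE(rc))
 *     {
 *         if (Walk.Core.fNotPresent)        // entry absent at Walk.Core.uLevel
 *             ...
 *         else if (Walk.Core.fRsvdError)    // reserved bits set at that level
 *             ...
 *         else if (Walk.Core.fBadPhysAddr)  // table at an invalid guest physical address
 *             ...
 *     }
 */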


/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       Where to return the walk result. This is always set.
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
{
    int rc;

    /*
     * Init the walking structure.
     */
    RT_ZERO(*pWalk);
    pWalk->Core.GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
  || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

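    /* Added comment: bit 0 of fEffective doubles as an inverted-NX accumulator.
       Each level below ANDs in ((entry.u >> 63) ^ 1), so the bit stays set only
       while every level leaves NX clear; fEffectiveNX is derived from it. */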
    uint32_t fEffective = X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A | 1;
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4E.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E pPml4e;
        pWalk->pPml4e  = pPml4e  = &pWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E  Pml4e;
        pWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (Pml4e.u & X86_PML4E_P) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        pWalk->Core.fEffective = fEffective = ((uint32_t)Pml4e.u & (X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A))
                                            | ((uint32_t)(Pml4e.u >> 63) ^ 1) /* NX */;

        /*
         * The PDPE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE pPdpe;
        pWalk->pPdpe  = pPdpe  = &pWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE  Pdpe;
        pWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (Pdpe.u & X86_PDPE_P) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

#  if PGM_GST_TYPE == PGM_TYPE_AMD64
        pWalk->Core.fEffective = fEffective &= ((uint32_t)Pdpe.u & (X86_PDPE_RW | X86_PDPE_US | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A))
                                             | ((uint32_t)(Pdpe.u >> 63) ^ 1) /* NX */;
#  else
        pWalk->Core.fEffective = fEffective  = X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                                             | ((uint32_t)Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD))
                                             | ((uint32_t)(Pdpe.u >> 63) ^ 1) /* NX */;
#  endif

        /*
         * The PDE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);
# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
        PGSTPDE pPde;
        pWalk->pPde  = pPde  = &pWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE  Pde;
        pWalk->Pde.u = Pde.u = pPde->u;
        if (Pde.u & X86_PDE_P) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective &= Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
# else
            fEffective &= ((uint32_t)Pde.u & (X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A))
                        | ((uint32_t)(Pde.u >> 63) ^ 1) /* NX */;
# endif
            fEffective |= (uint32_t)Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
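            /* Added comment: the big-page PAT flag lives at bit 12; shift it
               down to the bit 7 position used by 4KB PTEs so the effective
               flags use one layout. */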
            fEffective |= (uint32_t)(Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->Core.fEffective = fEffective;

            pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
            pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
            pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
# else
            pWalk->Core.fEffectiveNX = false;
# endif
            pWalk->Core.fBigPage     = true;
            pWalk->Core.fSucceeded   = true;

            pWalk->Core.GCPhys       = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                                     | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->Core.GCPhys);
            return VINF_SUCCESS;
        }

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        pWalk->Core.fEffective = fEffective &= Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        pWalk->Core.fEffective = fEffective &= ((uint32_t)Pde.u & (X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A))
                                             | ((uint32_t)(Pde.u >> 63) ^ 1) /* NX */;
# endif

        /*
         * The PTE.
         */
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GST_GET_PDE_GCPHYS(Pde), &pWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE pPte;
        pWalk->pPte  = pPte  = &pWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE  Pte;
        pWalk->Pte.u = Pte.u = pPte->u;

        if (Pte.u & X86_PTE_P) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective &= Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
# else
        fEffective &= ((uint32_t)Pte.u & (X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A))
                    | ((uint32_t)(Pte.u >> 63) ^ 1) /* NX */;
# endif
        fEffective |= (uint32_t)Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
        pWalk->Core.fEffective = fEffective;

        pWalk->Core.fEffectiveRW = !!(fEffective & X86_PTE_RW);
        pWalk->Core.fEffectiveUS = !!(fEffective & X86_PTE_US);
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        pWalk->Core.fEffectiveNX = !(fEffective & 1) && GST_IS_NX_ACTIVE(pVCpu);
# else
        pWalk->Core.fEffectiveNX = false;
# endif
        pWalk->Core.fSucceeded   = true;

        pWalk->Core.GCPhys       = GST_GET_PTE_GCPHYS(Pte)
                                 | (GCPtr & PAGE_OFFSET_MASK);
        return VINF_SUCCESS;
    }
}
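
/* Usage sketch (added comment, not in the original file): a local caller
 * translating a guest-virtual address; GCPtrGuest is a hypothetical variable.
 *
 *     GSTPTWALK Walk;
 *     int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtrGuest, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // Walk.Core.GCPhys holds the translated guest-physical address;
 *         // Walk.Core.fBigPage tells 2/4MB mappings from 4KB ones.
 *     }
 *     else
 *     {
 *         // VERR_PAGE_TABLE_NOT_PRESENT: inspect Walk.Core.fNotPresent,
 *         // fRsvdError, fBadPhysAddr and uLevel for the failing level.
 *     }
 */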

#endif /* 32BIT, PAE, AMD64 */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPageEx() will be created for that
 * purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned!
 */
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    if (pfFlags)
        *pfFlags = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    if (pGCPhys)
        *pGCPhys = GCPtr & PAGE_BASE_GC_MASK;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
   || PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK Walk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
    if (RT_FAILURE(rc))
        return rc;

    if (pGCPhys)
        *pGCPhys = Walk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;

    if (pfFlags)
    {
        if (!Walk.Core.fBigPage)
            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
# endif
                     ;
        else
        {
            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
                     | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)
                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
# endif
                     ;
        }
    }

    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
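
/* Usage sketch (added comment, not in the original file): querying the flags
 * and physical address of a page; either output pointer may be NULL.
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGM_GST_NAME(GetPage)(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // Page is present and effectively writable; GCPhys is page aligned.
 *     }
 */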


/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to. Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        GSTPTWALK Walk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.Core.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(Walk.pPt->a))
            {
                GSTPTE Pte = Walk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                Walk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (Walk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *Walk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}
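
/* Usage sketch (added comment, not in the original file): write-protecting a
 * single page at the hypothetical page-aligned address GCPtrPage.  fMask is
 * ANDed into the existing flags and fFlags is ORed in, so clearing RW means
 * masking it out while ORing in nothing:
 *
 *     // fFlags = 0, fMask = everything except X86_PTE_RW
 *     rc = PGM_GST_NAME(ModifyPage)(pVCpu, GCPtrPage, PAGE_SIZE,
 *                                   0, ~(uint64_t)X86_PTE_RW);
 */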


/**
 * Retrieve guest PDE information.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest context pointer.
 * @param   pPDE        Pointer to guest PDE structure.
 */
PGM_GST_DECL(int, GetPDE)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPDE)
{
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_PAE \
 || PGM_GST_TYPE == PGM_TYPE_AMD64

# if PGM_GST_TYPE != PGM_TYPE_AMD64
    /* Boundary check. */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return VERR_PAGE_TABLE_NOT_PRESENT;
# endif

# if PGM_GST_TYPE == PGM_TYPE_32BIT
    unsigned iPd = (GCPtr >> GST_PD_SHIFT) & GST_PD_MASK;
    PX86PD   pPd = pgmGstGet32bitPDPtr(pVCpu);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
    unsigned   iPd = 0; /* shut up gcc */
    PCX86PDPAE pPd = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPd, NULL);

# elif PGM_GST_TYPE == PGM_TYPE_AMD64
    PX86PML4E  pPml4eIgn;
    X86PDPE    PdpeIgn;
    unsigned   iPd = 0; /* shut up gcc */
    PCX86PDPAE pPd = pgmGstGetLongModePDPtr(pVCpu, GCPtr, &pPml4eIgn, &PdpeIgn, &iPd);
    /* Note! We do not return an effective PDE here like we do for the PTE in GetPage method. */
# endif

    if (RT_LIKELY(pPd))
        pPDE->u = (X86PGPAEUINT)pPd->a[iPd].u;
    else
        pPDE->u = 0;
    return VINF_SUCCESS;

#else
    NOREF(pVCpu); NOREF(GCPtr); NOREF(pPDE);
    AssertFailed();
    return VERR_NOT_IMPLEMENTED;
#endif
}
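
/* Usage sketch (added comment, not in the original file): checking whether the
 * directory entry covering GCPtr maps a 2/4MB page.
 *
 *     X86PDEPAE Pde;
 *     int rc = PGM_GST_NAME(GetPDE)(pVCpu, GCPtr, &Pde);
 *     if (RT_SUCCESS(rc) && (Pde.u & X86_PDE_P))
 *     {
 *         bool const fBigPage = RT_BOOL(Pde.u & X86_PDE_PS);
 *         // ...
 *     }
 */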


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif