VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllShw.h@73299

Last change on this file since 73299 was 73273, checked in by vboxsync on 2018-07-20

PGM: Cleanups. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 23.1 KB
/* $Id: PGMAllShw.h 73273 2018-07-20 14:58:52Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
* Defined Constants And Macros *
*********************************************************************************************************************************/
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK

#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
# define SHWPT X86PT
# define PSHWPT PX86PT
# define SHWPTE X86PTE
# define PSHWPTE PX86PTE
# define SHWPD X86PD
# define PSHWPD PX86PD
# define SHWPDE X86PDE
# define PSHWPDE PX86PDE
# define SHW_PDE_PG_MASK X86_PDE_PG_MASK
# define SHW_PD_SHIFT X86_PD_SHIFT
# define SHW_PD_MASK X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES X86_PG_ENTRIES
# define SHW_PTE_PG_MASK X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).n.u1Present )
# define SHW_PTE_IS_RW(Pte) ( (Pte).n.u1Write )
# define SHW_PTE_IS_US(Pte) ( (Pte).n.u1User )
# define SHW_PTE_IS_A(Pte) ( (Pte).n.u1Accessed )
# define SHW_PTE_IS_D(Pte) ( (Pte).n.u1Dirty )
# define SHW_PTE_IS_P_RW(Pte) ( (Pte).n.u1Present && (Pte).n.u1Write )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).n.u1Write = 0; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0)
# define SHW_PT_SHIFT X86_PT_SHIFT
# define SHW_PT_MASK X86_PT_MASK

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWPT EPTPT
# define PSHWPT PEPTPT
# define SHWPTE EPTPTE
# define PSHWPTE PEPTPTE
# define SHWPD EPTPD
# define PSHWPD PEPTPD
# define SHWPDE EPTPDE
# define PSHWPDE PEPTPDE
# define SHW_PDE_PG_MASK EPT_PDE_PG_MASK
# define SHW_PD_SHIFT EPT_PD_SHIFT
# define SHW_PD_MASK EPT_PD_MASK
# define SHW_PTE_PG_MASK EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte) ( (Pte).n.u1Present ) /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte) ( (Pte).n.u1Write )
# define SHW_PTE_IS_US(Pte) ( true )
# define SHW_PTE_IS_A(Pte) ( true )
# define SHW_PTE_IS_D(Pte) ( true )
# define SHW_PTE_IS_P_RW(Pte) ( (Pte).n.u1Present && (Pte).n.u1Write )
# define SHW_PTE_IS_TRACK_DIRTY(Pte) ( false )
# define SHW_PTE_GET_HCPHYS(Pte) ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte) ( (Pte).u )
# define SHW_PTE_GET_U(Pte) ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte) do { (Pte).n.u1Write = 0; } while (0)
# define SHW_PTE_SET_RW(Pte) do { (Pte).n.u1Write = 1; } while (0)
# define SHW_PT_SHIFT EPT_PT_SHIFT
# define SHW_PT_MASK EPT_PT_MASK
# define SHW_PDPT_SHIFT EPT_PDPT_SHIFT
# define SHW_PDPT_MASK EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES (EPT_PG_AMD64_ENTRIES*EPT_PG_AMD64_PDPE_ENTRIES)

#else
# define SHWPT PGMSHWPTPAE
# define PSHWPT PPGMSHWPTPAE
# define SHWPTE PGMSHWPTEPAE
# define PSHWPTE PPGMSHWPTEPAE
# define SHWPD X86PDPAE
# define PSHWPD PX86PDPAE
# define SHWPDE X86PDEPAE
# define PSHWPDE PX86PDEPAE
# define SHW_PDE_PG_MASK X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT X86_PD_PAE_SHIFT
# define SHW_PD_MASK X86_PD_PAE_MASK
# define SHW_PTE_PG_MASK X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte) PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte) PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte) PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte) PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte) PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte) PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte) PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte) PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte) PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte) PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew) PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew) PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte) PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte) PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT X86_PT_PAE_SHIFT
# define SHW_PT_MASK X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
#  define SHW_PDPT_SHIFT X86_PDPT_SHIFT
#  define SHW_PDPT_MASK X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
#endif


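/*
 * Illustrative sketch: the SHW_* aliases above let the template code below
 * walk and edit shadow paging structures without caring whether the active
 * mode is 32-bit, PAE/AMD64 or EPT.  A hypothetical helper (the name
 * shwSampleWriteProtectPte is made up for illustration) built purely on the
 * abstract accessors would look roughly like this:
 *
 *     static void shwSampleWriteProtectPte(PSHWPT pPT, unsigned iPte)
 *     {
 *         SHWPTE Pte = pPT->a[iPte];
 *         if (SHW_PTE_IS_P(Pte) && SHW_PTE_IS_RW(Pte))
 *         {
 *             SHW_PTE_SET_RO(Pte);                    // clear the write bit in the local copy
 *             SHW_PTE_ATOMIC_SET2(pPT->a[iPte], Pte); // publish it with the mode's atomic width
 *         }
 *     }
 *
 * The real code paths (ModifyPage below, for instance) follow the same
 * read-modify-atomically-write pattern.
 */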

/*********************************************************************************************************************************
* Internal Functions *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode);
PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu);
#ifdef IN_RING3
PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END

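/*
 * Instantiation sketch: this header is a template rather than a normal header.
 * The including compilation unit defines PGM_SHW_TYPE plus the
 * PGM_SHW_NAME()/PGM_SHW_DECL() name-mangling macros and then includes the
 * file once per supported shadow mode, roughly like this (the exact macro
 * spellings live in the including files and PGMInternal.h, so treat this as
 * an approximation):
 *
 *     #define PGM_SHW_TYPE       PGM_TYPE_32BIT
 *     #define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
 *     #include "PGMAllShw.h"
 *     #undef  PGM_SHW_TYPE
 *     #undef  PGM_SHW_NAME
 *
 * That is why the prototypes above are declared through PGM_SHW_DECL() and
 * why the macro block at the top #undefs everything before redefining it.
 */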

/**
 * Enters the shadow mode.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param fIs64BitsPagingMode New shadow paging mode is for 64-bit? (Only relevant for 64-bit guests on a 32-bit AMD-V nested paging host.)
 */
PGM_SHW_DECL(int, Enter)(PVMCPU pVCpu, bool fIs64BitsPagingMode)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)

# if PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && HC_ARCH_BITS == 32
    /* Must distinguish between 32-bit and 64-bit guest paging modes as we'll use
       a different shadow paging root/mode in the two cases. */
    RTGCPHYS GCPhysCR3 = (fIs64BitsPagingMode) ? RT_BIT_64(63) : RT_BIT_64(62);
# else
    RTGCPHYS GCPhysCR3 = RT_BIT_64(63); NOREF(fIs64BitsPagingMode);
# endif
    PPGMPOOLPAGE pNewShwPageCR3;
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    Assert((HMIsNestedPagingActive(pVM) || VM_IS_NEM_ENABLED(pVM)) == pVM->pgm.s.fNestedPaging);
    Assert(pVM->pgm.s.fNestedPaging);
    Assert(!pVCpu->pgm.s.pShwPageCR3R3);

    pgmLock(pVM);

    int rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_ROOT_NESTED, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
                          &pNewShwPageCR3);
    AssertLogRelRCReturnStmt(rc, pgmUnlock(pVM), rc);

    pVCpu->pgm.s.pShwPageCR3R3 = (R3PTRTYPE(PPGMPOOLPAGE))MMHyperCCToR3(pVM, pNewShwPageCR3);
    pVCpu->pgm.s.pShwPageCR3RC = (RCPTRTYPE(PPGMPOOLPAGE))MMHyperCCToRC(pVM, pNewShwPageCR3);
    pVCpu->pgm.s.pShwPageCR3R0 = (R0PTRTYPE(PPGMPOOLPAGE))MMHyperCCToR0(pVM, pNewShwPageCR3);

    pgmUnlock(pVM);

    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
#else
    NOREF(pVCpu); NOREF(fIs64BitsPagingMode);
#endif
    return VINF_SUCCESS;
}


/**
 * Exits the shadow mode.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Exit)(PVMCPU pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        pgmLock(pVM);

        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
         * We currently assert when you try to free one of them; don't bother to really allow this.
         *
         * Note that this is at most two nested paging root pages; they aren't leaked, they are reused.
         */
        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;
        pVCpu->pgm.s.pShwPageCR3RC = 0;

        pgmUnlock(pVM);

        Log(("Leave nested shadow paging mode\n"));
    }
#else
    RT_NOREF_PV(pVCpu);
#endif
    return VINF_SUCCESS;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Guest Context virtual address of the page.
 * @param pfFlags Where to store the flags. These are X86_PTE_*.
 * @param pHCPhys Where to store the HC physical address of the page.
 *                This is page aligned.
 * @remark You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Get the PDE.
     */
#if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!Pml4e.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!Pdpe.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    Pde.n.u1Accessed &= Pml4e.n.u1Accessed & Pdpe.lm.u1Accessed;
    Pde.n.u1Write &= Pml4e.n.u1Write & Pdpe.lm.u1Write;
    Pde.n.u1User &= Pml4e.n.u1User & Pdpe.lm.u1User;
    Pde.n.u1NoExecute |= Pml4e.n.u1NoExecute | Pdpe.lm.u1NoExecute;

#elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PEPTPD pPDDst;
    EPTPDE Pde;

    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    if (rc != VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);
    Pde = pPDDst->a[iPd];

#elif PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
#else
# error "Misconfigured PGM_SHW_TYPE or something..."
#endif
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Deal with large pages. */
    if (Pde.b.u1Size)
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
#if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
            if (   (Pde.u & X86_PTE_PAE_NX)
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
                && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
# endif
               )
                *pfFlags |= X86_PTE_PAE_NX;
#endif
        }

        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);

        return VINF_SUCCESS;
    }
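    /*
     * Worked example for the large-page branch above (a sketch assuming a
     * PAE/AMD64 shadow, where SHW_PD_SHIFT is 21 and the large page is 2 MB):
     *     GCPtr                               = 0x00000000d0345678
     *     GCPtr & (RT_BIT(21) - 1)            = 0x145678   (offset into the 2 MB page)
     *     0x145678 & X86_PAGE_4K_BASE_MASK    = 0x145000   (4K-aligned part of that offset)
     *     *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + 0x145000
     * i.e. the returned address is the 4 KB page within the large page that
     * GCPtr falls into, which keeps the "page aligned" promise in the function
     * documentation.
     */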

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
    {
        int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc2))
            return rc2;
    }
    else /* mapping: */
    {
#if PGM_SHW_TYPE == PGM_TYPE_AMD64 \
 || PGM_SHW_TYPE == PGM_TYPE_EPT \
 || defined(PGM_WITHOUT_MAPPINGS)
        AssertFailed(); /* can't happen */
        pPT = NULL; /* shut up MSC */
#else
        Assert(pgmMapAreMappingsEnabled(pVM));

        PPGMMAPPING pMap = pgmGetMapping(pVM, (RTGCPTR)GCPtr);
        AssertMsgReturn(pMap, ("GCPtr=%RGv\n", GCPtr), VERR_PGM_MAPPING_IPE);
# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(pPT);
# else /* PAE */
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(paPaePTs);
# endif
#endif
    }
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));

#if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        /* The NX bit is determined by a bitwise OR between the PT and PD */
        if (   ((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX)
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
            && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
# endif
           )
            *pfFlags |= X86_PTE_PAE_NX;
#endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
}

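/*
 * Caller sketch (illustrative): external code normally reaches the template
 * instance above through the mode-dispatching wrapper PGMShwGetPage() in
 * PGMAll.cpp (treat the exact wrapper name as an assumption should it move),
 * along these lines:
 *
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // The shadow mapping is present and writable; HCPhys is the
 *         // page-aligned host-physical address of the page backing GCPtr.
 *     }
 *
 * VERR_PAGE_TABLE_NOT_PRESENT / VERR_PAGE_NOT_PRESENT only mean the shadow
 * structures have not been built for GCPtr yet, not that the guest mapping
 * itself is invalid.
 */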

/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range. Page aligned!
 * @param cb Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param fMask The AND mask - page flags X86_PTE_*.
 *              Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
 * @remark You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Walk page tables and pages till we're done.
     */
    for (;;)
    {
        /*
         * Get the PDE.
         */
#if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!Pml4e.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!Pdpe.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

#elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD pPDDst;
        EPTPDE Pde;

        rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        Pde = pPDDst->a[iPd];

#else /* PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
#endif
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        AssertFatal(!Pde.b.u1Size);

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE NewPte;

                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *        the page pool. */
                    AssertFailed();
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data. We can
                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *        set instead of resolving the guest physical
                     *        address yet again. */
                    RTGCPHYS GCPhys;
                    uint64_t fGstPte;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert((fGstPte & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
#if PGM_SHW_TYPE == PGM_TYPE_EPT
                HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
#else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
#endif
            }

            /* next page */
            cb -= PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += PAGE_SIZE;
            iPTE++;
        }
    }
}

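/*
 * Usage sketch (illustrative): since the existing PTE bits are ANDed with
 * fMask and then ORed with fFlags, write-protecting a range without touching
 * any other bits boils down to fFlags = 0 and fMask = ~X86_PTE_RW.  Assuming
 * the usual PGM_SHW_PFN() mode dispatch from PGMInternal.h, a caller that
 * already owns the PGM lock would do roughly:
 *
 *     // fFlags = 0, fMask = ~RW, fOpFlags = 0: clear the write bit, keep everything else.
 *     rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW, 0);
 *
 * The explicit uint64_t cast before the ~ matters: as the fMask documentation
 * above warns, a plain ~'ed 32-bit constant would silently clear the upper
 * flag bits (NX in particular).
 */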

#ifdef IN_RING3
/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param offDelta The relocation offset.
 */
PGM_SHW_DECL(int, Relocate)(PVMCPU pVCpu, RTGCPTR offDelta)
{
    pVCpu->pgm.s.pShwPageCR3RC += offDelta;
    return VINF_SUCCESS;
}
#endif
