VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllShw.h@34147

Last change on this file since 34147 was 32036, checked in by vboxsync, 14 years ago:

Removed X86_PTE_PAE_PG_MASK, renamed X86_PTE_PAE_PG_MASK_FULL to X86_PTE_PAE_PG_MASK.
/* $Id: PGMAllShw.h 32036 2010-08-27 10:14:39Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK
#undef SHW_POOL_ROOT_IDX

#if PGM_SHW_TYPE == PGM_TYPE_32BIT
# define SHWPT                          X86PT
# define PSHWPT                         PX86PT
# define SHWPTE                         X86PTE
# define PSHWPTE                        PX86PTE
# define SHWPD                          X86PD
# define PSHWPD                         PX86PD
# define SHWPDE                         X86PDE
# define PSHWPDE                        PX86PDE
# define SHW_PDE_PG_MASK                X86_PDE_PG_MASK
# define SHW_PD_SHIFT                   X86_PD_SHIFT
# define SHW_PD_MASK                    X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES           X86_PG_ENTRIES
# define SHW_PTE_PG_MASK                X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte)              ( (Pte).n.u1Present )
# define SHW_PTE_IS_RW(Pte)             ( (Pte).n.u1Write )
# define SHW_PTE_IS_US(Pte)             ( (Pte).n.u1User )
# define SHW_PTE_IS_A(Pte)              ( (Pte).n.u1Accessed )
# define SHW_PTE_IS_D(Pte)              ( (Pte).n.u1Dirty )
# define SHW_PTE_IS_P_RW(Pte)           ( (Pte).n.u1Present && (Pte).n.u1Write )
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte)        ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte)             ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte)             ( (Pte).u )             /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte)            do { (Pte).n.u1Write = 0; } while (0)
# define SHW_PTE_SET_RW(Pte)            do { (Pte).n.u1Write = 1; } while (0)
# define SHW_PT_SHIFT                   X86_PT_SHIFT
# define SHW_PT_MASK                    X86_PT_MASK
# define SHW_POOL_ROOT_IDX              PGMPOOL_IDX_PD

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWPT                          EPTPT
# define PSHWPT                         PEPTPT
# define SHWPTE                         EPTPTE
# define PSHWPTE                        PEPTPTE
# define SHWPD                          EPTPD
# define PSHWPD                         PEPTPD
# define SHWPDE                         EPTPDE
# define PSHWPDE                        PEPTPDE
# define SHW_PDE_PG_MASK                EPT_PDE_PG_MASK
# define SHW_PD_SHIFT                   EPT_PD_SHIFT
# define SHW_PD_MASK                    EPT_PD_MASK
# define SHW_PTE_PG_MASK                EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte)              ( (Pte).n.u1Present )   /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte)             ( (Pte).n.u1Write )
# define SHW_PTE_IS_US(Pte)             ( true )
# define SHW_PTE_IS_A(Pte)              ( true )
# define SHW_PTE_IS_D(Pte)              ( true )
# define SHW_PTE_IS_P_RW(Pte)           ( (Pte).n.u1Present && (Pte).n.u1Write )
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    ( false )
# define SHW_PTE_GET_HCPHYS(Pte)        ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte)             ( (Pte).u )
# define SHW_PTE_GET_U(Pte)             ( (Pte).u )             /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte)            do { (Pte).n.u1Write = 0; } while (0)
# define SHW_PTE_SET_RW(Pte)            do { (Pte).n.u1Write = 1; } while (0)
# define SHW_PT_SHIFT                   EPT_PT_SHIFT
# define SHW_PT_MASK                    EPT_PT_MASK
# define SHW_PDPT_SHIFT                 EPT_PDPT_SHIFT
# define SHW_PDPT_MASK                  EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK               EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES           (EPT_PG_AMD64_ENTRIES * EPT_PG_AMD64_PDPE_ENTRIES)
# define SHW_POOL_ROOT_IDX              PGMPOOL_IDX_NESTED_ROOT /* do not use! exception is real mode & protected mode without paging. */

#else
# define SHWPT                          PGMSHWPTPAE
# define PSHWPT                         PPGMSHWPTPAE
# define SHWPTE                         PGMSHWPTEPAE
# define PSHWPTE                        PPGMSHWPTEPAE
# define SHWPD                          X86PDPAE
# define PSHWPD                         PX86PDPAE
# define SHWPDE                         X86PDEPAE
# define PSHWPDE                        PX86PDEPAE
# define SHW_PDE_PG_MASK                X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT                   X86_PD_PAE_SHIFT
# define SHW_PD_MASK                    X86_PD_PAE_MASK
# define SHW_PTE_PG_MASK                X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte)              PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte)             PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte)             PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte)              PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte)              PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte)           PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte)        PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte)             PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte)             PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte)            PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte)            PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT                   X86_PT_PAE_SHIFT
# define SHW_PT_MASK                    X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64
#  define SHW_PDPT_SHIFT                X86_PDPT_SHIFT
#  define SHW_PDPT_MASK                 X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK              X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES          (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define SHW_POOL_ROOT_IDX             PGMPOOL_IDX_AMD64_CR3

# else /* 32-bit PAE mode */
#  define SHW_PDPT_SHIFT                X86_PDPT_SHIFT
#  define SHW_PDPT_MASK                 X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK              X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES          (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define SHW_POOL_ROOT_IDX             PGMPOOL_IDX_PDPT

# endif
#endif


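/*
 * Instantiation note (illustrative, not part of the original file): this is
 * a template header which PGM compiles once per shadow paging mode.  The
 * including code defines PGM_SHW_TYPE and a name-mangling macro before each
 * inclusion, along the lines of the sketch below.  Treat the exact macro
 * spellings as an assumption about how PGMAll.cpp consumes this file.
 *
 *     #define PGM_SHW_TYPE        PGM_TYPE_32BIT
 *     #define PGM_SHW_NAME(name)  PGM_SHW_NAME_32BIT(name)
 *     #include "PGMAllShw.h"
 *     #undef  PGM_SHW_TYPE
 *     #undef  PGM_SHW_NAME
 *
 * Repeating this for PAE, AMD64, nested and EPT yields one GetPage and one
 * ModifyPage instance per mode, each compiled against the SHW_* aliases
 * selected above.
 */
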
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
RT_C_DECLS_END



/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags.  These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NESTED
    return VERR_PAGE_TABLE_NOT_PRESENT;

#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    Assert(PGMIsLockOwner(pVM));

    /*
     * Get the PDE.
     */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!Pml4e.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!Pdpe.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    Pde.n.u1Accessed  &= Pml4e.n.u1Accessed  & Pdpe.lm.u1Accessed;
    Pde.n.u1Write     &= Pml4e.n.u1Write     & Pdpe.lm.u1Write;
    Pde.n.u1User      &= Pml4e.n.u1User      & Pdpe.lm.u1User;
    Pde.n.u1NoExecute |= Pml4e.n.u1NoExecute | Pdpe.lm.u1NoExecute;

# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PEPTPD pPDDst;
    EPTPDE Pde;

    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    if (rc != VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);
    Pde = pPDDst->a[iPd];

# else /* PGM_TYPE_32BIT */
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Deal with large pages. */
    if (Pde.b.u1Size)
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */
            if ((Pde.u & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
                *pfFlags |= X86_PTE_PAE_NX;
# endif
        }

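        /* The result is page aligned: the large-page base plus the 4K-page-aligned offset of GCPtr within it. */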
        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);

        return VINF_SUCCESS;
    }

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    if (!(Pde.u & PGM_PDFLAGS_MAPPING))
    {
        int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc2))
            return rc2;
    }
    else /* mapping: */
    {
# if    PGM_SHW_TYPE == PGM_TYPE_AMD64 \
     || PGM_SHW_TYPE == PGM_TYPE_EPT
        AssertFailed(); /* can't happen */
# else
        Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));

        PPGMMAPPING pMap = pgmGetMapping(pVM, (RTGCPTR)GCPtr);
        AssertMsgReturn(pMap, ("GCPtr=%RGv\n", GCPtr), VERR_INTERNAL_ERROR);
#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(pPT);
#  else /* PAE */
        pPT = pMap->aPTs[(GCPtr - pMap->GCPtr) >> X86_PD_SHIFT].CTX_SUFF(paPaePTs);
#  endif
# endif
    }
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
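        /* RW and US are granted only if both the PDE and the PTE grant them; the other flag bits come from the PTE. */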
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) /** @todo why do we have to check the guest state here? */
        /* The NX bit is determined by a bitwise OR between the PT and PD. */
        if (((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX) && CPUMIsGuestNXEnabled(pVCpu))
            *pfFlags |= X86_PTE_PAE_NX;
# endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
}
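
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally reach this code through PGM's mode-dispatching wrapper rather
 * than by naming a template instance directly.  Assuming a wrapper with the
 * shape of PGMShwGetPage():
 *
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // The shadow mapping is present and writable; HCPhys holds the
 *         // page-aligned host-physical address backing GCPtr.
 *     }
 */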


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range.  Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to.  Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPU pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
# if PGM_SHW_TYPE == PGM_TYPE_NESTED
    return VERR_PAGE_TABLE_NOT_PRESENT;

# else /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc;

    Assert(PGMIsLockOwner(pVM));

    /*
     * Walk page tables and pages till we're done.
     */
    for (;;)
    {
        /*
         * Get the PDE.
         */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!Pml4e.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!Pdpe.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        PEPTPD pPDDst;
        EPTPDE Pde;

        rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
        if (rc != VINF_SUCCESS)
        {
            AssertRC(rc);
            return rc;
        }
        Assert(pPDDst);
        Pde = pPDDst->a[iPd];

# else /* PGM_TYPE_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
        if (!Pde.n.u1Present)
            return VERR_PAGE_TABLE_NOT_PRESENT;

        AssertFatal(!Pde.b.u1Size);

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE       NewPte;

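                /* Apply the AND/OR masks to the flag bits while always preserving the physical address bits. */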
                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *        the page pool. */
                    AssertFailed();
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data.  We can
                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *        set instead of resolving the guest physical
                     *        address yet again. */
                    RTGCPHYS GCPhys;
                    uint64_t fGstPte;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &fGstPte, &GCPhys);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert(fGstPte & X86_PTE_RW);
                        PPGMPAGE pPage = pgmPhysGetPage(&pVCpu->CTX_SUFF(pVM)->pgm.s, GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
# if PGM_SHW_TYPE == PGM_TYPE_EPT
                HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
# else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
# endif
            }

            /* next page */
            cb -= PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += PAGE_SIZE;
            iPTE++;
        }
    }
# endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED */
}
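
/*
 * Worked example (illustrative, not part of the original file): because the
 * existing flags are ANDed with fMask and then ORed with fFlags, write
 * protecting a present page means clearing X86_PTE_RW via the AND mask and
 * setting nothing via the OR mask.  Note the 64-bit cast before ~, per the
 * warning in the doc comment about ~'ing 32-bit values.  The dispatch macro
 * below (PGM_SHW_PFN) is an assumption about PGM's per-mode function
 * tables; the mask arithmetic is the point being illustrated.
 *
 *     uint64_t const fFlags = 0;                      // OR mask: set nothing
 *     uint64_t const fMask  = ~(uint64_t)X86_PTE_RW;  // AND mask: clear RW, keep the rest
 *     rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, 0);  // fOpFlags = 0
 */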