/* $Id: PGMAllShw.h 99748 2023-05-11 11:22:56Z vboxsync $ */
/** @file
 * VBox - Page Manager, Shadow Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#undef SHWUINT
#undef SHWPT
#undef PSHWPT
#undef SHWPTE
#undef PSHWPTE
#undef SHWPD
#undef PSHWPD
#undef SHWPDE
#undef PSHWPDE
#undef SHW_PDE_PG_MASK
#undef SHW_PD_SHIFT
#undef SHW_PD_MASK
#undef SHW_PDE_ATOMIC_SET
#undef SHW_PDE_ATOMIC_SET2
#undef SHW_PDE_IS_P
#undef SHW_PDE_IS_A
#undef SHW_PDE_IS_BIG
#undef SHW_PTE_PG_MASK
#undef SHW_PTE_IS_P
#undef SHW_PTE_IS_RW
#undef SHW_PTE_IS_US
#undef SHW_PTE_IS_A
#undef SHW_PTE_IS_D
#undef SHW_PTE_IS_P_RW
#undef SHW_PTE_IS_TRACK_DIRTY
#undef SHW_PTE_GET_HCPHYS
#undef SHW_PTE_GET_U
#undef SHW_PTE_LOG64
#undef SHW_PTE_SET
#undef SHW_PTE_ATOMIC_SET
#undef SHW_PTE_ATOMIC_SET2
#undef SHW_PTE_SET_RO
#undef SHW_PTE_SET_RW
#undef SHW_PT_SHIFT
#undef SHW_PT_MASK
#undef SHW_TOTAL_PD_ENTRIES
#undef SHW_PDPT_SHIFT
#undef SHW_PDPT_MASK
#undef SHW_PDPE_PG_MASK

#if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
# define SHWUINT                        uint32_t
# define SHWPT                          X86PT
# define PSHWPT                         PX86PT
# define SHWPTE                         X86PTE
# define PSHWPTE                        PX86PTE
# define SHWPD                          X86PD
# define PSHWPD                         PX86PD
# define SHWPDE                         X86PDE
# define PSHWPDE                        PX86PDE
# define SHW_PDE_PG_MASK                X86_PDE_PG_MASK
# define SHW_PD_SHIFT                   X86_PD_SHIFT
# define SHW_PD_MASK                    X86_PD_MASK
# define SHW_TOTAL_PD_ENTRIES           X86_PG_ENTRIES
# define SHW_PDE_IS_P(Pde)              ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde)              ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU32(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU32(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK                X86_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte)              ( (Pte).u & X86_PTE_P )
# define SHW_PTE_IS_RW(Pte)             ( (Pte).u & X86_PTE_RW )
# define SHW_PTE_IS_US(Pte)             ( (Pte).u & X86_PTE_US )
# define SHW_PTE_IS_A(Pte)              ( (Pte).u & X86_PTE_A )
# define SHW_PTE_IS_D(Pte)              ( (Pte).u & X86_PTE_D )
# define SHW_PTE_IS_P_RW(Pte)           ( ((Pte).u & (X86_PTE_P | X86_PTE_RW)) == (X86_PTE_P | X86_PTE_RW) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    ( !!((Pte).u & PGM_PTFLAGS_TRACK_DIRTY) )
# define SHW_PTE_GET_HCPHYS(Pte)        ( (Pte).u & X86_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte)             ( (uint64_t)(Pte).u )
# define SHW_PTE_GET_U(Pte)             ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  do { ASMAtomicWriteU32(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU32(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte)            do { (Pte).u &= ~(X86PGUINT)X86_PTE_RW; } while (0)
# define SHW_PTE_SET_RW(Pte)            do { (Pte).u |= X86_PTE_RW; } while (0)
# define SHW_PT_SHIFT                   X86_PT_SHIFT
# define SHW_PT_MASK                    X86_PT_MASK

#elif PGM_SHW_TYPE == PGM_TYPE_EPT
# define SHWUINT                        uint64_t
# define SHWPT                          EPTPT
# define PSHWPT                         PEPTPT
# define SHWPTE                         EPTPTE
# define PSHWPTE                        PEPTPTE
# define SHWPD                          EPTPD
# define PSHWPD                         PEPTPD
# define SHWPDE                         EPTPDE
# define PSHWPDE                        PEPTPDE
# define SHW_PDE_PG_MASK                EPT_PDE_PG_MASK
# define SHW_PD_SHIFT                   EPT_PD_SHIFT
# define SHW_PD_MASK                    EPT_PD_MASK
# define SHW_PDE_IS_P(Pde)              ( (Pde).u & EPT_E_READ /* always set */ )
# define SHW_PDE_IS_A(Pde)              ( 1 ) /* We don't use EPT_E_ACCESSED, use with care! */
# define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & EPT_E_LEAF )
# define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK                EPT_PTE_PG_MASK
# define SHW_PTE_IS_P(Pte)              ( (Pte).u & EPT_E_READ ) /* Approximation, works for us. */
# define SHW_PTE_IS_RW(Pte)             ( (Pte).u & EPT_E_WRITE )
# define SHW_PTE_IS_US(Pte)             ( true )
# define SHW_PTE_IS_A(Pte)              ( true )
# define SHW_PTE_IS_D(Pte)              ( true )
# define SHW_PTE_IS_P_RW(Pte)           ( ((Pte).u & (EPT_E_READ | EPT_E_WRITE)) == (EPT_E_READ | EPT_E_WRITE) )
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    ( false )
# define SHW_PTE_GET_HCPHYS(Pte)        ( (Pte).u & EPT_PTE_PG_MASK )
# define SHW_PTE_LOG64(Pte)             ( (Pte).u )
# define SHW_PTE_GET_U(Pte)             ( (Pte).u ) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         do { (Pte).u = (uNew); } while (0)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  do { ASMAtomicWriteU64(&(Pte).u, (uNew)); } while (0)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) do { ASMAtomicWriteU64(&(Pte).u, (Pte2).u); } while (0)
# define SHW_PTE_SET_RO(Pte)            do { (Pte).u &= ~(uint64_t)EPT_E_WRITE; } while (0)
# define SHW_PTE_SET_RW(Pte)            do { (Pte).u |= EPT_E_WRITE; } while (0)
# define SHW_PT_SHIFT                   EPT_PT_SHIFT
# define SHW_PT_MASK                    EPT_PT_MASK
# define SHW_PDPT_SHIFT                 EPT_PDPT_SHIFT
# define SHW_PDPT_MASK                  EPT_PDPT_MASK
# define SHW_PDPE_PG_MASK               EPT_PDPE_PG_MASK
# define SHW_TOTAL_PD_ENTRIES           (EPT_PG_AMD64_ENTRIES * EPT_PG_AMD64_PDPE_ENTRIES)

#else
# define SHWUINT                        uint64_t
# define SHWPT                          PGMSHWPTPAE
# define PSHWPT                         PPGMSHWPTPAE
# define SHWPTE                         PGMSHWPTEPAE
# define PSHWPTE                        PPGMSHWPTEPAE
# define SHWPD                          X86PDPAE
# define PSHWPD                         PX86PDPAE
# define SHWPDE                         X86PDEPAE
# define PSHWPDE                        PX86PDEPAE
# define SHW_PDE_PG_MASK                X86_PDE_PAE_PG_MASK
# define SHW_PD_SHIFT                   X86_PD_PAE_SHIFT
# define SHW_PD_MASK                    X86_PD_PAE_MASK
# define SHW_PDE_IS_P(Pde)              ( (Pde).u & X86_PDE_P )
# define SHW_PDE_IS_A(Pde)              ( (Pde).u & X86_PDE_A )
# define SHW_PDE_IS_BIG(Pde)            ( (Pde).u & X86_PDE_PS )
# define SHW_PDE_ATOMIC_SET(Pde, uNew)  do { ASMAtomicWriteU64(&(Pde).u, (uNew)); } while (0)
# define SHW_PDE_ATOMIC_SET2(Pde, Pde2) do { ASMAtomicWriteU64(&(Pde).u, (Pde2).u); } while (0)
# define SHW_PTE_PG_MASK                X86_PTE_PAE_PG_MASK
# define SHW_PTE_IS_P(Pte)              PGMSHWPTEPAE_IS_P(Pte)
# define SHW_PTE_IS_RW(Pte)             PGMSHWPTEPAE_IS_RW(Pte)
# define SHW_PTE_IS_US(Pte)             PGMSHWPTEPAE_IS_US(Pte)
# define SHW_PTE_IS_A(Pte)              PGMSHWPTEPAE_IS_A(Pte)
# define SHW_PTE_IS_D(Pte)              PGMSHWPTEPAE_IS_D(Pte)
# define SHW_PTE_IS_P_RW(Pte)           PGMSHWPTEPAE_IS_P_RW(Pte)
# define SHW_PTE_IS_TRACK_DIRTY(Pte)    PGMSHWPTEPAE_IS_TRACK_DIRTY(Pte)
# define SHW_PTE_GET_HCPHYS(Pte)        PGMSHWPTEPAE_GET_HCPHYS(Pte)
# define SHW_PTE_LOG64(Pte)             PGMSHWPTEPAE_GET_LOG(Pte)
# define SHW_PTE_GET_U(Pte)             PGMSHWPTEPAE_GET_U(Pte) /**< Use with care. */
# define SHW_PTE_SET(Pte, uNew)         PGMSHWPTEPAE_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET(Pte, uNew)  PGMSHWPTEPAE_ATOMIC_SET(Pte, uNew)
# define SHW_PTE_ATOMIC_SET2(Pte, Pte2) PGMSHWPTEPAE_ATOMIC_SET2(Pte, Pte2)
# define SHW_PTE_SET_RO(Pte)            PGMSHWPTEPAE_SET_RO(Pte)
# define SHW_PTE_SET_RW(Pte)            PGMSHWPTEPAE_SET_RW(Pte)
# define SHW_PT_SHIFT                   X86_PT_PAE_SHIFT
# define SHW_PT_MASK                    X86_PT_PAE_MASK

# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64 || /* whatever: */ PGM_SHW_TYPE == PGM_TYPE_NONE
#  define SHW_PDPT_SHIFT                X86_PDPT_SHIFT
#  define SHW_PDPT_MASK                 X86_PDPT_MASK_AMD64
#  define SHW_PDPE_PG_MASK              X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES          (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
#  define SHW_PDPT_SHIFT                X86_PDPT_SHIFT
#  define SHW_PDPT_MASK                 X86_PDPT_MASK_PAE
#  define SHW_PDPE_PG_MASK              X86_PDPE_PG_MASK
#  define SHW_TOTAL_PD_ENTRIES          (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
#endif

#if PGM_SHW_TYPE == PGM_TYPE_NONE && PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
# error "PGM_TYPE_IS_NESTED_OR_EPT is true for PGM_TYPE_NONE!"
#endif
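
#if 0 /* Illustrative sketch only, not compiled and not part of the original
       * file: the SHW_* abstraction above lets the template code below
       * manipulate shadow PTEs without caring whether PGM_SHW_TYPE is
       * 32-bit, PAE/AMD64 or EPT.  Function and parameter names are made up. */
static void shwExampleWriteProtectPte(PSHWPT pPT, unsigned iPte)
{
    SHWPTE Pte = pPT->a[iPte];
    if (SHW_PTE_IS_P_RW(Pte))                   /* present and writable? */
    {
        SHW_PTE_SET_RO(Pte);                    /* clear the write permission in the local copy */
        SHW_PTE_ATOMIC_SET2(pPT->a[iPte], Pte); /* publish the modified entry atomically */
    }
}
#endif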


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys);
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags);
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu);
#ifdef IN_RING3
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
#endif
RT_C_DECLS_END


/**
 * Enters the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)

# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    RTGCPHYS    GCPhysCR3;
    PGMPOOLKIND enmKind;
    if (pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_EPT)
    {
        GCPhysCR3 = RT_BIT_64(63);
        enmKind   = PGMPOOLKIND_ROOT_NESTED;
    }
    else
    {
        GCPhysCR3 = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
        enmKind   = PGMPOOLKIND_EPT_PML4_FOR_EPT_PML4;
    }
# else
    RTGCPHYS const    GCPhysCR3 = RT_BIT_64(63);
    PGMPOOLKIND const enmKind   = PGMPOOLKIND_ROOT_NESTED;
# endif
    PVMCC const pVM = pVCpu->CTX_SUFF(pVM);

    Assert(HMIsNestedPagingActive(pVM));
    Assert(pVM->pgm.s.fNestedPaging);
    Assert(!pVCpu->pgm.s.pShwPageCR3R3);

    PGM_LOCK_VOID(pVM);

    PPGMPOOLPAGE pNewShwPageCR3;
    int rc = pgmPoolAlloc(pVM, GCPhysCR3, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          NIL_PGMPOOL_IDX, UINT32_MAX, true /*fLockPage*/,
                          &pNewShwPageCR3);
    AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);

    pVCpu->pgm.s.pShwPageCR3R3 = pgmPoolConvertPageToR3(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);
    pVCpu->pgm.s.pShwPageCR3R0 = pgmPoolConvertPageToR0(pVM->pgm.s.CTX_SUFF(pPool), pNewShwPageCR3);

    PGM_UNLOCK(pVM);

    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVCpu->pgm.s.pShwPageCR3R3, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
#else
    NOREF(pVCpu);
#endif
    return VINF_SUCCESS;
}


/**
 * Exits the shadow mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu)
{
#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
    {
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

        PGM_LOCK_VOID(pVM);

# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
        if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
            pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

        /* Do *not* unlock this page as we have two of them floating around in the 32-bit host & 64-bit guest case.
         * We currently assert when you try to free one of them; don't bother to really allow this.
         *
         * Note that this is two nested paging root pages max. This isn't a leak. They are reused.
         */
        /* pgmPoolUnlockPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)); */

        pgmPoolFreeByPage(pPool, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3), NIL_PGMPOOL_IDX, UINT32_MAX);
        pVCpu->pgm.s.pShwPageCR3R3 = 0;
        pVCpu->pgm.s.pShwPageCR3R0 = 0;

        PGM_UNLOCK(pVM);

        Log(("Leave nested shadow paging mode\n"));
    }
#else
    RT_NOREF_PV(pVCpu);
#endif
    return VINF_SUCCESS;
}


#if 0
PGM_SHW_DECL(int, NestedGetPage)(PVMCPUCC pVCpu, PEPTPD pEptPd, PPGMPTWALK pWalk, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_EPT
    RTGCPHYS const GCPhysNested = pWalk->GCPhysNested;
    unsigned const iEptPd       = ((GCPhysNested >> SHW_PD_SHIFT) & SHW_PD_MASK);
    Assert(iEptPd < EPT_PG_ENTRIES);
    SHWPDE EptPde = pEptPd->a[iEptPd];
    if (!SHW_PDE_IS_P(EptPde))
    {
        *pfFlags = 0;
        *pHCPhys = NIL_RTHCPHYS;
        return VERR_PAGE_TABLE_NOT_PRESENT;
    }

    if (SHW_PDE_IS_BIG(EptPde))
    {
        Assert(pWalk->fBigPage);
        if (pfFlags)
            *pfFlags = (EptPde.u & ~SHW_PDE_PG_MASK);
        if (pHCPhys)
            *pHCPhys = (EptPde.u & EPT_PDE2M_PG_MASK) + (pWalk->GCPhys & (RT_BIT(EPT_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);
        return VINF_SUCCESS;
    }

    PSHWPT pEptPt;
    int const rc = PGM_HCPHYS_2_PTR(pVCpu->CTX_SUFF(pVM), pVCpu, EptPde.u & EPT_PDE_PG_MASK, &pEptPt);
    if (RT_FAILURE(rc))
    {
        *pfFlags = 0;
        *pHCPhys = NIL_RTHCPHYS;
        return rc;
    }

    unsigned const iEptPt = (GCPhysNested >> SHW_PT_SHIFT) & SHW_PT_MASK;
    Assert(iEptPt < EPT_PG_ENTRIES);
    SHWPTE EptPte = pEptPt->a[iEptPt];
    if (!SHW_PTE_IS_P(EptPte))
    {
        *pfFlags = 0;
        *pHCPhys = NIL_RTHCPHYS;
        return VERR_PAGE_NOT_PRESENT;
    }

    if (pfFlags)
    {
        /* Read, Write and Execute bits (Present mask) are cumulative. */
        *pfFlags = (SHW_PTE_GET_U(EptPte) & ~SHW_PTE_PG_MASK)
                 & ((EptPde.u & EPT_PRESENT_MASK) | ~(uint64_t)EPT_PRESENT_MASK);
    }
    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(EptPte);
    return VINF_SUCCESS;

#else /* PGM_SHW_TYPE != PGM_TYPE_EPT */
    RT_NOREF(pVCpu, pEptPd, pWalk, pfFlags, pHCPhys);
    AssertFailed();
    return VERR_PGM_SHW_NONE_IPE;
#endif /* PGM_SHW_TYPE != PGM_TYPE_EPT */
}
#endif


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr);
    AssertFailed();
    *pfFlags = 0;
    *pHCPhys = NIL_RTHCPHYS;
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Get the PDE.
     */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
    X86PDEPAE Pde;

    /* PML4 */
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
    if (!(Pml4e.u & X86_PML4E_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PDPT */
    PX86PDPT pPDPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    X86PDPE Pdpe = pPDPT->a[iPDPT];
    if (!(Pdpe.u & X86_PDPE_P))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* PD */
    PX86PDPAE pPd;
    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
    if (RT_FAILURE(rc))
        return rc;
    const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
    Pde = pPd->a[iPd];

    /* Merge accessed, write, user and no-execute bits into the PDE. */
    AssertCompile(X86_PML4E_A == X86_PDPE_A && X86_PML4E_A == X86_PDE_A);
    AssertCompile(X86_PML4E_RW == X86_PDPE_RW && X86_PML4E_RW == X86_PDE_RW);
    AssertCompile(X86_PML4E_US == X86_PDPE_US && X86_PML4E_US == X86_PDE_US);
    AssertCompile(X86_PML4E_NX == X86_PDPE_LM_NX && X86_PML4E_NX == X86_PDE_PAE_NX);
    Pde.u &= (Pml4e.u & Pdpe.u) | ~(X86PGPAEUINT)(X86_PML4E_A | X86_PML4E_RW | X86_PML4E_US);
    Pde.u |= (Pml4e.u | Pdpe.u) & X86_PML4E_NX;
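    /* Worked example (illustrative, not from the original source): if the
     * PML4E lacks US but the PDPE and PDE have it, the AND above clears US
     * in Pde; if any of the three levels sets NX, the OR above sets NX in
     * Pde.  I.e. A/RW/US accumulate by AND down the hierarchy, NX by OR. */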

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
    X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
    /*
     * We're currently ASSUMING that the SLAT mode here is always "direct".
     * If a guest (e.g., nested Hyper-V) turns out to require this
     * (probably while modifying shadow non-MMIO2 pages), handle it by
     * calling NestedGetPage above (currently compiled out). Asserting for now.
     */
    Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
    PEPTPD pPDDst;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
    if (rc == VINF_SUCCESS) /** @todo this function isn't expected to return informational status codes. Check callers / fix. */
    { /* likely */ }
    else
    {
        AssertRC(rc);
        return rc;
    }
    Assert(pPDDst);

    const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
    EPTPDE Pde = pPDDst->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT
    X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);

# else
#  error "Misconfigured PGM_SHW_TYPE or something..."
# endif
    if (!SHW_PDE_IS_P(Pde))
        return VERR_PAGE_TABLE_NOT_PRESENT;

    /* Deal with large pages. */
    if (SHW_PDE_IS_BIG(Pde))
    {
        /*
         * Store the results.
         * RW and US flags depend on the entire page translation hierarchy - except for
         * legacy PAE which has a simplified PDPE.
         */
        if (pfFlags)
        {
            *pfFlags = (Pde.u & ~SHW_PDE_PG_MASK);
# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
            if (   (Pde.u & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
                && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
               )
                *pfFlags |= X86_PTE_PAE_NX;
# endif
        }

        if (pHCPhys)
            *pHCPhys = (Pde.u & SHW_PDE_PG_MASK) + (GCPtr & (RT_BIT(SHW_PD_SHIFT) - 1) & X86_PAGE_4K_BASE_MASK);
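        /* Worked example (illustrative, not from the original source): with
         * 2 MB big pages SHW_PD_SHIFT is 21, so RT_BIT(21) - 1 = 0x1fffff and
         * the line above adds (GCPtr & 0x1ff000), the 4K-aligned offset of
         * GCPtr within the big page, to the page frame taken from the PDE. */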

        return VINF_SUCCESS;
    }

    /*
     * Get PT entry.
     */
    PSHWPT pPT;
    int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
    if (RT_FAILURE(rc2))
        return rc2;
    const unsigned iPt = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
    SHWPTE Pte = pPT->a[iPt];
    if (!SHW_PTE_IS_P(Pte))
        return VERR_PAGE_NOT_PRESENT;

    /*
     * Store the results.
     * RW and US flags depend on the entire page translation hierarchy - except for
     * legacy PAE which has a simplified PDPE.
     */
    if (pfFlags)
    {
        *pfFlags = (SHW_PTE_GET_U(Pte) & ~SHW_PTE_PG_MASK)
                 & ((Pde.u & (X86_PTE_RW | X86_PTE_US)) | ~(uint64_t)(X86_PTE_RW | X86_PTE_US));

# if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        /* The NX bit is determined by a bitwise OR between the PT and PD entries. */
        if (   ((SHW_PTE_GET_U(Pte) | Pde.u) & X86_PTE_PAE_NX)
#  if PGM_WITH_NX(PGM_SHW_TYPE, PGM_SHW_TYPE)
            && CPUMIsGuestNXEnabled(pVCpu) /** @todo why do we have to check the guest state here? */
#  endif
           )
            *pfFlags |= X86_PTE_PAE_NX;
# endif
    }

    if (pHCPhys)
        *pHCPhys = SHW_PTE_GET_HCPHYS(Pte);

    return VINF_SUCCESS;
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
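
#if 0 /* Illustrative sketch only, not compiled and not part of the original
       * file: a typical way callers consume this code is via PGMShwGetPage()
       * in PGMAll.cpp, which dispatches to the current mode's GetPage.  The
       * variables pVCpu and GCPtrPage are assumed to exist. */
    uint64_t fShwFlags;
    RTHCPHYS HCPhysShw;
    int rcShw = PGMShwGetPage(pVCpu, GCPtrPage, &fShwFlags, &HCPhysShw);
    if (RT_SUCCESS(rcShw) && (fShwFlags & X86_PTE_RW))
    {
        /* The page is present and writable in the shadow (or nested) tables. */
    }
#endif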


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range. Page aligned!
 * @param   cb          Size (in bytes) of the range to apply the modification to. Page aligned!
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be extremely CAREFUL with ~'ing values because they can be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
#if PGM_SHW_TYPE == PGM_TYPE_NONE
    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask, fOpFlags);
    AssertFailed();
    return VERR_PGM_SHW_NONE_IPE;

#else /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Walk page tables and pages till we're done.
     */
    int rc;
    for (;;)
    {
        /*
         * Get the PDE.
         */
# if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_NESTED_AMD64
        X86PDEPAE Pde;
        /* PML4 */
        X86PML4E Pml4e = pgmShwGetLongModePML4E(pVCpu, GCPtr);
        if (!(Pml4e.u & X86_PML4E_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PDPT */
        PX86PDPT pPDPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPDPT = (GCPtr >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
        X86PDPE Pdpe = pPDPT->a[iPDPT];
        if (!(Pdpe.u & X86_PDPE_P))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        /* PD */
        PX86PDPAE pPd;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
        if (RT_FAILURE(rc))
            return rc;
        const unsigned iPd = (GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK;
        Pde = pPd->a[iPd];

# elif PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_NESTED_PAE
        X86PDEPAE Pde = pgmShwGetPaePDE(pVCpu, GCPtr);

# elif PGM_SHW_TYPE == PGM_TYPE_EPT
        EPTPDE Pde;
        const unsigned iPd = ((GCPtr >> SHW_PD_SHIFT) & SHW_PD_MASK);
        if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT)
        {
            PEPTPD pPDDst;
            rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL, &pPDDst);
            if (rc != VINF_SUCCESS)
            {
                AssertRC(rc);
                return rc;
            }
            Assert(pPDDst);
            Pde = pPDDst->a[iPd];
        }
        else
        {
#  ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
            Assert(!(GCPtr & GUEST_PAGE_OFFSET_MASK));
            PGMPTWALK    Walk;
            PGMPTWALKGST GstWalkAll;
            RTGCPHYS const GCPhysNestedPage = GCPtr;
            rc = pgmGstSlatWalk(pVCpu, GCPhysNestedPage, false /*fIsLinearAddrValid*/, 0 /*GCPtrNestedFault*/, &Walk,
                                &GstWalkAll);
            if (RT_SUCCESS(rc))
            {
#   ifdef DEBUG_ramshankar
                /* Paranoia. */
                Assert(GstWalkAll.enmType == PGMPTWALKGSTTYPE_EPT);
                Assert(Walk.fSucceeded);
                Assert(Walk.fEffective & (PGM_PTATTRS_EPT_R_MASK | PGM_PTATTRS_EPT_W_MASK | PGM_PTATTRS_EPT_X_SUPER_MASK));
                Assert(Walk.fIsSlat);
                Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_R_MASK) == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_R_MASK));
                Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK) == RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_W_MASK));
                Assert(RT_BOOL(Walk.fEffective & PGM_PTATTRS_NX_MASK) == !RT_BOOL(Walk.fEffective & PGM_PTATTRS_EPT_X_SUPER_MASK));
#   endif
                PGM_A20_ASSERT_MASKED(pVCpu, Walk.GCPhys);
                Assert(!(fFlags & X86_PTE_RW) || (Walk.fEffective & PGM_PTATTRS_W_MASK));

                /* Update the nested-guest physical address with the translated guest-physical address. */
                GCPtr = Walk.GCPhys;

                /* Get the PD. */
                PSHWPD pEptPd;
                rc = pgmShwGetNestedEPTPDPtr(pVCpu, GCPhysNestedPage, NULL /*ppPdpt*/, &pEptPd, &GstWalkAll);
                AssertRCReturn(rc, rc);
                Assert(pEptPd);
                Assert(iPd < EPT_PG_ENTRIES);
                Pde = pEptPd->a[iPd];
            }
            else
            {
                Log(("Failed to translate nested-guest physical address %#RGp rc=%Rrc\n", GCPhysNestedPage, rc));
                return rc;
            }

#  else  /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
            AssertFailed();
            return VERR_PGM_SHW_NONE_IPE;
#  endif /* !VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
        }

# else /* PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_NESTED_32BIT */
        X86PDE Pde = pgmShwGet32BitPDE(pVCpu, GCPtr);
# endif
        if (!SHW_PDE_IS_P(Pde))
            return VERR_PAGE_TABLE_NOT_PRESENT;

        AssertFatalMsg(!SHW_PDE_IS_BIG(Pde), ("Pde=%#RX64\n", (uint64_t)Pde.u));

        /*
         * Map the page table.
         */
        PSHWPT pPT;
        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
        if (RT_FAILURE(rc))
            return rc;

        unsigned iPTE = (GCPtr >> SHW_PT_SHIFT) & SHW_PT_MASK;
        while (iPTE < RT_ELEMENTS(pPT->a))
        {
            if (SHW_PTE_IS_P(pPT->a[iPTE]))
            {
                SHWPTE const OrgPte = pPT->a[iPTE];
                SHWPTE       NewPte;

                SHW_PTE_SET(NewPte, (SHW_PTE_GET_U(OrgPte) & (fMask | SHW_PTE_PG_MASK)) | (fFlags & ~SHW_PTE_PG_MASK));
                if (!SHW_PTE_IS_P(NewPte))
                {
                    /** @todo Some CSAM code path might end up here and upset
                     *        the page pool. */
                    AssertMsgFailed(("NewPte=%#RX64 OrgPte=%#RX64 GCPtr=%#RGv\n", SHW_PTE_LOG64(NewPte), SHW_PTE_LOG64(OrgPte), GCPtr));
                }
                else if (   SHW_PTE_IS_RW(NewPte)
                         && !SHW_PTE_IS_RW(OrgPte)
                         && !(fOpFlags & PGM_MK_PG_IS_MMIO2) )
                {
                    /** @todo Optimize \#PF handling by caching data.  We can
                     *        then use this when PGM_MK_PG_IS_WRITE_FAULT is
                     *        set instead of resolving the guest physical
                     *        address yet again. */
                    PGMPTWALK GstWalk;
                    rc = PGMGstGetPage(pVCpu, GCPtr, &GstWalk);
                    AssertRC(rc);
                    if (RT_SUCCESS(rc))
                    {
                        Assert((GstWalk.fEffective & X86_PTE_RW) || !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP /* allow netware hack */));
                        PPGMPAGE pPage = pgmPhysGetPage(pVM, GstWalk.GCPhys);
                        Assert(pPage);
                        if (pPage)
                        {
                            rc = pgmPhysPageMakeWritable(pVM, pPage, GstWalk.GCPhys);
                            AssertRCReturn(rc, rc);
                            Log(("%s: pgmPhysPageMakeWritable on %RGv / %RGp %R[pgmpage]\n", __PRETTY_FUNCTION__, GCPtr, GstWalk.GCPhys, pPage));
                        }
                    }
                }

                SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
                Assert((SHW_PTE_GET_U(NewPte) & EPT_E_LEAF) == (SHW_PTE_GET_U(OrgPte) & EPT_E_LEAF));

# if PGM_SHW_TYPE == PGM_TYPE_EPT
                HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
# else
                PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
# endif
            }

            /* next page */
            cb -= HOST_PAGE_SIZE;
            if (!cb)
                return VINF_SUCCESS;
            GCPtr += HOST_PAGE_SIZE;
            iPTE++;
        }
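
        /* The current page table is exhausted (iPTE wrapped) but cb hasn't
         * reached zero yet; loop back to re-fetch the PDE/PT covering the
         * advanced GCPtr. */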
    }
#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
}
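
#if 0 /* Illustrative sketch only, not compiled and not part of the original
       * file: write-protecting one page with ModifyPage.  Note the explicit
       * 64-bit cast when building fMask, per the warning in the doc comment
       * above (~X86_PTE_RW would otherwise be a 32-bit value).  pVCpu and
       * GCPtrPage are assumed to exist; real callers typically go through
       * the PGMShwMakePage*() wrappers in PGMAll.cpp. */
    int rcShw = PGM_SHW_NAME(ModifyPage)(pVCpu, GCPtrPage, GUEST_PAGE_SIZE,
                                         0 /*fFlags: no bits to set*/,
                                         ~(uint64_t)X86_PTE_RW /*fMask: keep everything but RW*/,
                                         0 /*fOpFlags*/);
#endif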


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to shadow mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_SHW_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif
