VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMGst.h@ 14515

Last change on this file since 14515 was 14301, checked in by vboxsync, 16 years ago

Synced some (inactive) new paging code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 20.6 KB
Line 
1/* $Id: PGMGst.h 14301 2008-11-18 13:31:42Z vboxsync $ */
2/** @file
3 * VBox - Page Manager / Monitor, Guest Paging Template.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/*
 * This template header is included once per guest paging mode. Undefine any
 * GST_* mappings left over from a previous inclusion so they can be redefined
 * below for the current PGM_GST_TYPE.
 */
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE_BIG_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK
#undef GST_TOTAL_PD_ENTRIES
#undef GST_CR3_PAGE_MASK
#undef GST_PDPE_ENTRIES
#undef GST_GET_PDE_BIG_PG_GCPHYS

/*
 * Legacy 32-bit paging layout. REAL and PROT modes reuse the 32-bit
 * definitions since they have no guest paging structures of their own.
 */
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_REAL \
    || PGM_GST_TYPE == PGM_TYPE_PROT
# define GSTPT                      X86PT
# define PGSTPT                     PX86PT
# define GSTPTE                     X86PTE
# define PGSTPTE                    PX86PTE
# define GSTPD                      X86PD
# define PGSTPD                     PX86PD
# define GSTPDE                     X86PDE
# define PGSTPDE                    PX86PDE
# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE4M_PG_MASK
/* 32-bit 4MB pages need a helper (PSE-36 style address construction). */
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
# define GST_PD_SHIFT               X86_PD_SHIFT
# define GST_PD_MASK                X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES       X86_PG_ENTRIES
# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
# define GST_PT_SHIFT               X86_PT_SHIFT
# define GST_PT_MASK                X86_PT_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAGE_MASK

/*
 * PAE and AMD64 (long mode) share the 64-bit entry layout; they differ only
 * in the number of PDPT entries and therefore total PD entries.
 */
#elif PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
# define GSTPT                      X86PTPAE
# define PGSTPT                     PX86PTPAE
# define GSTPTE                     X86PTEPAE
# define PGSTPTE                    PX86PTEPAE
# define GSTPD                      X86PDPAE
# define PGSTPD                     PX86PDPAE
# define GSTPDE                     X86PDEPAE
# define PGSTPDE                    PX86PDEPAE
# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE2M_PAE_PG_MASK
/* PAE/AMD64 2MB pages: the physical address comes straight from the PDE. */
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  (PdeGst.u & GST_PDE_BIG_PG_MASK)
# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
# define GST_PD_MASK                X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_PAE_PDPE_ENTRIES
# else
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_AMD64_PDPE_ENTRIES
# endif
# define GST_PTE_PG_MASK            X86_PTE_PAE_PG_MASK
# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
# define GST_PT_MASK                X86_PT_PAE_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAE_PAGE_MASK
#endif
100
101
/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
/* Ring-3 only entry points (mode init, enter/exit, relocation). */
PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
PGM_GST_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVM pVM, RTGCPTR offDelta);
PGM_GST_DECL(int, Exit)(PVM pVM);

#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
/* Physical access handlers used to monitor guest CR3 / page directory writes
   (only needed when the PGM pool does not own all paging monitoring). */
static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#endif

/* Entry points available in all contexts (R3, R0 and RC). */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
#endif
__END_DECLS
130
/**
 * Initializes the guest bit of the paging mode data.
 *
 * Fills in the R3 function pointers directly and, when allowed, resolves the
 * RC and R0 counterparts via the PDM loader.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pModeData       The mode data structure to fill in. Its uGstType
 *                          must already be set to PGM_GST_TYPE by the caller.
 * @param   fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
 *                          This is used early in the init process to avoid trouble with PDM
 *                          not being initialized yet.
 */
PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0)
{
    Assert(pModeData->uGstType == PGM_GST_TYPE);

    /* Ring-3: direct function pointers, no symbol lookup needed. */
    pModeData->pfnR3GstRelocate     = PGM_GST_NAME(Relocate);
    pModeData->pfnR3GstExit         = PGM_GST_NAME(Exit);
    pModeData->pfnR3GstGetPDE       = PGM_GST_NAME(GetPDE);
    pModeData->pfnR3GstGetPage      = PGM_GST_NAME(GetPage);
    pModeData->pfnR3GstModifyPage   = PGM_GST_NAME(ModifyPage);
    pModeData->pfnR3GstMapCR3       = PGM_GST_NAME(MapCR3);
    pModeData->pfnR3GstUnmapCR3     = PGM_GST_NAME(UnmapCR3);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    pModeData->pfnR3GstMonitorCR3   = PGM_GST_NAME(MonitorCR3);
    pModeData->pfnR3GstUnmonitorCR3 = PGM_GST_NAME(UnmonitorCR3);
#endif

#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    /* Note: the same WriteHandlerCR3 symbol is installed for both the plain
       and the PAE handler slots; only the description strings differ. */
    pModeData->pfnR3GstWriteHandlerCR3    = PGM_GST_NAME(WriteHandlerCR3);
    pModeData->pszR3GstWriteHandlerCR3    = "Guest CR3 Write access handler";
    pModeData->pfnR3GstPAEWriteHandlerCR3 = PGM_GST_NAME(WriteHandlerCR3);
    pModeData->pszR3GstPAEWriteHandlerCR3 = "Guest CR3 Write access handler (PAE)";
# else
    pModeData->pfnR3GstWriteHandlerCR3    = NULL;
    pModeData->pszR3GstWriteHandlerCR3    = NULL;
    pModeData->pfnR3GstPAEWriteHandlerCR3 = NULL;
    pModeData->pszR3GstPAEWriteHandlerCR3 = NULL;
# endif
#endif

    if (fResolveGCAndR0)
    {
        int rc;

#if PGM_SHW_TYPE != PGM_TYPE_AMD64 /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
        /* GC (raw-mode context): resolve each symbol, bailing out on the first failure. */
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPage), &pModeData->pfnRCGstGetPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(ModifyPage), &pModeData->pfnRCGstModifyPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(ModifyPage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPDE), &pModeData->pfnRCGstGetPDE);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPDE), rc), rc);
# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(MonitorCR3), &pModeData->pfnRCGstMonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(MonitorCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(UnmonitorCR3), &pModeData->pfnRCGstUnmonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(UnmonitorCR3), rc), rc);
# endif
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(MapCR3), &pModeData->pfnRCGstMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(UnmapCR3), &pModeData->pfnRCGstUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(UnmapCR3), rc), rc);
# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
#  if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
        /* Same WriteHandlerCR3 symbol resolved into both handler slots, mirroring the R3 setup above. */
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(WriteHandlerCR3), &pModeData->pfnRCGstWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(WriteHandlerCR3), &pModeData->pfnRCGstPAEWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
#  endif
# endif
#endif /* Not AMD64 shadow paging. */

        /* Ring-0: always resolved, independent of the shadow mode. */
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPage), &pModeData->pfnR0GstGetPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(GetPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(ModifyPage), &pModeData->pfnR0GstModifyPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(ModifyPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPDE), &pModeData->pfnR0GstGetPDE);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(GetPDE), rc), rc);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MonitorCR3), &pModeData->pfnR0GstMonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(MonitorCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmonitorCR3), &pModeData->pfnR0GstUnmonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(UnmonitorCR3), rc), rc);
#endif
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MapCR3), &pModeData->pfnR0GstMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmapCR3), &pModeData->pfnR0GstUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(UnmapCR3), rc), rc);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstPAEWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
# endif
#endif
    }

    return VINF_SUCCESS;
}
232
233
/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3.
     */
    int rc = PGM_GST_NAME(MapCR3)(pVM, GCPhysCR3);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* CR3 write monitoring is only set up while mappings are still floating;
       when the pool owns paging monitoring this step is skipped entirely. */
    if (RT_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        rc = PGM_GST_NAME(MonitorCR3)(pVM, GCPhysCR3);
#endif
    return rc;
}
253
254
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pVM         The VM handle.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVM pVM, RTGCPTR offDelta)
{
    /* nothing special to do here - InitData does the job. */
    return VINF_SUCCESS;
}
267
268
/**
 * Exits the guest mode.
 *
 * Undoes the work of Enter: unmonitors (when applicable) and unmaps CR3.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 */
PGM_GST_DECL(int, Exit)(PVM pVM)
{
    int rc;

#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /* Note: when this branch is compiled in, UnmapCR3 below becomes the body
       of the if and is only reached after a successful UnmonitorCR3. */
    rc = PGM_GST_NAME(UnmonitorCR3)(pVM);
    if (RT_SUCCESS(rc))
#endif
        rc = PGM_GST_NAME(UnmapCR3)(pVM);
    return rc;
}
286
287
288#if PGM_GST_TYPE == PGM_TYPE_32BIT
289/**
290 * Physical write access for the Guest CR3 in 32-bit mode.
291 *
292 * @returns VINF_SUCCESS if the handler have carried out the operation.
293 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
294 * @param pVM VM Handle.
295 * @param GCPhys The physical address the guest is writing to.
296 * @param pvPhys The HC mapping of that address.
297 * @param pvBuf What the guest is reading/writing.
298 * @param cbBuf How much it's reading/writing.
299 * @param enmAccessType The access type.
300 * @param pvUser User argument.
301 */
302static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
303{
304 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
305 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
306 Log2(("pgmR3Gst32BitWriteHandlerCR3: ff=%#x GCPhys=%RGp pvPhys=%p cbBuf=%d pvBuf={%.*Rhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));
307
308 /*
309 * Do the write operation.
310 */
311 memcpy(pvPhys, pvBuf, cbBuf);
312 if ( !pVM->pgm.s.fMappingsFixed
313 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
314 {
315 /*
316 * Check for conflicts.
317 */
318 const RTGCPTR offPD = GCPhys & PAGE_OFFSET_MASK;
319 const unsigned iPD1 = offPD / sizeof(X86PDE);
320 const unsigned iPD2 = (unsigned)(offPD + cbBuf - 1) / sizeof(X86PDE);
321 Assert(iPD1 - iPD2 <= 1);
322 if ( ( pVM->pgm.s.pGst32BitPdR3->a[iPD1].n.u1Present
323 && pgmGetMapping(pVM, iPD1 << X86_PD_SHIFT) )
324 || ( iPD1 != iPD2
325 && pVM->pgm.s.pGst32BitPdR3->a[iPD2].n.u1Present
326 && pgmGetMapping(pVM, iPD2 << X86_PD_SHIFT) )
327 )
328 {
329 Log(("pgmR3Gst32BitWriteHandlerCR3: detected conflict. iPD1=%#x iPD2=%#x GCPhys=%RGp\n", iPD1, iPD2, GCPhys));
330 STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWriteConflict);
331 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
332 }
333 }
334
335 STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
336 return VINF_SUCCESS;
337}
338#endif /* 32BIT */
339
#if PGM_GST_TYPE == PGM_TYPE_PAE

/**
 * Physical write access handler for the Guest CR3 in PAE mode.
 *
 * Performs the write on behalf of the guest, then detects whether any PDPT
 * entry now points at a different page directory than the one currently
 * monitored, and if so schedules a monitor update.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3GstPAEWriteHandlerCR3: ff=%#x GCPhys=%RGp pvPhys=%p cbBuf=%d pvBuf={%.*Rhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation on behalf of the guest.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
    if (    !pVM->pgm.s.fMappingsFixed
        &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         */
        for (unsigned i = 0; i < 4; i++)
        {
            if (    pVM->pgm.s.pGstPaePdptR3->a[i].n.u1Present
                &&  (pVM->pgm.s.pGstPaePdptR3->a[i].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                Log(("pgmR3GstPAEWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
                     i, pVM->pgm.s.pGstPaePdptR3->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
                /*
                 * The PD has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with an half
                 * updated PDPE. However, we assume that the guest OS is disabling interrupts
                 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
                 * executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
            }
        }
    }
    /*
     * Flag an update of the monitor at the next crossroad so we don't monitor the
     * wrong pages for so long that they can be reused as code pages and freak out
     * the recompiler or something.
     */
    else
        pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;


    STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    return VINF_SUCCESS;
}
405
# if 0
/*
 * NOTE(review): this handler is compiled out (# if 0). It still references the
 * older pGstPaePdptHC member while the live PAE handler above uses
 * pGstPaePdptR3 — presumably it predates the R3/R0/RC pointer split; verify
 * before re-enabling. It also shares the unsigned-wrap assertion problem
 * (Assert(iPD1 - iPD2 <= 1) with iPD2 >= iPD1) fixed in the 32-bit handler.
 */
/**
 * Physical write access handler for a guest PAE page directory.
 *
 * @returns VINF_SUCCESS if the handler have carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3GstPAEWriteHandlerPD: ff=%#x GCPhys=%RGp pvPhys=%p cbBuf=%d pvBuf={%.*Rhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
    if (    !pVM->pgm.s.fMappingsFixed
        &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /*
         * Figure out which of the 4 PDs this is.
         */
        unsigned i;
        for (i = 0; i < 4; i++)
            if (pVM->pgm.s.pGstPaePdptHC->a[i].u == (GCPhys & X86_PTE_PAE_PG_MASK))
            {
                PX86PDPAE       pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
                const RTGCPTR   offPD  = GCPhys & PAGE_OFFSET_MASK;
                const unsigned  iPD1   = offPD / sizeof(X86PDEPAE);
                const unsigned  iPD2   = (offPD + cbBuf - 1) / sizeof(X86PDEPAE);
                Assert(iPD1 - iPD2 <= 1); /* NOTE(review): wraps when iPD2 == iPD1 + 1; should be iPD2 - iPD1. */
                if (    (   pPDSrc->a[iPD1].n.u1Present
                         && pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)) )
                    ||  (   iPD1 != iPD2
                         && pPDSrc->a[iPD2].n.u1Present
                         && pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)) )
                   )
                {
                    Log(("pgmR3GstPaePD3WriteHandler: detected conflict. i=%d iPD1=%#x iPD2=%#x GCPhys=%RGp\n",
                         i, iPD1, iPD2, GCPhys));
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWriteConflict);
                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                }
                break; /* ASSUMES no duplicate entries... */
            }
        Assert(i < 4);
    }

    STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    return VINF_SUCCESS;
}
# endif

#endif /* PAE */
468
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette