VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMGC/PGMGCGst.h@5605

Last change on this file since 5605 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 12.8 KB
/* $Id: PGMGCGst.h 4071 2007-08-07 17:07:59Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - Guest Context.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE4M_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK

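/* This header is a guest paging-mode template: the includer defines
 * PGM_GST_TYPE before pulling it in, and the GST* macros (cleared above,
 * redefined below) map the mode-neutral names used by the template code onto
 * either the 32-bit (4 MB big pages) or the PAE (2 MB big pages) x86 paging
 * structures. */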
#if PGM_GST_TYPE == PGM_TYPE_32BIT
# define GSTPT                      X86PT
# define PGSTPT                     PX86PT
# define GSTPTE                     X86PTE
# define PGSTPTE                    PX86PTE
# define GSTPD                      X86PD
# define PGSTPD                     PX86PD
# define GSTPDE                     X86PDE
# define PGSTPDE                    PX86PDE
# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
# define GST_PDE4M_PG_MASK          X86_PDE4M_PG_MASK
# define GST_PD_SHIFT               X86_PD_SHIFT
# define GST_PD_MASK                X86_PD_MASK
# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
# define GST_PT_SHIFT               X86_PT_SHIFT
# define GST_PT_MASK                X86_PT_MASK
#else
# define GSTPT                      X86PTPAE
# define PGSTPT                     PX86PTPAE
# define GSTPTE                     X86PTEPAE
# define PGSTPTE                    PX86PTEPAE
# define GSTPD                      X86PDPAE
# define PGSTPD                     PX86PDPAE
# define GSTPDE                     X86PDEPAE
# define PGSTPDE                    PX86PDEPAE
# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK
# define GST_PDE4M_PG_MASK          X86_PDE4M_PAE_PG_MASK
# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
# define GST_PD_MASK                X86_PD_PAE_MASK
# define GST_PTE_PG_MASK            X86_PTE_PAE_PG_MASK
# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
# define GST_PT_MASK                X86_PT_PAE_MASK
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
PGMGCDECL(int) pgmGCGst32BitWriteHandlerCR3(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMGCDECL(int) pgmGCGstPAEWriteHandlerCR3(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMGCDECL(int) pgmGCGstPAEWriteHandlerPD(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
__END_DECLS

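/* The handlers declared above (bodies below) are physical write access
 * handlers that run in guest context (raw mode).  Judging by their doc
 * comments and the AssertMsg on fMappingsFixed, PGM registers them on the
 * guest CR3 page and, for PAE guests, on the guest page directories, so that
 * most guest PD/PDPT writes can be handled without leaving guest context. */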


#if PGM_GST_TYPE == PGM_TYPE_32BIT

/**
 * Write access handler for the Guest CR3 page in 32-bit mode.
 *
 * This will try to interpret the instruction; on failure it falls back to
 * the recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on a conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGMGCDECL(int) pgmGCGst32BitWriteHandlerCR3(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Check whether the modified PDEs are present and conflict with our mappings.
         */
        const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
        const unsigned iPD1 = offPD / sizeof(X86PDE);
        const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);

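        /* cb is the size of the emulated write (asserted below to be 1..8
         * bytes); iPD1 and iPD2 are the first and last 4-byte page-directory
         * entries that write touches. */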
        Assert(cb > 0 && cb <= 8);
        Assert(iPD1 < ELEMENTS(pVM->pgm.s.pGuestPDGC->a));
        Assert(iPD2 < ELEMENTS(pVM->pgm.s.pGuestPDGC->a));

#ifdef DEBUG
        Log(("pgmGCGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD1, iPD1 << X86_PD_SHIFT));
        if (iPD1 != iPD2)
            Log(("pgmGCGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD2, iPD2 << X86_PD_SHIFT));
#endif

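        /* Unless the hypervisor mappings are fixed they may be relocated in
         * the guest address space.  If a PDE that is now present overlaps one
         * of those mappings, force a full CR3 resync (VINF_PGM_SYNC_CR3) so
         * the conflict is resolved in the host context, as noted in the doc
         * comment above. */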
        if (!pVM->pgm.s.fMappingsFixed)
        {
            PX86PD pPDSrc = pVM->pgm.s.pGuestPDGC;
            if (    (   pPDSrc->a[iPD1].n.u1Present
                     && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
                ||  (   iPD1 != iPD2
                     && pPDSrc->a[iPD2].n.u1Present
                     && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
               )
            {
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
                VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                if (rc == VINF_SUCCESS)
                    rc = VINF_PGM_SYNC_CR3;
                Log(("pgmGCGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
                return rc;
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        Log(("pgmGCGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
    }
    return rc;
}

#endif /* PGM_TYPE_32BIT */


#if PGM_GST_TYPE == PGM_TYPE_PAE

/**
 * Write access handler for the Guest CR3 page in PAE mode.
 *
 * This will try to interpret the instruction; on failure it falls back to
 * the recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on a conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGMGCDECL(int) pgmGCGstPAEWriteHandlerCR3(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         */
        for (unsigned i = 0; i < 4; i++)
        {
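            /* Each PDPTE points the CPU at one of the four PAE page
             * directories.  The physical address currently installed in
             * PDPTE[i] is compared with the one PGM is monitoring
             * (aGCPhysGstPaePDsMonitored[i]); a mismatch means the guest has
             * re-pointed that PDPTE. */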
            if (    CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].n.u1Present
                &&  (   CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u & X86_PDPE_PG_MASK)
                    !=  pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                /*
                 * The PDPE has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with a
                 * half-updated PDPE. However, we assume that the guest OS is disabling
                 * interrupts and being extremely careful (cmpxchg8b) when updating a
                 * PDPE where it's executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
                Log(("pgmGCGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
                     i, CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
            }
        }

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    }
    Log(("pgmGCGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
    return rc;
}


/**
 * Write access handler for the Guest PDs in PAE mode.
 *
 * This will try to interpret the instruction; on failure it falls back to
 * the recompiler. It checks whether the changed PDEs are marked present and
 * conflict with our mappings; on a conflict we'll switch to the host context
 * and resolve it there.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      User argument.
 */
PGMGCDECL(int) pgmGCGstPAEWriteHandlerPD(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));

    /*
     * Try to interpret the instruction.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (VBOX_SUCCESS(rc) && cb)
    {
        /*
         * Figure out which of the 4 PDs this is.
         */
        RTGCUINTPTR i;
        for (i = 0; i < 4; i++)
            if (CTXSUFF(pVM->pgm.s.pGstPaePDPTR)->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
            {
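                /* GCPhysFault matched the physical address stored in PDPTE[i],
                 * so the write hit the page directory that PDPTE refers to;
                 * i is the PDPT index of that directory. */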
                PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPTR_SHIFT);
                const RTGCUINTPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;
                const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
                const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);
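                /* Same first/last-entry calculation as in the 32-bit handler
                 * above, except that PAE PDEs are 8 bytes wide. */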

                Assert(cb > 0 && cb <= 8);
                Assert(iPD1 < X86_PG_PAE_ENTRIES);
                Assert(iPD2 < X86_PG_PAE_ENTRIES);

#ifdef DEBUG
                Log(("pgmGCGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%VGv)\n",
                     i, iPD1, (i << X86_PDPTR_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
                if (iPD1 != iPD2)
                    Log(("pgmGCGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%VGv)\n",
                         i, iPD2, (i << X86_PDPTR_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
#endif

                if (!pVM->pgm.s.fMappingsFixed)
                {
                    if (    (   pPDSrc->a[iPD1].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPTR_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
                        ||  (   iPD1 != iPD2
                             && pPDSrc->a[iPD2].n.u1Present
                             && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPTR_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
                       )
                    {
                        Log(("pgmGCGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
                        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteConflict);
                        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                        return VINF_PGM_SYNC_CR3;
                    }
                }
                break; /* ASSUMES no duplicate entries... */
            }
        Assert(i < 4);

        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteHandled);
    }
    else
    {
        Assert(VBOX_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
        else
            Log(("pgmGCGstPaeWriteHandlerPD: returns %Rrc\n", rc));
        STAM_COUNTER_INC(&pVM->pgm.s.StatGCGuestCR3WriteUnhandled);
    }
    return rc;
}

#endif /* PGM_TYPE_PAE */

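The GST_* shift and mask macros in this template abstract nothing more exotic than the standard x86 linear-address split. As a quick illustration (not part of the VirtualBox sources), the following self-contained program decomposes a guest linear address the way the two template instantiations would; the numeric shifts and masks are the architectural values for 32-bit and PAE paging, which the X86_* macros referenced above are assumed to expand to.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t GCPtr = 0xC0345678;   /* arbitrary guest linear address */

    /* 32-bit guest (PGM_TYPE_32BIT): 10+10+12 bit split, 4 MB big pages. */
    unsigned iPD32  = (GCPtr >> 22) & 0x3ff;   /* GST_PD_SHIFT / GST_PD_MASK */
    unsigned iPT32  = (GCPtr >> 12) & 0x3ff;   /* GST_PT_SHIFT / GST_PT_MASK */
    unsigned off    =  GCPtr        & 0xfff;   /* PAGE_OFFSET_MASK           */

    /* PAE guest (PGM_TYPE_PAE): 2+9+9+12 bit split, 2 MB big pages. */
    unsigned iPdpt  = (GCPtr >> 30) & 0x3;     /* X86_PDPTR_SHIFT (4-entry PDPT) */
    unsigned iPDPae = (GCPtr >> 21) & 0x1ff;   /* GST_PD_SHIFT / GST_PD_MASK     */
    unsigned iPTPae = (GCPtr >> 12) & 0x1ff;   /* GST_PT_SHIFT / GST_PT_MASK     */

    printf("32-bit: iPD=%u iPT=%u offset=%#x\n", iPD32, iPT32, off);
    printf("PAE:    iPDPT=%u iPD=%u iPT=%u offset=%#x\n", iPdpt, iPDPae, iPTPae, off);
    return 0;
}

Big pages (the 4 MB / 2 MB mappings covered by GST_BIG_PAGE_SIZE and GST_BIG_PAGE_OFFSET_MASK) skip the page-table level entirely, so only the directory index and a larger offset apply in that case.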