VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/SELMRC.cpp@46727

Last change on this file since 46727 was 45485, checked in by vboxsync, 12 years ago
  • *: Where possible, drop the #ifdef VBOX_WITH_RAW_RING1 when EMIsRawRing1Enabled is used.
  • SELM: Don't shadow TSS.esp1/ss1 unless ring-1 compression is enabled (also fixed a log statement there).
  • SELM: selmGuestToShadowDesc should not push ring-1 selectors into ring-2 unless EMIsRawRing1Enabled() holds true.
  • REM: Don't set CPU_INTERRUPT_EXTERNAL_EXIT in helper_ltr() for now.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.1 KB
/* $Id: SELMRC.cpp 45485 2013-04-11 14:46:04Z vboxsync $ */
/** @file
 * SELM - The Selector Manager, Guest Context.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include "SELMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>

#include "SELMInline.h"


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef LOG_ENABLED
/** Segment register names. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif


#ifdef SELM_TRACK_GUEST_GDT_CHANGES
/**
 * Synchronizes one GDT entry (guest -> shadow).
 *
 * @returns VBox strict status code (appropriate for trap handling and GC
 *          return).
 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
 * @retval  VINF_SELM_SYNC_GDT
 * @retval  VINF_EM_RESCHEDULE_REM
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Trap register frame.
 * @param   iGDTEntry   The GDT entry to sync.
 *
 * @remarks Caller checks that this isn't the LDT entry!
 */
static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
{
    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));

    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
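    /* Each descriptor is 8 bytes (sizeof(X86DESC)), so entry i lives at byte
       offset i * 8.  An entry beyond the guest's GDT limit can never be loaded
       into a segment register, so a write there is safe to ignore. */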
    if (    iGDTEntry >= SELM_GDT_ELEMENTS
        ||  offEntry > GdtrGuest.cbGdt)
        return VINF_SUCCESS; /* ignore */

    /*
     * Read the guest descriptor.
     */
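    /* First try the fast fault-tolerant guest-RAM read; if that fails (e.g.
       the page isn't present), fall back on a read through the guest page
       tables.  If both fail, punt to ring-3 for a full GDT resync. */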
    X86DESC Desc;
    int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
    if (RT_FAILURE(rc))
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
        if (RT_FAILURE(rc))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
            return VINF_EM_RESCHEDULE_REM;
        }
    }

    /*
     * Check for conflicts.
     */
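    /* An x86 selector is (index << 3) | TI | RPL, so X86_SEL_MASK_OFF_RPL
       strips the RPL and TI bits.  The hypervisor reserves a few GDT entries
       for its own CS/DS/TSS selectors; a guest descriptor landing on one of
       those entries is a conflict that the ring-3 SELM code has to resolve
       (typically by moving the hypervisor selectors out of the way). */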
    RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
    if (   pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
    {
        if (Desc.Gen.u1Present)
        {
            Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */
        }
        Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));

        /* Note: we can't continue below or else we'll change the shadow descriptor!! */
        /* When the guest makes the selector present, then we'll do a GDT sync. */
        return VINF_SUCCESS;
    }

    /*
     * Convert the guest selector to a shadow selector and update the shadow GDT.
     */
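    /* Raw-mode ring compression: guest ring-0 code actually executes in
       ring 1, so selmGuestToShadowDesc bumps the DPL of guest DPL-0
       descriptors to 1 in the shadow copy (and, per the changelog above, only
       pushes ring-1 descriptors into ring 2 when EMIsRawRing1Enabled()
       holds). */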
    selmGuestToShadowDesc(pVM, &Desc);
    PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
    //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF));
    //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF));
    *pShwDescr = Desc;

    /*
     * Detect and mark stale registers.
     */
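    /* A segment register is "stale" when its hidden (cached) base/limit/attr
       parts no longer match the descriptor now in the GDT.  A real CPU keeps
       using the cached copy until the register is reloaded, so rather than
       reloading we flag the register and reschedule to REM until the guest
       reloads it. */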
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PCPUMCTX    pCtx   = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
                {
                    Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
                    rcStrict = VINF_EM_RESCHEDULE_REM;
                }
                else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
                {
                    Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
                }
                else
                    Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
            }
            else
                Log(("GDT write to selector in %s register %04X (out of sync)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
        }
    }

    /** @todo Detect stale LDTR as well? */

    return rcStrict;
}


/**
 * Synchronizes any segment registers referring to the given GDT entry.
 *
 * This is called before any changes are performed and shadowed, so it is
 * still possible to look at both the shadow and guest descriptor table
 * entries for hidden register content.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Trap register frame.
 * @param   iGDTEntry   The GDT entry to sync.
 */
static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
{
    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (    iGDTEntry >= SELM_GDT_ELEMENTS
        ||  offEntry > GdtrGuest.cbGdt)
        return;

    /*
     * Sync outdated segment registers using this entry.
     */
    PCX86DESC pDesc = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
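    /* CTX_SUFF picks the context-specific member, i.e. this expands to the
       paGdtRC mapping when compiled for the raw-mode (guest) context. */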
    uint32_t    uCpl   = CPUMGetGuestCPL(pVCpu);
    PCPUMCTX    pCtx   = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (iGDTEntry == (paSReg[iSReg].Sel >> X86_SEL_SHIFT))
        {
            if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
                {
                    selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
                    Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
                }
                else
                    Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs\n",
                         iGDTEntry, g_aszSRegNms[iSReg], pDesc));
            }
        }
    }
}


/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    LogFlow(("selmRCGuestGDTWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange);

    /*
     * Check if any selectors might be affected.
     */
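    /* The faulting instruction hasn't been interpreted yet, so its width is
       unknown; a single write can straddle two 8-byte descriptors.  Hence
       both the entry at offRange and, when offRange + 8 crosses into it, the
       following entry are synced here. */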
    unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
    selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
    if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
        selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);

    /*
     * Attempt to emulate the instruction and sync the affected entries.
     */
    uint32_t cb;
    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        /* Check if the LDT was in any way affected.  Do not sync the
           shadow GDT if that's the case or we might have trouble in
           the world switcher (or so they say). */
        unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
        unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
        if (   iGDTE1 == iLdt
            || iGDTE2 == iLdt)
        {
            Log(("LDTR selector change -> fall back to HC!!\n"));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            rc = VINF_SELM_SYNC_GDT;
            /** @todo Implement correct stale LDT handling. */
        }
        else
        {
            /* Sync the shadow GDT and continue provided the update didn't
               cause any segment registers to go stale in any way. */
            int rc2 = VBOXSTRICTRC_TODO(selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1));
            if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
            {
                if (rc == VINF_SUCCESS)
                    rc = rc2;

                if (iGDTE1 != iGDTE2)
                {
                    rc2 = VBOXSTRICTRC_TODO(selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2));
                    if (rc == VINF_SUCCESS)
                        rc = rc2;
                }

                if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
                {
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
                    return rc;
                }
            }

            /* sync failed, return to ring-3 and resync the GDT. */
            if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
                rc = rc2;
        }
    }
    else
    {
        Assert(RT_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rc;
}
#endif /* SELM_TRACK_GUEST_GDT_CHANGES */


#ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    /** @todo To be implemented. */
    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);

    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_SELM_SYNC_LDT);
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
}
#endif


#ifdef SELM_TRACK_GUEST_TSS_CHANGES
/**
 * Read wrapper used by selmRCGuestTSSWriteHandler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM     Pointer to the VM.
 * @param   pvDst   Where to put the bits we read.
 * @param   pvSrc   Guest address to read from.
 * @param   cb      The number of bytes to read.
 */
DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);

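    /* MMGCRamRead is a fault-tolerant copy from guest memory; it fails if the
       source page isn't present/mapped.  In that case fault the page in via
       PGM and retry the read once. */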
    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /** @todo use different fallback? */
    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", pvSrc, rc));
    if (rc == VINF_SUCCESS)
    {
        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", pvSrc, rc));
    }
    return rc;
}

/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    LogFlow(("selmRCGuestTSSWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange);

    /*
     * Try to emulate the access.
     */
    uint32_t cb;
    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (    RT_SUCCESS(rc)
        &&  cb)
    {
        rc = VINF_SUCCESS;

        /*
         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
         * then check if any of these has changed.
         */
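        /* Ring compression again: the shadow TSS keeps the guest's ring-0
           stack in its esp1/ss1 fields with the SS RPL forced to 1, since
           guest ring-0 code runs in ring 1 under raw mode.  The `& ~1` and
           `| 1` below undo and reapply that RPL adjustment. */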
        PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
        if (    PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
            &&  PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
            &&  (    pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
                 ||  pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
           )
        {
            Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
            pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
            pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
#ifdef VBOX_WITH_RAW_RING1
        else if (   EMIsRawRing1Enabled(pVM)
                 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
                 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
                 && (    pGuestTss->esp1 !=  pVM->selm.s.Tss.esp2
                     ||  pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
                )
        {
            Log(("selmRCGuestTSSWriteHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
                 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
            pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
            pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
#endif
        /* Handle misaligned TSS in a safe manner (just in case). */
        else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
                 && offRange <  RT_UOFFSETOF(VBOXTSS, padding_ss0))
        {
            struct
            {
                uint32_t esp0;
                uint16_t ss0;
                uint16_t padding_ss0;
            } s;
            AssertCompileSize(s, 8);
            rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
            if (    rc == VINF_SUCCESS
                &&  (    s.esp0 !=  pVM->selm.s.Tss.esp1
                     ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
               )
            {
                Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
                     (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
                pVM->selm.s.Tss.esp1 = s.esp0;
                pVM->selm.s.Tss.ss1  = s.ss0 | 1;
                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
            }
        }

        /*
         * If VME is enabled we need to check if the interrupt redirection bitmap
         * needs updating.
         */
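        /* With CR4.VME set, the CPU consults a 32-byte (256-bit) interrupt
           redirection bitmap placed immediately before the I/O permission
           bitmap in the TSS (i.e. at offIoBitmap - 32) to decide which
           software interrupts a V86 task may issue directly. */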
        if (    offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
            &&  (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
        {
            if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
            {
                uint16_t offIoBitmap = pGuestTss->offIoBitmap;
                if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
                {
                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                }
                else
                    Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
            }
            else
            {
                /** @todo not sure how the partial case is handled; probably not allowed */
                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
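                /* sizeof(IntRedirBitmap) is 32 bytes, so this is the guest
                   offset where the redirection bitmap starts.  Only a write
                   falling entirely within it gets copied into the shadow TSS
                   below; anything else triggers a ring-3 resync via the
                   rc != VINF_SUCCESS path further down. */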
                if (    offIntRedirBitmap <= offRange
                    &&  offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
                    &&  offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
                {
                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));

                    /** @todo only update the changed part. */
                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                    {
                        rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
                                               (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
                        if (rc != VINF_SUCCESS)
                            break;
                    }
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
                }
            }
        }

        /* Return to ring-3 for a full resync if any of the above fails... (?) */
        if (rc != VINF_SUCCESS)
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    }
    else
    {
        AssertMsg(RT_FAILURE(rc), ("cb=%u rc=%#x\n", cb, rc));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    }
    return rc;
}
#endif /* SELM_TRACK_GUEST_TSS_CHANGES */


#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    LogRel(("FATAL ERROR: selmRCShadowGDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    return VERR_SELM_SHADOW_GDT_WRITE;
}
#endif


#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    LogRel(("FATAL ERROR: selmRCShadowLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    return VERR_SELM_SHADOW_LDT_WRITE;
}
#endif


#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    LogRel(("FATAL ERROR: selmRCShadowTSSWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    return VERR_SELM_SHADOW_TSS_WRITE;
}
#endif