VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/SELMRC.cpp@ 45276

Last change on this file: r45276, checked in by vboxsync, 12 years ago

Ring-1 compression patches, courtesy of trivirt AG:

  • main: diff to remove the hwvirt requirement for QNX
  • rem: diff for dealing with raw ring 0/1 selectors and general changes to allowed guest execution states
  • vmm: changes for using the guest's TSS selector index as our hypervisor TSS selector (makes str safe) (VBOX_WITH_SAFE_STR)
  • vmm: changes for dealing with guest ring 1 code (VBOX_WITH_RAW_RING1); the selector RPL remapping this relies on is illustrated in the sketch below the file metadata
  • vmm: change to emulate smsw in RC/R0 (QNX uses this old style instruction a lot so going to qemu for emulation is very expensive)
  • vmm: change (hack) to kick out patm virtual handlers in case they conflict with guest GDT/TSS write monitors; we should allow multiple handlers per page, but that change would be rather invasive
  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.1 KB
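The raw-ring patches summarized above rely on ring compression: in raw mode the guest's ring-0 code actually runs in ring 1, and with VBOX_WITH_RAW_RING1 the guest's ring-1 code (QNX) runs in ring 2. The shadow TSS therefore has to carry the guest's stack selectors with their RPL bumped by one, which is what selmRCGuestTSSWriteHandler below does with pVM->selm.s.Tss.ss1/ss2. The following sketch is illustrative only: it is not part of SELMRC.cpp, the helper names are made up, and the selector values in main() are arbitrary examples. It simply isolates that RPL remapping.

/* Illustrative sketch only -- not part of SELMRC.cpp.  Demonstrates the
 * selector RPL remapping used by the ring compression described above:
 * the guest ring-0 stack selector goes into the shadow TSS ss1 with RPL 1,
 * and (with VBOX_WITH_RAW_RING1) the guest ring-1 stack selector goes into
 * ss2 with RPL 2, mirroring "Tss.ss1 = ss0 | 1" and
 * "Tss.ss2 = (ss1 & ~1) | 2" in the TSS write handler below. */
#include <stdint.h>
#include <stdio.h>

static uint16_t rawModeShadowSs1(uint16_t uGuestSs0) /* hypothetical helper name */
{
    return (uint16_t)(uGuestSs0 | 1);           /* RPL 0 -> RPL 1 */
}

static uint16_t rawModeShadowSs2(uint16_t uGuestSs1) /* hypothetical helper name */
{
    return (uint16_t)((uGuestSs1 & ~1) | 2);    /* RPL 1 -> RPL 2 */
}

int main(void)
{
    /* Example values: guest ring-0 stack selector 0x0010, guest ring-1 stack selector 0x0019. */
    printf("guest ss0=%#06x -> shadow ss1=%#06x\n", 0x0010u, (unsigned)rawModeShadowSs1(0x0010));
    printf("guest ss1=%#06x -> shadow ss2=%#06x\n", 0x0019u, (unsigned)rawModeShadowSs2(0x0019));
    return 0;
}

Only the shadow TSS holds the adjusted values; the guest's own TSS memory is left untouched, which is why the handler compares against "ss1 & ~1" (the "undo raw-r0" comparisons) to detect real guest changes.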
/* $Id: SELMRC.cpp 45276 2013-04-02 08:17:11Z vboxsync $ */
/** @file
 * SELM - The Selector Manager, Guest Context.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include "SELMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef LOG_ENABLED
/** Segment register names. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif

#ifdef SELM_TRACK_GUEST_GDT_CHANGES
/**
 * Synchronizes one GDT entry (guest -> shadow).
 *
 * @returns VBox strict status code (appropriate for trap handling and GC
 *          return).
 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
 * @retval  VINF_SELM_SYNC_GDT
 * @retval  VINF_EM_RESCHEDULE_REM
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Trap register frame.
 * @param   iGDTEntry   The GDT entry to sync.
 *
 * @remarks Caller checks that this isn't the LDT entry!
 */
static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
{
    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));

    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (    iGDTEntry >= SELM_GDT_ELEMENTS
        ||  offEntry > GdtrGuest.cbGdt)
        return VINF_SUCCESS; /* ignore */

    /*
     * Read the guest descriptor.
     */
    X86DESC Desc;
    int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
    if (RT_FAILURE(rc))
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
        if (RT_FAILURE(rc))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
            return VINF_EM_RESCHEDULE_REM;
        }
    }

    /*
     * Check for conflicts.
     */
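    /* Note (comment added in this listing): the shadow GDT also hosts the
       hypervisor's own selectors (CS, DS, CS64, TSS, TSS_TRAP08).  If the
       guest defines a present descriptor at one of those indexes it cannot
       be mirrored here; a full GDT resync in ring-3 has to resolve the
       clash instead. */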
    RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
    if (   pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
    {
        if (Desc.Gen.u1Present)
        {
            Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */
        }
        Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));

        /* Note: we can't continue below or else we'll change the shadow descriptor!! */
        /* When the guest makes the selector present, then we'll do a GDT sync. */
        return VINF_SUCCESS;
    }

    /*
     * Convert the guest selector to a shadow selector and update the shadow GDT.
     */
    selmGuestToShadowDesc(pVM, &Desc);
    PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
    //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
    //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
    *pShwDescr = Desc;

    /*
     * Detect and mark stale registers.
     */
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
                {
                    Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
                    rcStrict = VINF_EM_RESCHEDULE_REM;
                }
                else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
                {
                    Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
                }
                else
                    Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
            }
            else
159 Log(("GDT write to selector in %s register %04X (out of sync)\n", paSReg[iSReg].Sel));
        }
    }

    /** @todo Detect stale LDTR as well? */

    return rcStrict;
}


/**
 * Synchronizes any segment registers referring to the given GDT entry.
 *
 * This is called before any changes are performed and shadowed, so it's
 * possible to look in both the shadow and guest descriptor table entries
 * for hidden register content.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Trap register frame.
 * @param   iGDTEntry   The GDT entry to sync.
 */
static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
{
    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (    iGDTEntry >= SELM_GDT_ELEMENTS
        ||  offEntry > GdtrGuest.cbGdt)
        return;

    /*
     * Sync outdated segment registers using this entry.
     */
    PCX86DESC pDesc = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (iGDTEntry == ((paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL) >> X86_SEL_SHIFT))
        {
            if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
                {
                    selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
                    Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
                }
                else
                    Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
                         iGDTEntry, g_aszSRegNms[iSReg], pDesc));
            }
        }
    }

}


/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    LogFlow(("selmRCGuestGDTWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange);

    /*
     * Check if any selectors might be affected.
     */
    unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
    selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
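    /* Note (comment added in this listing): a descriptor is 8 bytes, so a
       write that is not 8-byte aligned can spill into the following GDT
       entry as well; sync that neighbour too. */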
    if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
        selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);

    /*
     * Attempt to emulate the instruction and sync the affected entries.
     */
    uint32_t cb;
    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        /* Check if the LDT was in any way affected.  Do not sync the
           shadow GDT if that's the case or we might have trouble in
           the world switcher (or so they say). */
        unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
        unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
        if (   iGDTE1 == iLdt
            || iGDTE2 == iLdt)
        {
            Log(("LDTR selector change -> fall back to HC!!\n"));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            rc = VINF_SELM_SYNC_GDT;
            /** @todo Implement correct stale LDT handling. */
        }
        else
        {
            /* Sync the shadow GDT and continue provided the update didn't
               cause any segment registers to go stale in any way. */
            int rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
            if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
            {
                if (rc == VINF_SUCCESS)
                    rc = rc2;

                if (iGDTE1 != iGDTE2)
                {
                    rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
                    if (rc == VINF_SUCCESS)
                        rc = rc2;
                }

                if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
                {
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
                    return rc;
                }
            }

            /* sync failed, return to ring-3 and resync the GDT. */
            if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
                rc = rc2;
        }
    }
    else
    {
        Assert(RT_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rc;
}
#endif /* SELM_TRACK_GUEST_GDT_CHANGES */

#ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    /** @todo To be implemented. */
    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);

    VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_SELM_SYNC_LDT);
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
}
#endif

#ifdef SELM_TRACK_GUEST_TSS_CHANGES
/**
 * Read wrapper used by selmRCGuestTSSWriteHandler.
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM     Pointer to the VM.
 * @param   pvDst   Where to put the bits we read.
 * @param   pvSrc   Guest address to read from.
 * @param   cb      The number of bytes to read.
 */
DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);

    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /** @todo use different fallback? */
    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
    if (rc == VINF_SUCCESS)
    {
        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
    }
    return rc;
}

/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    LogFlow(("selmRCGuestTSSWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange);

    /*
     * Try to emulate the access.
     */
    uint32_t cb;
    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (    RT_SUCCESS(rc)
        &&  cb)
    {
        rc = VINF_SUCCESS;

        /*
         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
         * then check if any of these has changed.
         */
        PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
        if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
            && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
            && (   pGuestTss->esp0 != pVM->selm.s.Tss.esp1
                || pGuestTss->ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
           )
        {
            Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
            pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
            pVM->selm.s.Tss.ss1 = pGuestTss->ss0 | 1;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
#ifdef VBOX_WITH_RAW_RING1
        else
        if (   EMIsRawRing1Enabled(pVM)
            && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
            && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
            && (   pGuestTss->esp1 != pVM->selm.s.Tss.esp2
                || pGuestTss->ss1 != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
           )
        {
            Log(("selmRCGuestTSSWriteHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
                 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
            pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
            pVM->selm.s.Tss.ss2 = (pGuestTss->ss1 & ~1) | 2;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
#endif
        /* Handle misaligned TSS in a safe manner (just in case). */
        else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
                 && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
        {
            struct
            {
                uint32_t esp0;
                uint16_t ss0;
                uint16_t padding_ss0;
            } s;
            AssertCompileSize(s, 8);
            rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
            if (   rc == VINF_SUCCESS
                && (   s.esp0 != pVM->selm.s.Tss.esp1
                    || s.ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
               )
            {
                Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
                     (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
                pVM->selm.s.Tss.esp1 = s.esp0;
                pVM->selm.s.Tss.ss1 = s.ss0 | 1;
                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
            }
        }

        /*
         * If VME is enabled we need to check if the interrupt redirection bitmap
         * needs updating.
         */
        if (   offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
            && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
        {
            if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
            {
                uint16_t offIoBitmap = pGuestTss->offIoBitmap;
                if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
                {
                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                }
                else
                    Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
            }
            else
            {
                /** @todo not sure how the partial case is handled; probably not allowed */
                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
                if (   offIntRedirBitmap <= offRange
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
                {
                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));

                    /** @todo only update the changed part. */
                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                    {
                        rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
                                               (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
                        if (rc != VINF_SUCCESS)
                            break;
                    }
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
                }
            }
        }

        /* Return to ring-3 for a full resync if any of the above fails... (?) */
        if (rc != VINF_SUCCESS)
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    }
    else
    {
        AssertMsg(RT_FAILURE(rc), ("cb=%u rc=%#x\n", cb, rc));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    }
    return rc;
}
#endif /* SELM_TRACK_GUEST_TSS_CHANGES */

#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCShadowGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    LogRel(("FATAL ERROR: selmRCShadowGDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    return VERR_SELM_SHADOW_GDT_WRITE;
}
#endif

#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCShadowLDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    LogRel(("FATAL ERROR: selmRCShadowLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    return VERR_SELM_SHADOW_LDT_WRITE;
}
#endif

#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCShadowTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange)
{
    LogRel(("FATAL ERROR: selmRCShadowTSSWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
    return VERR_SELM_SHADOW_TSS_WRITE;
}
#endif