VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/SELMRC.cpp @ 55909

Last change on this file since 55909 was 55900, checked in by vboxsync, 10 years ago

PGM: Added a pVCpu parameter to all virtual handler callouts and also a PGMACCESSORIGIN parameter to the ring-3 one.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 25.7 KB
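
The change note above concerns the virtual handler callouts implemented in this file. For orientation, the raw-mode (RC) write \#PF callouts defined below all share the shape sketched here; the function name is a made-up placeholder and the body is illustrative only — the real handlers appear further down in the listing.

/* Sketch of the common RC write-#PF callout shape used in this file.
   The name and body are placeholders, not part of the source. */
DECLEXPORT(int) selmRCExampleWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
                                            RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame);
    NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VINF_SUCCESS; /* a real handler returns a status code suitable for trap handling / GC return */
}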
/* $Id: SELMRC.cpp 55900 2015-05-18 10:17:35Z vboxsync $ */
/** @file
 * SELM - The Selector Manager, Guest Context.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_SELM
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include "SELMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/pgm.h>

#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>

#include "SELMInline.h"


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef LOG_ENABLED
/** Segment register names. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif


#ifdef SELM_TRACK_GUEST_GDT_CHANGES
/**
 * Synchronizes one GDT entry (guest -> shadow).
 *
 * @returns VBox strict status code (appropriate for trap handling and GC
 *          return).
 * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
 * @retval  VINF_SELM_SYNC_GDT
 * @retval  VINF_EM_RESCHEDULE_REM
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Trap register frame.
 * @param   iGDTEntry   The GDT entry to sync.
 *
 * @remarks Caller checks that this isn't the LDT entry!
 */
static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
{
    Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));

    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (   iGDTEntry >= SELM_GDT_ELEMENTS
        || offEntry > GdtrGuest.cbGdt)
        return VINF_SUCCESS; /* ignore */

    /*
     * Read the guest descriptor.
     */
    X86DESC Desc;
    int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
    if (RT_FAILURE(rc))
    {
        rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
        if (RT_FAILURE(rc))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
            return VINF_EM_RESCHEDULE_REM;
        }
    }

    /*
     * Check for conflicts.
     */
    RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
    Assert(   !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        & ~X86_SEL_MASK_OFF_RPL)
           && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
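    /* The raw-mode hypervisor parks its own CS/DS/CS64/TSS selectors in the shadow
       GDT.  If the guest descriptor being written targets one of those slots, we
       must not copy it into the shadow GDT (that would clobber the hypervisor
       selectors); instead a full resync in ring-3 is requested once the guest
       actually makes the conflicting entry present. */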
    if (   pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        == Sel
        || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
    {
        if (Desc.Gen.u1Present)
        {
            Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */
        }
        Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));

        /* Note: we can't continue below or else we'll change the shadow descriptor!! */
        /* When the guest makes the selector present, then we'll do a GDT sync. */
        return VINF_SUCCESS;
    }

    /*
     * Convert the guest selector to a shadow selector and update the shadow GDT.
     */
    selmGuestToShadowDesc(pVM, &Desc);
    PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
130 //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
131 //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
    *pShwDescr = Desc;

    /*
     * Detect and mark stale registers.
     */
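    /* A segment register that still caches this selector may no longer match the
       freshly written descriptor.  Compare the hidden parts against the new
       descriptor and flag the register as stale, forcing instruction emulation
       and a trip to ring-3 via the status code and forced-action flags below. */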
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    PCPUMCTX     pCtx     = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    PCPUMSELREG  paSReg   = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
                {
                    Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
                    /* rcStrict = VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
                    rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
                }
                else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
                {
                    Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
                    paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
                }
                else
                    Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
            }
            else
163 Log(("GDT write to selector in %s register %04X (out of sync)\n", paSReg[iSReg].Sel));
        }
    }

    /** @todo Detect stale LDTR as well? */

    return rcStrict;
}


/**
 * Synchronizes any segment registers referring to the given GDT entry.
 *
 * This is called before any changes are performed and shadowed, so it's
 * possible to look in both the shadow and guest descriptor table entries for
 * hidden register content.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       The current virtual CPU.
 * @param   pRegFrame   Trap register frame.
 * @param   iGDTEntry   The GDT entry to sync.
 */
static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
{
    /*
     * Validate the offset.
     */
    VBOXGDTR GdtrGuest;
    CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
    unsigned offEntry = iGDTEntry * sizeof(X86DESC);
    if (   iGDTEntry >= SELM_GDT_ELEMENTS
        || offEntry > GdtrGuest.cbGdt)
        return;

    /*
     * Sync outdated segment registers using this entry.
     */
    PCX86DESC   pDesc  = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
    uint32_t    uCpl   = CPUMGetGuestCPL(pVCpu);
    PCPUMCTX    pCtx   = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
    PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
    for (unsigned iSReg = 0; iSReg < X86_SREG_COUNT; iSReg++)
    {
        if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
        {
            if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
            {
                if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
                {
                    selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
                    Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
                }
                else
                    Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
                         iGDTEntry, g_aszSRegNms[iSReg], pDesc));
            }
        }
    }

}


/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(int) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange); NOREF(pvUser);

    /*
     * Check if any selectors might be affected.
     */
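    /* A single guest write can straddle two 8-byte descriptors, so refresh the
       hidden register parts for both candidate entries before emulating it. */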
    unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
    selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
    if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
        selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);

    /*
     * Attempt to emulate the instruction and sync the affected entries.
     */
    uint32_t cb;
    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        /* Check if the LDT was in any way affected.  Do not sync the
           shadow GDT if that's the case or we might have trouble in
           the world switcher (or so they say). */
        unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
        unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
        if (   iGDTE1 == iLdt
            || iGDTE2 == iLdt)
        {
            Log(("LDTR selector change -> fall back to HC!!\n"));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
            rc = VINF_SELM_SYNC_GDT;
            /** @todo Implement correct stale LDT handling. */
        }
        else
        {
            /* Sync the shadow GDT and continue provided the update didn't
               cause any segment registers to go stale in any way. */
            int rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
            if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
            {
                if (rc == VINF_SUCCESS)
                    rc = rc2;

                if (iGDTE1 != iGDTE2)
                {
                    rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
                    if (rc == VINF_SUCCESS)
                        rc = rc2;
                }

                if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
                {
                    /* VINF_EM_RESCHEDULE_REM - bad idea if we're in a patch. */
                    if (rc2 == VINF_EM_RESCHEDULE_REM)
                        rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
                    return rc;
                }
            }

            /* sync failed, return to ring-3 and resync the GDT. */
            if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
                rc = rc2;
        }
    }
    else
    {
        Assert(RT_FAILURE(rc));
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rc;
}
#endif /* SELM_TRACK_GUEST_GDT_CHANGES */


#ifdef SELM_TRACK_GUEST_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(int) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    /** @todo To be implemented. */
    ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);

    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
}
#endif


#ifdef SELM_TRACK_GUEST_TSS_CHANGES
/**
 * Read wrapper used by selmRCGuestTSSWritePfHandler.
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM     Pointer to the VM.
 * @param   pvDst   Where to put the bits we read.
 * @param   pvSrc   Guest address to read from.
 * @param   cb      The number of bytes to read.
 */
DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);

    int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /** @todo use different fallback? */
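    /* MMGCRamRead fails when the guest page isn't currently mapped/present;
       prefetching the page into the shadow page tables and retrying is meant to
       handle that case here rather than failing the whole access. */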
    rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
    AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
    if (rc == VINF_SUCCESS)
    {
        rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
        AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
    }
    return rc;
}

/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(int) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
    NOREF(pvRange); NOREF(pvUser);

    /*
     * Try emulate the access.
     */
    uint32_t cb;
    int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (    RT_SUCCESS(rc)
        &&  cb)
    {
        rc = VINF_SUCCESS;

        /*
         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
         * then check if any of these has changed.
         */
        PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
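        /* Note: in raw-mode the guest's ring-0 stack is mirrored into the shadow TSS
           ring-1 fields (ss1/esp1) because guest ring-0 code actually runs at ring 1;
           that is what the RPL fix-ups ("| 1" / "& ~1") below compensate for. */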
413 if ( PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
414 && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
415 && ( pGuestTss->esp0 != pVM->selm.s.Tss.esp1
416 || pGuestTss->ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
417 )
418 {
419 Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
420 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
421 pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
422 pVM->selm.s.Tss.ss1 = pGuestTss->ss0 | 1;
423 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
424 }
425#ifdef VBOX_WITH_RAW_RING1
426 else if ( EMIsRawRing1Enabled(pVM)
427 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
428 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
429 && ( pGuestTss->esp1 != pVM->selm.s.Tss.esp2
430 || pGuestTss->ss1 != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
431 )
432 {
433 Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
434 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
435 pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
436 pVM->selm.s.Tss.ss2 = (pGuestTss->ss1 & ~1) | 2;
437 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
438 }
439#endif
440 /* Handle misaligned TSS in a safe manner (just in case). */
441 else if ( offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
442 && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
443 {
444 struct
445 {
446 uint32_t esp0;
447 uint16_t ss0;
448 uint16_t padding_ss0;
449 } s;
450 AssertCompileSize(s, 8);
451 rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
452 if ( rc == VINF_SUCCESS
453 && ( s.esp0 != pVM->selm.s.Tss.esp1
454 || s.ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
455 )
456 {
457 Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
458 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
459 pVM->selm.s.Tss.esp1 = s.esp0;
460 pVM->selm.s.Tss.ss1 = s.ss0 | 1;
461 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
462 }
463 }
464
465 /*
466 * If VME is enabled we need to check if the interrupt redirection bitmap
467 * needs updating.
468 */
469 if ( offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
470 && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
471 {
472 if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
473 {
474 uint16_t offIoBitmap = pGuestTss->offIoBitmap;
475 if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
476 {
477 Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
478 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
479 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
480 }
481 else
482 Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
483 }
484 else
485 {
486 /** @todo not sure how the partial case is handled; probably not allowed */
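                /* Under VME the 32-byte interrupt redirection bitmap sits immediately
                   below the I/O permission bitmap in the TSS, hence the offset below is
                   offGuestIoBitmap minus sizeof(IntRedirBitmap). */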
                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
                if (   offIntRedirBitmap <= offRange
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
                {
                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));

                    /** @todo only update the changed part. */
                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                    {
                        rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
                                               (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
                        if (rc != VINF_SUCCESS)
                            break;
                    }
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
                }
            }
        }

        /* Return to ring-3 for a full resync if any of the above fails... (?) */
        if (rc != VINF_SUCCESS)
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    }
    else
    {
        AssertMsg(RT_FAILURE(rc), ("cb=%u rc=%#x\n", cb, rc));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    }
    return rc;
}
#endif /* SELM_TRACK_GUEST_TSS_CHANGES */


#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(int) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VERR_SELM_SHADOW_GDT_WRITE;
}
#endif


#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(int) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VERR_SELM_SHADOW_LDT_WRITE;
}
#endif


#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
/**
 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 * @param   pvUser      Unused.
 */
DECLEXPORT(int) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
{
    LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
    NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
    return VERR_SELM_SHADOW_TSS_WRITE;
}
#endif
