VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllXcpt-x86.cpp @ 108641

Last change on this file since 108641 was 108278, checked in by vboxsync, 3 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

1/* $Id: IEMAllXcpt-x86.cpp 108278 2025-02-18 15:46:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - x86 target, exceptions & interrupts.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/pdm.h>
41#include <VBox/vmm/gcm.h>
42#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
43# include <VBox/vmm/hm_svm.h>
44#endif
45#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
46# include <VBox/vmm/hmvmxinline.h>
47#endif
48#include <VBox/vmm/tm.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/dbgftrace.h>
51#include "IEMInternal.h"
52#include <VBox/vmm/vmcc.h>
53#include <VBox/log.h>
54#include <VBox/err.h>
55#include <VBox/param.h>
56#include <iprt/assert.h>
57#include <iprt/string.h>
58#include <iprt/x86.h>
59
60#include "IEMInline-x86.h"
61
62
63/*********************************************************************************************************************************
64* Structures and Typedefs *
65*********************************************************************************************************************************/
66/**
67 * CPU exception classes.
68 */
69typedef enum IEMXCPTCLASS
70{
71 IEMXCPTCLASS_BENIGN,
72 IEMXCPTCLASS_CONTRIBUTORY,
73 IEMXCPTCLASS_PAGE_FAULT,
74 IEMXCPTCLASS_DOUBLE_FAULT
75} IEMXCPTCLASS;
76
77
78
79/** @name Misc Worker Functions.
80 * @{
81 */
82
83/**
84 * Gets the exception class for the specified exception vector.
85 *
86 * @returns The class of the specified exception.
87 * @param uVector The exception vector.
88 */
89static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
90{
91 Assert(uVector <= X86_XCPT_LAST);
92 switch (uVector)
93 {
94 case X86_XCPT_DE:
95 case X86_XCPT_TS:
96 case X86_XCPT_NP:
97 case X86_XCPT_SS:
98 case X86_XCPT_GP:
99 case X86_XCPT_SX: /* AMD only */
100 return IEMXCPTCLASS_CONTRIBUTORY;
101
102 case X86_XCPT_PF:
103 case X86_XCPT_VE: /* Intel only */
104 return IEMXCPTCLASS_PAGE_FAULT;
105
106 case X86_XCPT_DF:
107 return IEMXCPTCLASS_DOUBLE_FAULT;
108 }
109 return IEMXCPTCLASS_BENIGN;
110}
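/*
 * Note: this classification feeds the recursive-exception rules applied by
 * IEMEvaluateRecursiveXcpt below, which follow the Intel SDM Vol. 3A
 * "Conditions for Generating a Double Fault" table (and the equivalent AMD
 * APM material): a contributory exception raised while delivering another
 * contributory one, or a contributory/page-fault exception raised while
 * delivering a page fault, escalates to #DF; benign combinations are simply
 * handled serially.
 */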
111
112
113/**
114 * Evaluates how to handle an exception caused during delivery of another event
115 * (exception / interrupt).
116 *
117 * @returns How to handle the recursive exception.
118 * @param pVCpu The cross context virtual CPU structure of the
119 * calling thread.
120 * @param fPrevFlags The flags of the previous event.
121 * @param uPrevVector The vector of the previous event.
122 * @param fCurFlags The flags of the current exception.
123 * @param uCurVector The vector of the current exception.
124 * @param pfXcptRaiseInfo Where to store additional information about the
125 * exception condition. Optional.
126 */
127VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
128 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
129{
130 /*
131 * Only CPU exceptions can be raised while delivering other events, software interrupt
132 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
133 */
134 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
135 Assert(pVCpu); RT_NOREF(pVCpu);
136 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
137
138 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
139 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
140 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
141 {
142 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
143 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
144 {
145 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
146 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
147 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
148 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
149 {
150 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
151 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
152 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
153 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
154 uCurVector, pVCpu->cpum.GstCtx.cr2));
155 }
156 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
157 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
158 {
159 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
160 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
161 }
162 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
163 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
164 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
165 {
166 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
167 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
168 }
169 }
170 else
171 {
172 if (uPrevVector == X86_XCPT_NMI)
173 {
174 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
175 if (uCurVector == X86_XCPT_PF)
176 {
177 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
178 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
179 }
180 }
181 else if ( uPrevVector == X86_XCPT_AC
182 && uCurVector == X86_XCPT_AC)
183 {
184 enmRaise = IEMXCPTRAISE_CPU_HANG;
185 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
186 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
187 }
188 }
189 }
190 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
191 {
192 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
193 if (uCurVector == X86_XCPT_PF)
194 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
195 }
196 else
197 {
198 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
199 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
200 }
201
202 if (pfXcptRaiseInfo)
203 *pfXcptRaiseInfo = fRaiseInfo;
204 return enmRaise;
205}
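/*
 * Illustrative use (hypothetical caller code, not part of this file): a #GP
 * raised while delivering another #GP escalates to a double fault, whereas a
 * #PF raised while delivering a #GP is simply delivered as the #PF:
 *
 *     IEMXCPTRAISEINFO fInfo = IEMXCPTRAISEINFO_NONE;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT); // contributory on contributory
 */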
206
207
208/**
209 * Enters the CPU shutdown state initiated by a triple fault or other
210 * unrecoverable conditions.
211 *
212 * @returns Strict VBox status code.
213 * @param pVCpu The cross context virtual CPU structure of the
214 * calling thread.
215 */
216static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
217{
218 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
219 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
220
221 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
222 {
223 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
224 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
225 }
226
227 RT_NOREF(pVCpu);
228 return VINF_EM_TRIPLE_FAULT;
229}
230
231
232/**
233 * Validates a new SS segment.
234 *
235 * @returns VBox strict status code.
236 * @param pVCpu The cross context virtual CPU structure of the
237 * calling thread.
238 * @param NewSS The new SS selector.
239 * @param uCpl The CPL to load the stack for.
240 * @param pDesc Where to return the descriptor.
241 */
242static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
243{
244 /* Null selectors are not allowed (we're not called for dispatching
245 interrupts with SS=0 in long mode). */
246 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
247 {
248 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
249 return iemRaiseTaskSwitchFault0(pVCpu);
250 }
251
252 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
253 if ((NewSS & X86_SEL_RPL) != uCpl)
254 {
255 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
256 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
257 }
258
259 /*
260 * Read the descriptor.
261 */
262 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
263 if (rcStrict != VINF_SUCCESS)
264 return rcStrict;
265
266 /*
267 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
268 */
269 if (!pDesc->Legacy.Gen.u1DescType)
270 {
271 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
272 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
273 }
274
275 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
276 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
277 {
278 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
279 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
280 }
281 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
282 {
283 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
284 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
285 }
286
287 /* Is it there? */
288 /** @todo testcase: Is this checked before the canonical / limit check below? */
289 if (!pDesc->Legacy.Gen.u1Present)
290 {
291 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
292 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
293 }
294
295 return VINF_SUCCESS;
296}
297
298/** @} */
299
300
301/** @name Raising Exceptions.
302 *
303 * @{
304 */
305
306
307/**
308 * Loads the specified stack far pointer from the TSS.
309 *
310 * @returns VBox strict status code.
311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
312 * @param uCpl The CPL to load the stack for.
313 * @param pSelSS Where to return the new stack segment.
314 * @param puEsp Where to return the new stack pointer.
315 */
316static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
317{
318 VBOXSTRICTRC rcStrict;
319 Assert(uCpl < 4);
320
321 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
322 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
323 {
324 /*
325 * 16-bit TSS (X86TSS16).
326 */
327 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
328 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
329 {
330 uint32_t off = uCpl * 4 + 2;
331 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
332 {
333 /** @todo check actual access pattern here. */
334 uint32_t u32Tmp = 0; /* gcc maybe... */
335 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
336 if (rcStrict == VINF_SUCCESS)
337 {
338 *puEsp = RT_LOWORD(u32Tmp);
339 *pSelSS = RT_HIWORD(u32Tmp);
340 return VINF_SUCCESS;
341 }
342 }
343 else
344 {
345 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
346 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
347 }
348 break;
349 }
350
351 /*
352 * 32-bit TSS (X86TSS32).
353 */
354 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
355 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
356 {
357 uint32_t off = uCpl * 8 + 4;
358 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
359 {
360/** @todo check actual access pattern here. */
361 uint64_t u64Tmp;
362 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
363 if (rcStrict == VINF_SUCCESS)
364 {
365 *puEsp = u64Tmp & UINT32_MAX;
366 *pSelSS = (RTSEL)(u64Tmp >> 32);
367 return VINF_SUCCESS;
368 }
369 }
370 else
371 {
372 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
373 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
374 }
375 break;
376 }
377
378 default:
379 AssertFailed();
380 rcStrict = VERR_IEM_IPE_4;
381 break;
382 }
383
384 *puEsp = 0; /* make gcc happy */
385 *pSelSS = 0; /* make gcc happy */
386 return rcStrict;
387}
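/*
 * For reference, the offsets used above follow directly from the TSS layouts:
 * in a 16-bit TSS the ring-n stack fields are sp0 @ 0x02, ss0 @ 0x04, sp1 @ 0x06,
 * ss1 @ 0x08, sp2 @ 0x0a, ss2 @ 0x0c, hence off = uCpl * 4 + 2; in a 32-bit TSS
 * they are esp0 @ 0x04, ss0 @ 0x08, esp1 @ 0x0c, ss1 @ 0x10, esp2 @ 0x14,
 * ss2 @ 0x18, hence off = uCpl * 8 + 4.
 */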
388
389
390/**
391 * Loads the specified stack pointer from the 64-bit TSS.
392 *
393 * @returns VBox strict status code.
394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
395 * @param uCpl The CPL to load the stack for.
396 * @param uIst The interrupt stack table index, 0 if to use uCpl.
397 * @param puRsp Where to return the new stack pointer.
398 */
399static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
400{
401 Assert(uCpl < 4);
402 Assert(uIst < 8);
403 *puRsp = 0; /* make gcc happy */
404
405 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
406 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
407
408 uint32_t off;
409 if (uIst)
410 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
411 else
412 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
413 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
414 {
415 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
416 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
417 }
418
419 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
420}
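/*
 * For reference: in the 64-bit TSS the stack pointers live at rsp0 @ 0x04,
 * rsp1 @ 0x0c and rsp2 @ 0x14, with IST1..IST7 at 0x24..0x54, so a non-zero
 * IST index from the IDT gate overrides the CPL-based rspN selection above.
 */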
421
422
423/**
424 * Adjust the CPU state according to the exception being raised.
425 *
426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
427 * @param u8Vector The exception that has been raised.
428 */
429DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
430{
431 switch (u8Vector)
432 {
433 case X86_XCPT_DB:
434 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
435 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
436 break;
437 /** @todo Read the AMD and Intel exception reference... */
438 }
439}
440
441
442/**
443 * Implements exceptions and interrupts for real mode.
444 *
445 * @returns VBox strict status code.
446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
447 * @param cbInstr The number of bytes to offset rIP by in the return
448 * address.
449 * @param u8Vector The interrupt / exception vector number.
450 * @param fFlags The flags.
451 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
452 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
453 */
454static VBOXSTRICTRC
455iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
456 uint8_t cbInstr,
457 uint8_t u8Vector,
458 uint32_t fFlags,
459 uint16_t uErr,
460 uint64_t uCr2) RT_NOEXCEPT
461{
462 NOREF(uErr); NOREF(uCr2);
463 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
464
465 /*
466 * Read the IDT entry.
467 */
468 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
469 {
470 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
471 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
472 }
473 RTFAR16 Idte;
474 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
475 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
476 {
477 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
478 return rcStrict;
479 }
480
481#ifdef LOG_ENABLED
482 /* If software interrupt, try decode it if logging is enabled and such. */
483 if ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
484 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
485 iemLogSyscallRealModeInt(pVCpu, u8Vector, cbInstr);
486#endif
487
488 /*
489 * Push the stack frame.
490 */
491 uint8_t bUnmapInfo;
492 uint16_t *pu16Frame;
493 uint64_t uNewRsp;
494 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &bUnmapInfo, &uNewRsp);
495 if (rcStrict != VINF_SUCCESS)
496 return rcStrict;
497
498 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
499#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
500 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
501 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
502 fEfl |= UINT16_C(0xf000);
503#endif
504 pu16Frame[2] = (uint16_t)fEfl;
505 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
506 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
507 rcStrict = iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
508 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
509 return rcStrict;
510
511 /*
512 * Load the vector address into cs:ip and make exception specific state
513 * adjustments.
514 */
515 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
516 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
517 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
518 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
519 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
520 pVCpu->cpum.GstCtx.rip = Idte.off;
521 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
522 IEMMISC_SET_EFL(pVCpu, fEfl);
523
524 /** @todo do we actually do this in real mode? */
525 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
526 iemRaiseXcptAdjustState(pVCpu, u8Vector);
527
528 /*
529 * Deal with debug events that follows the exception and clear inhibit flags.
530 */
531 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
532 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
533 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
534 else
535 {
536 Log(("iemRaiseXcptOrIntInRealMode: Raising #DB after %#x; pending=%#x\n",
537 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
538 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
539 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
540 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
541 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
542 return iemRaiseDebugException(pVCpu);
543 }
544
545 /* The IEM_F_MODE_XXX and IEM_F_X86_CPL_MASK doesn't really change here,
546 so best leave them alone in case we're in a weird kind of real mode... */
547
548 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
549}
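/*
 * Worked example of the above (hypothetical values): for INT 10h with the
 * power-on IDTR (base 0, limit 0x3ff), the 4-byte IVT entry is read from
 * linear address 0x40 to get the handler IP:CS, a 6-byte frame holding
 * FLAGS, CS and the return IP is pushed on the stack, and IF/TF/AC are
 * cleared before control transfers to the handler.
 */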
550
551
552/**
553 * Loads a NULL data selector into a segment register when coming from V8086 mode.
554 *
555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
556 * @param pSReg Pointer to the segment register.
557 */
558DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
559{
560 pSReg->Sel = 0;
561 pSReg->ValidSel = 0;
562 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
563 {
564 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
565 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
566 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
567 }
568 else
569 {
570 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
571 /** @todo check this on AMD-V */
572 pSReg->u64Base = 0;
573 pSReg->u32Limit = 0;
574 }
575}
576
577
578/**
579 * Loads a segment selector during a task switch in V8086 mode.
580 *
581 * @param pSReg Pointer to the segment register.
582 * @param uSel The selector value to load.
583 */
584DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
585{
586 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
587 pSReg->Sel = uSel;
588 pSReg->ValidSel = uSel;
589 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
590 pSReg->u64Base = uSel << 4;
591 pSReg->u32Limit = 0xffff;
592 pSReg->Attr.u = 0xf3;
593}
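/*
 * Note on the 0xf3 attribute value above: it decodes to P=1, DPL=3, S=1
 * (code/data) with type 3 (read/write data, accessed), which is what the CPU
 * reports for segment registers in virtual-8086 mode; the base is always the
 * selector shifted left by four and the limit is 0xffff.
 */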
594
595
596/**
597 * Loads a segment selector during a task switch in protected mode.
598 *
599 * In this task switch scenario, we would throw \#TS exceptions rather than
600 * \#GPs.
601 *
602 * @returns VBox strict status code.
603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
604 * @param pSReg Pointer to the segment register.
605 * @param uSel The new selector value.
606 *
607 * @remarks This does _not_ handle CS or SS.
608 * @remarks This expects IEM_GET_CPL(pVCpu) to return an up to date value.
609 */
610static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
611{
612 Assert(!IEM_IS_64BIT_CODE(pVCpu));
613
614 /* Null data selector. */
615 if (!(uSel & X86_SEL_MASK_OFF_RPL))
616 {
617 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
618 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
619 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
620 return VINF_SUCCESS;
621 }
622
623 /* Fetch the descriptor. */
624 IEMSELDESC Desc;
625 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
626 if (rcStrict != VINF_SUCCESS)
627 {
628 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
629 VBOXSTRICTRC_VAL(rcStrict)));
630 return rcStrict;
631 }
632
633 /* Must be a data segment or readable code segment. */
634 if ( !Desc.Legacy.Gen.u1DescType
635 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
636 {
637 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
638 Desc.Legacy.Gen.u4Type));
639 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
640 }
641
642 /* Check privileges for data segments and non-conforming code segments. */
643 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
644 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
645 {
646 /* The RPL and the new CPL must be less than or equal to the DPL. */
647 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
648 || (IEM_GET_CPL(pVCpu) > Desc.Legacy.Gen.u2Dpl))
649 {
650 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
651 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
652 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
653 }
654 }
655
656 /* Is it there? */
657 if (!Desc.Legacy.Gen.u1Present)
658 {
659 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
660 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
661 }
662
663 /* The base and limit. */
664 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
665 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
666
667 /*
668 * Ok, everything checked out fine. Now set the accessed bit before
669 * committing the result into the registers.
670 */
671 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
672 {
673 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
674 if (rcStrict != VINF_SUCCESS)
675 return rcStrict;
676 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
677 }
678
679 /* Commit */
680 pSReg->Sel = uSel;
681 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
682 pSReg->u32Limit = cbLimit;
683 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
684 pSReg->ValidSel = uSel;
685 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
686 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
687 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
688
689 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
690 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
691 return VINF_SUCCESS;
692}
693
694
695/**
696 * Performs a task switch.
697 *
698 * If the task switch is the result of a JMP, CALL or IRET instruction, the
699 * caller is responsible for performing the necessary checks (like DPL, TSS
700 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
701 * reference for JMP, CALL, IRET.
702 *
703 * If the task switch is due to a software interrupt or hardware exception,
704 * the caller is responsible for validating the TSS selector and descriptor. See
705 * Intel Instruction reference for INT n.
706 *
707 * @returns VBox strict status code.
708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
709 * @param enmTaskSwitch The cause of the task switch.
710 * @param uNextEip The EIP effective after the task switch.
711 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
712 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
713 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
714 * @param SelTss The TSS selector of the new task.
715 * @param pNewDescTss Pointer to the new TSS descriptor.
716 */
717VBOXSTRICTRC
718iemTaskSwitch(PVMCPUCC pVCpu,
719 IEMTASKSWITCH enmTaskSwitch,
720 uint32_t uNextEip,
721 uint32_t fFlags,
722 uint16_t uErr,
723 uint64_t uCr2,
724 RTSEL SelTss,
725 PIEMSELDESC pNewDescTss) RT_NOEXCEPT
726{
727 Assert(!IEM_IS_REAL_MODE(pVCpu));
728 Assert(!IEM_IS_64BIT_CODE(pVCpu));
729 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
730
731 uint32_t const uNewTssType = pNewDescTss->Legacy.Gate.u4Type;
732 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
733 || uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
734 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
735 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
736
737 bool const fIsNewTss386 = ( uNewTssType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
738 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
739
740 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTss=%#x fIsNewTss386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTss,
741 fIsNewTss386, pVCpu->cpum.GstCtx.eip, uNextEip));
742
743 /* Update CR2 in case it's a page-fault. */
744 /** @todo This should probably be done much earlier in IEM/PGM. See
745 * @bugref{5653#c49}. */
746 if (fFlags & IEM_XCPT_FLAGS_CR2)
747 pVCpu->cpum.GstCtx.cr2 = uCr2;
748
749 /*
750 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
751 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
752 */
753 uint32_t const uNewTssLimit = pNewDescTss->Legacy.Gen.u16LimitLow | (pNewDescTss->Legacy.Gen.u4LimitHigh << 16);
754 uint32_t const uNewTssLimitMin = fIsNewTss386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
755 if (uNewTssLimit < uNewTssLimitMin)
756 {
757 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTssLimit=%#x uNewTssLimitMin=%#x -> #TS\n",
758 enmTaskSwitch, uNewTssLimit, uNewTssLimitMin));
759 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
760 }
761
762 /*
763 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
764 * The new TSS must have been read and validated (DPL, limits etc.) before a
765 * task-switch VM-exit commences.
766 *
767 * See Intel spec. 25.4.2 "Treatment of Task Switches".
768 */
769 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
770 {
771 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTss));
772 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTss, uNextEip - pVCpu->cpum.GstCtx.eip);
773 }
774
775 /*
776 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
777 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
778 */
779 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
780 {
781 uint64_t const uExitInfo1 = SelTss;
782 uint64_t uExitInfo2 = uErr;
783 switch (enmTaskSwitch)
784 {
785 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
786 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
787 default: break;
788 }
789 if (fFlags & IEM_XCPT_FLAGS_ERR)
790 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
791 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
792 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
793
794 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
795 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
796 RT_NOREF2(uExitInfo1, uExitInfo2);
797 }
798
799 /*
800 * Check the current TSS limit. The last written byte to the current TSS during the
801 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
802 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
803 *
804 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
805 * end up with smaller than "legal" TSS limits.
806 */
807 uint32_t const uCurTssLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
808 uint32_t const uCurTssLimitMin = fIsNewTss386 ? 0x5F : 0x29;
809 if (uCurTssLimit < uCurTssLimitMin)
810 {
811 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTssLimit=%#x uCurTssLimitMin=%#x -> #TS\n",
812 enmTaskSwitch, uCurTssLimit, uCurTssLimitMin));
813 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTss & X86_SEL_MASK_OFF_RPL);
814 }
815
816 /*
817 * Verify that the new TSS can be accessed and map it. Map only the required contents
818 * and not the entire TSS.
819 */
820 uint8_t bUnmapInfoNewTss;
821 void *pvNewTss;
822 uint32_t const cbNewTss = uNewTssLimitMin + 1;
823 RTGCPTR const GCPtrNewTss = X86DESC_BASE(&pNewDescTss->Legacy);
824 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
825 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
826 * not perform correct translation if this happens. See Intel spec. 7.2.1
827 * "Task-State Segment". */
828 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTss, &bUnmapInfoNewTss, cbNewTss, UINT8_MAX, GCPtrNewTss, IEM_ACCESS_SYS_RW, 0);
829/** @todo Not cleaning up bUnmapInfoNewTss mapping in any early exits here.
830 * Consider wrapping the remainder into a function for simpler cleanup. */
831 if (rcStrict != VINF_SUCCESS)
832 {
833 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTss=%u uNewTssLimit=%u rc=%Rrc\n", enmTaskSwitch,
834 cbNewTss, uNewTssLimit, VBOXSTRICTRC_VAL(rcStrict)));
835 return rcStrict;
836 }
837
838 /*
839 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
840 */
841 uint32_t fEFlags = pVCpu->cpum.GstCtx.eflags.u;
842 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
843 || enmTaskSwitch == IEMTASKSWITCH_IRET)
844 {
845 uint8_t bUnmapInfoDescCurTss;
846 PX86DESC pDescCurTss;
847 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTss, &bUnmapInfoDescCurTss, sizeof(*pDescCurTss), UINT8_MAX,
848 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
849 if (rcStrict != VINF_SUCCESS)
850 {
851 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
852 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
853 return rcStrict;
854 }
855
856 pDescCurTss->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
857 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoDescCurTss);
858 if (rcStrict != VINF_SUCCESS)
859 {
860 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
861 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
862 return rcStrict;
863 }
864
865 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
866 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
867 {
868 Assert( uNewTssType == X86_SEL_TYPE_SYS_286_TSS_BUSY
869 || uNewTssType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
870 fEFlags &= ~X86_EFL_NT;
871 }
872 }
873
874 /*
875 * Save the CPU state into the current TSS.
876 */
877 RTGCPTR const GCPtrCurTss = pVCpu->cpum.GstCtx.tr.u64Base;
878 if (GCPtrNewTss == GCPtrCurTss)
879 {
880 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTss));
881 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
882 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->cpum.GstCtx.eax,
883 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
884 pVCpu->cpum.GstCtx.ldtr.Sel));
885 }
886 if (fIsNewTss386)
887 {
888 /*
889 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
890 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
891 */
892 uint8_t bUnmapInfoCurTss32;
893 void *pvCurTss32;
894 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS32, eip);
895 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
896 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
897 rcStrict = iemMemMap(pVCpu, &pvCurTss32, &bUnmapInfoCurTss32, cbCurTss, UINT8_MAX,
898 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
899 if (rcStrict != VINF_SUCCESS)
900 {
901 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
902 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
903 return rcStrict;
904 }
905
906 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
907 PX86TSS32 pCurTss32 = (PX86TSS32)((uintptr_t)pvCurTss32 - offCurTss);
908 pCurTss32->eip = uNextEip;
909 pCurTss32->eflags = fEFlags;
910 pCurTss32->eax = pVCpu->cpum.GstCtx.eax;
911 pCurTss32->ecx = pVCpu->cpum.GstCtx.ecx;
912 pCurTss32->edx = pVCpu->cpum.GstCtx.edx;
913 pCurTss32->ebx = pVCpu->cpum.GstCtx.ebx;
914 pCurTss32->esp = pVCpu->cpum.GstCtx.esp;
915 pCurTss32->ebp = pVCpu->cpum.GstCtx.ebp;
916 pCurTss32->esi = pVCpu->cpum.GstCtx.esi;
917 pCurTss32->edi = pVCpu->cpum.GstCtx.edi;
918 pCurTss32->es = pVCpu->cpum.GstCtx.es.Sel;
919 pCurTss32->cs = pVCpu->cpum.GstCtx.cs.Sel;
920 pCurTss32->ss = pVCpu->cpum.GstCtx.ss.Sel;
921 pCurTss32->ds = pVCpu->cpum.GstCtx.ds.Sel;
922 pCurTss32->fs = pVCpu->cpum.GstCtx.fs.Sel;
923 pCurTss32->gs = pVCpu->cpum.GstCtx.gs.Sel;
924
925 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss32);
926 if (rcStrict != VINF_SUCCESS)
927 {
928 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
929 VBOXSTRICTRC_VAL(rcStrict)));
930 return rcStrict;
931 }
932 }
933 else
934 {
935 /*
936 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
937 */
938 uint8_t bUnmapInfoCurTss16;
939 void *pvCurTss16;
940 uint32_t const offCurTss = RT_UOFFSETOF(X86TSS16, ip);
941 uint32_t const cbCurTss = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
942 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
943 rcStrict = iemMemMap(pVCpu, &pvCurTss16, &bUnmapInfoCurTss16, cbCurTss, UINT8_MAX,
944 GCPtrCurTss + offCurTss, IEM_ACCESS_SYS_RW, 0);
945 if (rcStrict != VINF_SUCCESS)
946 {
947 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTss=%#RGv cb=%u rc=%Rrc\n",
948 enmTaskSwitch, GCPtrCurTss, cbCurTss, VBOXSTRICTRC_VAL(rcStrict)));
949 return rcStrict;
950 }
951
952 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTss..cbCurTss). */
953 PX86TSS16 pCurTss16 = (PX86TSS16)((uintptr_t)pvCurTss16 - offCurTss);
954 pCurTss16->ip = uNextEip;
955 pCurTss16->flags = (uint16_t)fEFlags;
956 pCurTss16->ax = pVCpu->cpum.GstCtx.ax;
957 pCurTss16->cx = pVCpu->cpum.GstCtx.cx;
958 pCurTss16->dx = pVCpu->cpum.GstCtx.dx;
959 pCurTss16->bx = pVCpu->cpum.GstCtx.bx;
960 pCurTss16->sp = pVCpu->cpum.GstCtx.sp;
961 pCurTss16->bp = pVCpu->cpum.GstCtx.bp;
962 pCurTss16->si = pVCpu->cpum.GstCtx.si;
963 pCurTss16->di = pVCpu->cpum.GstCtx.di;
964 pCurTss16->es = pVCpu->cpum.GstCtx.es.Sel;
965 pCurTss16->cs = pVCpu->cpum.GstCtx.cs.Sel;
966 pCurTss16->ss = pVCpu->cpum.GstCtx.ss.Sel;
967 pCurTss16->ds = pVCpu->cpum.GstCtx.ds.Sel;
968
969 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoCurTss16);
970 if (rcStrict != VINF_SUCCESS)
971 {
972 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
973 VBOXSTRICTRC_VAL(rcStrict)));
974 return rcStrict;
975 }
976 }
977
978 /*
979 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
980 */
981 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
982 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
983 {
984 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
985 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTss;
986 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
987 }
988
989 /*
990 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
991 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
992 */
993 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
994 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
995 bool fNewDebugTrap;
996 if (fIsNewTss386)
997 {
998 PCX86TSS32 pNewTss32 = (PCX86TSS32)pvNewTss;
999 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTss32->cr3 : 0;
1000 uNewEip = pNewTss32->eip;
1001 uNewEflags = pNewTss32->eflags;
1002 uNewEax = pNewTss32->eax;
1003 uNewEcx = pNewTss32->ecx;
1004 uNewEdx = pNewTss32->edx;
1005 uNewEbx = pNewTss32->ebx;
1006 uNewEsp = pNewTss32->esp;
1007 uNewEbp = pNewTss32->ebp;
1008 uNewEsi = pNewTss32->esi;
1009 uNewEdi = pNewTss32->edi;
1010 uNewES = pNewTss32->es;
1011 uNewCS = pNewTss32->cs;
1012 uNewSS = pNewTss32->ss;
1013 uNewDS = pNewTss32->ds;
1014 uNewFS = pNewTss32->fs;
1015 uNewGS = pNewTss32->gs;
1016 uNewLdt = pNewTss32->selLdt;
1017 fNewDebugTrap = RT_BOOL(pNewTss32->fDebugTrap);
1018 }
1019 else
1020 {
1021 PCX86TSS16 pNewTss16 = (PCX86TSS16)pvNewTss;
1022 uNewCr3 = 0;
1023 uNewEip = pNewTss16->ip;
1024 uNewEflags = pNewTss16->flags;
1025 uNewEax = UINT32_C(0xffff0000) | pNewTss16->ax;
1026 uNewEcx = UINT32_C(0xffff0000) | pNewTss16->cx;
1027 uNewEdx = UINT32_C(0xffff0000) | pNewTss16->dx;
1028 uNewEbx = UINT32_C(0xffff0000) | pNewTss16->bx;
1029 uNewEsp = UINT32_C(0xffff0000) | pNewTss16->sp;
1030 uNewEbp = UINT32_C(0xffff0000) | pNewTss16->bp;
1031 uNewEsi = UINT32_C(0xffff0000) | pNewTss16->si;
1032 uNewEdi = UINT32_C(0xffff0000) | pNewTss16->di;
1033 uNewES = pNewTss16->es;
1034 uNewCS = pNewTss16->cs;
1035 uNewSS = pNewTss16->ss;
1036 uNewDS = pNewTss16->ds;
1037 uNewFS = 0;
1038 uNewGS = 0;
1039 uNewLdt = pNewTss16->selLdt;
1040 fNewDebugTrap = false;
1041 }
1042
1043 if (GCPtrNewTss == GCPtrCurTss)
1044 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
1045 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
1046
1047 /*
1048 * We're done accessing the new TSS.
1049 */
1050 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
1051 if (rcStrict != VINF_SUCCESS)
1052 {
1053 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
1054 return rcStrict;
1055 }
1056
1057 /*
1058 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
1059 */
1060 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
1061 {
1062 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTss, &bUnmapInfoNewTss, sizeof(*pNewDescTss), UINT8_MAX,
1063 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTss & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
1064 if (rcStrict != VINF_SUCCESS)
1065 {
1066 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
1067 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
1068 return rcStrict;
1069 }
1070
1071 /* Check that the descriptor indicates the new TSS is available (not busy). */
1072 AssertMsg( pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
1073 || pNewDescTss->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
1074 ("Invalid TSS descriptor type=%#x", pNewDescTss->Legacy.Gate.u4Type));
1075
1076 pNewDescTss->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1077 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoNewTss);
1078 if (rcStrict != VINF_SUCCESS)
1079 {
1080 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
1081 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
1082 return rcStrict;
1083 }
1084 }
1085
1086 /*
1087 * From this point on, we're technically in the new task. We will defer exceptions
1088 * until the completion of the task switch but before executing any instructions in the new task.
1089 */
1090 pVCpu->cpum.GstCtx.tr.Sel = SelTss;
1091 pVCpu->cpum.GstCtx.tr.ValidSel = SelTss;
1092 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
1093 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTss->Legacy);
1094 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTss->Legacy);
1095 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTss->Legacy);
1096 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
1097
1098 /* Set the busy bit in TR. */
1099 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1100
1101 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
1102 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
1103 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
1104 {
1105 uNewEflags |= X86_EFL_NT;
1106 }
1107
1108 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
1109 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
1110 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
1111
1112 pVCpu->cpum.GstCtx.eip = uNewEip;
1113 pVCpu->cpum.GstCtx.eax = uNewEax;
1114 pVCpu->cpum.GstCtx.ecx = uNewEcx;
1115 pVCpu->cpum.GstCtx.edx = uNewEdx;
1116 pVCpu->cpum.GstCtx.ebx = uNewEbx;
1117 pVCpu->cpum.GstCtx.esp = uNewEsp;
1118 pVCpu->cpum.GstCtx.ebp = uNewEbp;
1119 pVCpu->cpum.GstCtx.esi = uNewEsi;
1120 pVCpu->cpum.GstCtx.edi = uNewEdi;
1121
1122 uNewEflags &= X86_EFL_LIVE_MASK;
1123 uNewEflags |= X86_EFL_RA1_MASK;
1124 IEMMISC_SET_EFL(pVCpu, uNewEflags);
1125
1126 /*
1127 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
1128 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
1129 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
1130 */
1131 pVCpu->cpum.GstCtx.es.Sel = uNewES;
1132 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
1133
1134 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
1135 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
1136
1137 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1138 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
1139
1140 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
1141 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
1142
1143 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
1144 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
1145
1146 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
1147 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
1148 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
1149
1150 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
1151 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
1152 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
1153 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
1154
1155 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1156 {
1157 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
1158 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
1159 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
1160 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
1161 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
1162 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
1163 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
1164 }
1165
1166 /*
1167 * Switch CR3 for the new task.
1168 */
1169 if ( fIsNewTss386
1170 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
1171 {
1172 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
1173 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
1174 AssertRCSuccessReturn(rc, rc);
1175
1176 /* Inform PGM. */
1177 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
1178 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
1179 AssertRCReturn(rc, rc);
1180 /* ignore informational status codes */
1181
1182 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
1183 }
1184
1185 /*
1186 * Switch LDTR for the new task.
1187 */
1188 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
1189 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
1190 else
1191 {
1192 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
1193
1194 IEMSELDESC DescNewLdt;
1195 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
1196 if (rcStrict != VINF_SUCCESS)
1197 {
1198 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
1199 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
1200 return rcStrict;
1201 }
1202 if ( !DescNewLdt.Legacy.Gen.u1Present
1203 || DescNewLdt.Legacy.Gen.u1DescType
1204 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
1205 {
1206 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
1207 uNewLdt, DescNewLdt.Legacy.u));
1208 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
1209 }
1210
1211 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
1212 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1213 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
1214 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
1215 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
1216 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1217 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1219 }
1220
1221 IEMSELDESC DescSS;
1222 if (IEM_IS_V86_MODE(pVCpu))
1223 {
1224 IEM_SET_CPL(pVCpu, 3);
1225 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
1226 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
1227 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
1228 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
1229 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
1230 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
1231
1232 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
1233 DescSS.Legacy.u = 0;
1234 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
1235 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
1236 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
1237 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
1238 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
1239 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
1240 DescSS.Legacy.Gen.u2Dpl = 3;
1241 }
1242 else
1243 {
1244 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
1245
1246 /*
1247 * Load the stack segment for the new task.
1248 */
1249 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
1250 {
1251 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
1252 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1253 }
1254
1255 /* Fetch the descriptor. */
1256 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
1257 if (rcStrict != VINF_SUCCESS)
1258 {
1259 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
1260 VBOXSTRICTRC_VAL(rcStrict)));
1261 return rcStrict;
1262 }
1263
1264 /* SS must be a data segment and writable. */
1265 if ( !DescSS.Legacy.Gen.u1DescType
1266 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1267 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
1268 {
1269 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
1270 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
1271 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1272 }
1273
1274 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
1275 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
1276 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
1277 {
1278 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
1279 uNewCpl));
1280 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1281 }
1282
1283 /* Is it there? */
1284 if (!DescSS.Legacy.Gen.u1Present)
1285 {
1286 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
1287 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
1288 }
1289
1290 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
1291 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
1292
1293 /* Set the accessed bit before committing the result into SS. */
1294 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1295 {
1296 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
1297 if (rcStrict != VINF_SUCCESS)
1298 return rcStrict;
1299 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1300 }
1301
1302 /* Commit SS. */
1303 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
1304 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
1305 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1306 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
1307 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
1308 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1309 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1310
1311 /* CPL has changed, update IEM before loading rest of segments. */
1312 IEM_SET_CPL(pVCpu, uNewCpl);
1313
1314 /*
1315 * Load the data segments for the new task.
1316 */
1317 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
1318 if (rcStrict != VINF_SUCCESS)
1319 return rcStrict;
1320 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
1321 if (rcStrict != VINF_SUCCESS)
1322 return rcStrict;
1323 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
1324 if (rcStrict != VINF_SUCCESS)
1325 return rcStrict;
1326 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329
1330 /*
1331 * Load the code segment for the new task.
1332 */
1333 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
1334 {
1335 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
1336 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1337 }
1338
1339 /* Fetch the descriptor. */
1340 IEMSELDESC DescCS;
1341 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
1342 if (rcStrict != VINF_SUCCESS)
1343 {
1344 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
1345 return rcStrict;
1346 }
1347
1348 /* CS must be a code segment. */
1349 if ( !DescCS.Legacy.Gen.u1DescType
1350 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1351 {
1352 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
1353 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
1354 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1355 }
1356
1357 /* For conforming CS, DPL must be less than or equal to the RPL. */
1358 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1359 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
1360 {
1361 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
1362 DescCS.Legacy.Gen.u2Dpl));
1363 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1364 }
1365
1366 /* For non-conforming CS, DPL must match RPL. */
1367 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1368 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
1369 {
1370 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
1371 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
1372 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1373 }
1374
1375 /* Is it there? */
1376 if (!DescCS.Legacy.Gen.u1Present)
1377 {
1378 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
1379 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
1380 }
1381
1382 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
1383 u64Base = X86DESC_BASE(&DescCS.Legacy);
1384
1385 /* Set the accessed bit before committing the result into CS. */
1386 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1387 {
1388 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
1389 if (rcStrict != VINF_SUCCESS)
1390 return rcStrict;
1391 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1392 }
1393
1394 /* Commit CS. */
1395 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
1396 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
1397 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1398 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
1399 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
1400 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1401 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1402 }
1403
1404 /* Make sure the CPU mode is correct. */
1405 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
1406 if (fExecNew != pVCpu->iem.s.fExec)
1407 Log(("iemTaskSwitch: fExec %#x -> %#x (xor %#x)\n", pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
1408 pVCpu->iem.s.fExec = fExecNew;
1409
1410 /** @todo Debug trap. */
1411 if (fIsNewTss386 && fNewDebugTrap)
1412 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
1413
1414 /*
1415 * Construct the error code masks based on what caused this task switch.
1416 * See Intel Instruction reference for INT.
1417 */
1418 uint16_t uExt;
1419 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
1420 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1421 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
1422 uExt = 1;
1423 else
1424 uExt = 0;
1425
1426 /*
1427 * Push any error code on to the new stack.
1428 */
1429 if (fFlags & IEM_XCPT_FLAGS_ERR)
1430 {
1431 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
1432 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
1433 uint8_t const cbStackFrame = fIsNewTss386 ? 4 : 2;
1434
1435 /* Check that there is sufficient space on the stack. */
1436 /** @todo Factor out segment limit checking for normal/expand down segments
1437 * into a separate function. */
1438 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
1439 {
1440 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
1441 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
1442 {
1443 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
1444 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
1445 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
1446 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
1447 }
1448 }
1449 else
1450 {
1451 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
1452 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
1453 {
1454 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
1455 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
1456 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
1457 }
1458 }
1459
1460
1461 if (fIsNewTss386)
1462 rcStrict = iemMemStackPushU32(pVCpu, uErr);
1463 else
1464 rcStrict = iemMemStackPushU16(pVCpu, uErr);
1465 if (rcStrict != VINF_SUCCESS)
1466 {
1467 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
1468 fIsNewTss386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
1469 return rcStrict;
1470 }
1471 }
1472
1473 /* Check the new EIP against the new CS limit. */
1474 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
1475 {
1476        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
1477 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
1478 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
1479 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
1480 }
1481
1482 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
1483 pVCpu->cpum.GstCtx.ss.Sel));
1484 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1485}
1486
1487
1488/**
1489 * Implements exceptions and interrupts for protected mode.
1490 *
1491 * @returns VBox strict status code.
1492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1493 * @param cbInstr The number of bytes to offset rIP by in the return
1494 * address.
1495 * @param u8Vector The interrupt / exception vector number.
1496 * @param fFlags The flags.
1497 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1498 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1499 */
1500static VBOXSTRICTRC
1501iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
1502 uint8_t cbInstr,
1503 uint8_t u8Vector,
1504 uint32_t fFlags,
1505 uint16_t uErr,
1506 uint64_t uCr2) RT_NOEXCEPT
1507{
1508 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1509
1510 /*
1511 * Read the IDT entry.
1512 */
1513 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
1514 {
1515 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1516 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1517 }
1518 X86DESC Idte;
1519 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
1520 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
1521 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1522 {
1523 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1524 return rcStrict;
1525 }
1526 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x - from %04x:%08RX64 efl=%#x depth=%d\n",
1527 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
1528 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow,
1529 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags.u, pVCpu->iem.s.cXcptRecursions));
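    /* For orientation, the gate descriptor fields used below (see the Gate view
       of X86DESC in x86.h for the authoritative layout of the 8-byte IDT entry):
           u16OffsetLow  - handler offset bits 15:0
           u16Sel        - target code segment (or TSS) selector
           u5ParmCount   - parameter count, only meaningful for call gates
           u4Type        - gate type (task, 16/32-bit interrupt or trap gate)
           u1DescType    - must be 0 (system descriptor)
           u2Dpl         - descriptor privilege level
           u1Present     - present bit
           u16OffsetHigh - handler offset bits 31:16 */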
1530
1531 /*
1532 * Check the descriptor type, DPL and such.
1533 * ASSUMES this is done in the same order as described for call-gate calls.
1534 */
1535 if (Idte.Gate.u1DescType)
1536 {
1537 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1538 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1539 }
1540 bool fTaskGate = false;
1541 uint8_t f32BitGate = true;
1542 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
1543 switch (Idte.Gate.u4Type)
1544 {
1545 case X86_SEL_TYPE_SYS_UNDEFINED:
1546 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1547 case X86_SEL_TYPE_SYS_LDT:
1548 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1549 case X86_SEL_TYPE_SYS_286_CALL_GATE:
1550 case X86_SEL_TYPE_SYS_UNDEFINED2:
1551 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1552 case X86_SEL_TYPE_SYS_UNDEFINED3:
1553 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1554 case X86_SEL_TYPE_SYS_386_CALL_GATE:
1555 case X86_SEL_TYPE_SYS_UNDEFINED4:
1556 {
1557 /** @todo check what actually happens when the type is wrong...
1558 * esp. call gates. */
1559 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
1560 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1561 }
1562
1563 case X86_SEL_TYPE_SYS_286_INT_GATE:
1564 f32BitGate = false;
1565 RT_FALL_THRU();
1566 case X86_SEL_TYPE_SYS_386_INT_GATE:
1567 fEflToClear |= X86_EFL_IF;
1568 break;
1569
1570 case X86_SEL_TYPE_SYS_TASK_GATE:
1571 fTaskGate = true;
1572#ifndef IEM_IMPLEMENTS_TASKSWITCH
1573 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
1574#endif
1575 break;
1576
1577 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
1578 f32BitGate = false;
1579 break;
1580 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
1581 break;
1582
1583 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1584 }
1585
1586 /* Check DPL against CPL if applicable. */
1587 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
1588 {
1589 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
1590 {
1591 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
1592 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1593 }
1594 }
1595
1596 /* Is it there? */
1597 if (!Idte.Gate.u1Present)
1598 {
1599 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
1600 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1601 }
1602
1603 /* Is it a task-gate? */
1604 if (fTaskGate)
1605 {
1606 /*
1607 * Construct the error code masks based on what caused this task switch.
1608 * See Intel Instruction reference for INT.
1609 */
1610 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1611 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
1612 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
1613 RTSEL SelTss = Idte.Gate.u16Sel;
1614
1615 /*
1616 * Fetch the TSS descriptor in the GDT.
1617 */
1618 IEMSELDESC DescTSS;
1619 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTss, X86_XCPT_GP, (SelTss & uSelMask) | uExt);
1620 if (rcStrict != VINF_SUCCESS)
1621 {
1622 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTss,
1623 VBOXSTRICTRC_VAL(rcStrict)));
1624 return rcStrict;
1625 }
1626
1627 /* The TSS descriptor must be a system segment and be available (not busy). */
1628 if ( DescTSS.Legacy.Gen.u1DescType
1629 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
1630 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
1631 {
1632 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
1633 u8Vector, SelTss, DescTSS.Legacy.au64));
1634 return iemRaiseGeneralProtectionFault(pVCpu, (SelTss & uSelMask) | uExt);
1635 }
1636
1637 /* The TSS must be present. */
1638 if (!DescTSS.Legacy.Gen.u1Present)
1639 {
1640 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTss, DescTSS.Legacy.au64));
1641 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTss & uSelMask) | uExt);
1642 }
1643
1644 /* Do the actual task switch. */
1645 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
1646 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
1647 fFlags, uErr, uCr2, SelTss, &DescTSS);
1648 }
1649
1650 /* A null CS is bad. */
1651 RTSEL NewCS = Idte.Gate.u16Sel;
1652 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
1653 {
1654 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
1655 return iemRaiseGeneralProtectionFault0(pVCpu);
1656 }
1657
1658 /* Fetch the descriptor for the new CS. */
1659 IEMSELDESC DescCS;
1660 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
1661 if (rcStrict != VINF_SUCCESS)
1662 {
1663 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
1664 return rcStrict;
1665 }
1666
1667 /* Must be a code segment. */
1668 if (!DescCS.Legacy.Gen.u1DescType)
1669 {
1670 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1671 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1672 }
1673 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1674 {
1675 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
1676 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1677 }
1678
1679 /* Don't allow lowering the privilege level. */
1680 /** @todo Does the lowering of privileges apply to software interrupts
1681 * only? This has bearings on the more-privileged or
1682 * same-privilege stack behavior further down. A testcase would
1683 * be nice. */
1684 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
1685 {
1686 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
1687 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
1688 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
1689 }
1690
1691 /* Make sure the selector is present. */
1692 if (!DescCS.Legacy.Gen.u1Present)
1693 {
1694 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
1695 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
1696 }
1697
1698#ifdef LOG_ENABLED
1699 /* If software interrupt, try decode it if logging is enabled and such. */
1700    /* If software interrupt, try to decode it if logging is enabled and such. */
1701 && LogIsItEnabled(RTLOGGRPFLAGS_ENABLED, LOG_GROUP_IEM_SYSCALL))
1702 iemLogSyscallProtModeInt(pVCpu, u8Vector, cbInstr);
1703#endif
1704
1705 /* Check the new EIP against the new CS limit. */
1706 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
1707 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
1708 ? Idte.Gate.u16OffsetLow
1709 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
1710 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
1711 if (uNewEip > cbLimitCS)
1712 {
1713 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
1714 u8Vector, uNewEip, cbLimitCS, NewCS));
1715 return iemRaiseGeneralProtectionFault(pVCpu, 0);
1716 }
1717 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
1718
1719 /* Calc the flag image to push. */
1720 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1721 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
1722 fEfl &= ~X86_EFL_RF;
1723 else
1724 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
1725
1726 /* From V8086 mode only go to CPL 0. */
1727 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
1728 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
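    /* (For a conforming code segment the handler keeps running at the current
       CPL; for a non-conforming one it runs at CS.DPL, cf. the long mode
       variant below.) */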
1729 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
1730 {
1731 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
1732 return iemRaiseGeneralProtectionFault(pVCpu, 0);
1733 }
1734
1735 /*
1736 * If the privilege level changes, we need to get a new stack from the TSS.
1737 * This in turns means validating the new SS and ESP...
1738 */
1739 if (uNewCpl != IEM_GET_CPL(pVCpu))
1740 {
1741 RTSEL NewSS;
1742 uint32_t uNewEsp;
1743 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
1744 if (rcStrict != VINF_SUCCESS)
1745 return rcStrict;
1746
1747 IEMSELDESC DescSS;
1748 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
1749 if (rcStrict != VINF_SUCCESS)
1750 return rcStrict;
1751 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
1752 if (!DescSS.Legacy.Gen.u1DefBig)
1753 {
1754 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
1755 uNewEsp = (uint16_t)uNewEsp;
1756 }
1757
1758 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
1759
1760 /* Check that there is sufficient space for the stack frame. */
1761 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
1762 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
1763 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
1764 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
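        /* Rough sketch of the frame pushed below, assuming a 32-bit gate
           (a 16-bit gate pushes the same items as words, halving the sizes):
               ring change:  [uErr] EIP CS EFLAGS ESP SS              = 20/24 bytes
               from V8086:   [uErr] EIP CS EFLAGS ESP SS ES DS FS GS  = 36/40 bytes */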
1765
1766 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
1767 {
1768 if ( uNewEsp - 1 > cbLimitSS
1769 || uNewEsp < cbStackFrame)
1770 {
1771 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
1772 u8Vector, NewSS, uNewEsp, cbStackFrame));
1773 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
1774 }
1775 }
1776 else
1777 {
1778 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
1779 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
1780 {
1781 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
1782 u8Vector, NewSS, uNewEsp, cbStackFrame));
1783 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
1784 }
1785 }
1786
1787 /*
1788 * Start making changes.
1789 */
1790
1791 /* Set the new CPL so that stack accesses use it. */
1792 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
1793 IEM_SET_CPL(pVCpu, uNewCpl);
1794
1795 /* Create the stack frame. */
1796 uint8_t bUnmapInfoStackFrame;
1797 RTPTRUNION uStackFrame;
1798 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
1799 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
1800 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
1801 if (rcStrict != VINF_SUCCESS)
1802 return rcStrict;
1803 if (f32BitGate)
1804 {
1805 if (fFlags & IEM_XCPT_FLAGS_ERR)
1806 *uStackFrame.pu32++ = uErr;
1807 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1808 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
1809 uStackFrame.pu32[2] = fEfl;
1810 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
1811 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
1812 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
1813 if (fEfl & X86_EFL_VM)
1814 {
1815 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
1816 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
1817 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
1818 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
1819 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
1820 }
1821 }
1822 else
1823 {
1824 if (fFlags & IEM_XCPT_FLAGS_ERR)
1825 *uStackFrame.pu16++ = uErr;
1826 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1827 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
1828 uStackFrame.pu16[2] = fEfl;
1829 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
1830 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
1831 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
1832 if (fEfl & X86_EFL_VM)
1833 {
1834 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
1835 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
1836 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
1837 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
1838 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
1839 }
1840 }
1841 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
1842 if (rcStrict != VINF_SUCCESS)
1843 return rcStrict;
1844
1845 /* Mark the selectors 'accessed' (hope this is the correct time). */
1846        /** @todo testcase: exactly _when_ are the accessed bits set - before or
1847 * after pushing the stack frame? (Write protect the gdt + stack to
1848 * find out.) */
1849 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1850 {
1851 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
1852 if (rcStrict != VINF_SUCCESS)
1853 return rcStrict;
1854 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1855 }
1856
1857 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1858 {
1859 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
1860 if (rcStrict != VINF_SUCCESS)
1861 return rcStrict;
1862 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1863 }
1864
1865 /*
1866         * Start committing the register changes (joins with the DPL=CPL branch).
1867 */
1868 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
1869 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
1870 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
1871 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
1872 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
1873 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
1874 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
1875 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
1876 * SP is loaded).
1877 * Need to check the other combinations too:
1878 * - 16-bit TSS, 32-bit handler
1879 * - 32-bit TSS, 16-bit handler */
1880 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1881 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
1882 else
1883 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
1884
1885 if (fEfl & X86_EFL_VM)
1886 {
1887 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
1888 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
1889 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
1890 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
1891 }
1892 }
1893 /*
1894 * Same privilege, no stack change and smaller stack frame.
1895 */
1896 else
1897 {
1898 uint64_t uNewRsp;
1899 uint8_t bUnmapInfoStackFrame;
1900 RTPTRUNION uStackFrame;
1901 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
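        /* No stack switch here, so only [uErr] EIP, CS and EFLAGS are pushed:
           12/16 bytes through a 32-bit gate, 6/8 bytes through a 16-bit one. */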
1902 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1,
1903 &uStackFrame.pv, &bUnmapInfoStackFrame, &uNewRsp);
1904 if (rcStrict != VINF_SUCCESS)
1905 return rcStrict;
1906
1907 if (f32BitGate)
1908 {
1909 if (fFlags & IEM_XCPT_FLAGS_ERR)
1910 *uStackFrame.pu32++ = uErr;
1911 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1912 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
1913 uStackFrame.pu32[2] = fEfl;
1914 }
1915 else
1916 {
1917 if (fFlags & IEM_XCPT_FLAGS_ERR)
1918 *uStackFrame.pu16++ = uErr;
1919 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
1920 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | IEM_GET_CPL(pVCpu);
1921 uStackFrame.pu16[2] = fEfl;
1922 }
1923 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame); /* don't use the commit here */
1924 if (rcStrict != VINF_SUCCESS)
1925 return rcStrict;
1926
1927 /* Mark the CS selector as 'accessed'. */
1928 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1929 {
1930 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
1931 if (rcStrict != VINF_SUCCESS)
1932 return rcStrict;
1933 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1934 }
1935
1936 /*
1937 * Start committing the register changes (joins with the other branch).
1938 */
1939 pVCpu->cpum.GstCtx.rsp = uNewRsp;
1940 }
1941
1942 /* ... register committing continues. */
1943 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1944 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
1945 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1946 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
1947 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
1948 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
1949
1950 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
1951 fEfl &= ~fEflToClear;
1952 IEMMISC_SET_EFL(pVCpu, fEfl);
1953
1954 if (fFlags & IEM_XCPT_FLAGS_CR2)
1955 pVCpu->cpum.GstCtx.cr2 = uCr2;
1956
1957 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1958 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1959
1960 /* Make sure the execution flags are correct. */
1961 uint32_t const fExecNew = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
1962 if (fExecNew != pVCpu->iem.s.fExec)
1963 Log(("iemRaiseXcptOrIntInProtMode: fExec %#x -> %#x (xor %#x)\n",
1964 pVCpu->iem.s.fExec, fExecNew, pVCpu->iem.s.fExec ^ fExecNew));
1965 pVCpu->iem.s.fExec = fExecNew;
1966 Assert(IEM_GET_CPL(pVCpu) == uNewCpl);
1967
1968 /*
1969     * Deal with debug events that follow the exception and clear inhibit flags.
1970 */
1971 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
1972 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
1973 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
1974 else
1975 {
1976 Log(("iemRaiseXcptOrIntInProtMode: Raising #DB after %#x; pending=%#x\n",
1977 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
1978 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
1979 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
1980 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
1981 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
1982 return iemRaiseDebugException(pVCpu);
1983 }
1984
1985 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1986}
1987
1988
1989/**
1990 * Implements exceptions and interrupts for long mode.
1991 *
1992 * @returns VBox strict status code.
1993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1994 * @param cbInstr The number of bytes to offset rIP by in the return
1995 * address.
1996 * @param u8Vector The interrupt / exception vector number.
1997 * @param fFlags The flags.
1998 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1999 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2000 */
2001static VBOXSTRICTRC
2002iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
2003 uint8_t cbInstr,
2004 uint8_t u8Vector,
2005 uint32_t fFlags,
2006 uint16_t uErr,
2007 uint64_t uCr2) RT_NOEXCEPT
2008{
2009 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2010
2011 /*
2012 * Read the IDT entry.
2013 */
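    /* Long mode IDT entries are 16 bytes each (two descriptor quadwords), hence
       the shift by 4 and the two 8-byte fetches below. */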
2014 uint16_t offIdt = (uint16_t)u8Vector << 4;
2015 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
2016 {
2017 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2018 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2019 }
2020 X86DESC64 Idte;
2021#ifdef _MSC_VER /* Shut up silly compiler warning. */
2022 Idte.au64[0] = 0;
2023 Idte.au64[1] = 0;
2024#endif
2025 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
2026 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2027 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
2028 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2029 {
2030 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2031 return rcStrict;
2032 }
2033 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
2034 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2035 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2036
2037 /*
2038 * Check the descriptor type, DPL and such.
2039 * ASSUMES this is done in the same order as described for call-gate calls.
2040 */
2041 if (Idte.Gate.u1DescType)
2042 {
2043 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2044 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2045 }
2046 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2047 switch (Idte.Gate.u4Type)
2048 {
2049 case AMD64_SEL_TYPE_SYS_INT_GATE:
2050 fEflToClear |= X86_EFL_IF;
2051 break;
2052 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
2053 break;
2054
2055 default:
2056 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2057 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2058 }
2059
2060 /* Check DPL against CPL if applicable. */
2061 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2062 {
2063 if (IEM_GET_CPL(pVCpu) > Idte.Gate.u2Dpl)
2064 {
2065 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, IEM_GET_CPL(pVCpu), Idte.Gate.u2Dpl));
2066 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2067 }
2068 }
2069
2070 /* Is it there? */
2071 if (!Idte.Gate.u1Present)
2072 {
2073 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
2074 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2075 }
2076
2077 /* A null CS is bad. */
2078 RTSEL NewCS = Idte.Gate.u16Sel;
2079 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
2080 {
2081 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2082 return iemRaiseGeneralProtectionFault0(pVCpu);
2083 }
2084
2085 /* Fetch the descriptor for the new CS. */
2086 IEMSELDESC DescCS;
2087 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
2088 if (rcStrict != VINF_SUCCESS)
2089 {
2090 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2091 return rcStrict;
2092 }
2093
2094 /* Must be a 64-bit code segment. */
2095 if (!DescCS.Long.Gen.u1DescType)
2096 {
2097 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2098 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2099 }
2100 if ( !DescCS.Long.Gen.u1Long
2101 || DescCS.Long.Gen.u1DefBig
2102 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
2103 {
2104 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2105 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2106 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2107 }
2108
2109 /* Don't allow lowering the privilege level. For non-conforming CS
2110 selectors, the CS.DPL sets the privilege level the trap/interrupt
2111 handler runs at. For conforming CS selectors, the CPL remains
2112 unchanged, but the CS.DPL must be <= CPL. */
2113 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2114 * when CPU in Ring-0. Result \#GP? */
2115 if (DescCS.Legacy.Gen.u2Dpl > IEM_GET_CPL(pVCpu))
2116 {
2117 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2118 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, IEM_GET_CPL(pVCpu)));
2119 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2120 }
2121
2122
2123 /* Make sure the selector is present. */
2124 if (!DescCS.Legacy.Gen.u1Present)
2125 {
2126 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2127 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
2128 }
2129
2130 /* Check that the new RIP is canonical. */
2131 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
2132 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
2133 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
2134 if (!IEM_IS_CANONICAL(uNewRip))
2135 {
2136 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
2137 return iemRaiseGeneralProtectionFault0(pVCpu);
2138 }
2139
2140 /*
2141 * If the privilege level changes or if the IST isn't zero, we need to get
2142 * a new stack from the TSS.
2143 */
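    /* Idte.Gate.u3IST selects one of the seven Interrupt Stack Table slots in
       the 64-bit TSS; a non-zero value forces a stack switch even when the CPL
       stays the same. */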
2144 uint64_t uNewRsp;
2145 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2146 ? IEM_GET_CPL(pVCpu) : DescCS.Legacy.Gen.u2Dpl;
2147 if ( uNewCpl != IEM_GET_CPL(pVCpu)
2148 || Idte.Gate.u3IST != 0)
2149 {
2150 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
2151 if (rcStrict != VINF_SUCCESS)
2152 return rcStrict;
2153 }
2154 else
2155 uNewRsp = pVCpu->cpum.GstCtx.rsp;
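    /* The stack is aligned down to a 16-byte boundary before the frame is
       pushed when dispatching through a 64-bit gate. */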
2156 uNewRsp &= ~(uint64_t)0xf;
2157
2158 /*
2159 * Calc the flag image to push.
2160 */
2161 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
2162 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
2163 fEfl &= ~X86_EFL_RF;
2164 else
2165 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
2166
2167 /*
2168 * Start making changes.
2169 */
2170 /* Set the new CPL so that stack accesses use it. */
2171 uint8_t const uOldCpl = IEM_GET_CPL(pVCpu);
2172 IEM_SET_CPL(pVCpu, uNewCpl);
2173/** @todo Setting CPL this early seems wrong as it would affect any errors we
2174 *        raise while accessing the stack and (?) GDT/LDT... */
2175
2176 /* Create the stack frame. */
2177 uint8_t bUnmapInfoStackFrame;
2178 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
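    /* The 64-bit frame is always [uErr] RIP, CS, RFLAGS, RSP, SS pushed as
       quadwords: 40 bytes, or 48 with an error code. */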
2179 RTPTRUNION uStackFrame;
2180 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, &bUnmapInfoStackFrame, cbStackFrame, UINT8_MAX,
2181 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
2182 if (rcStrict != VINF_SUCCESS)
2183 return rcStrict;
2184
2185 if (fFlags & IEM_XCPT_FLAGS_ERR)
2186 *uStackFrame.pu64++ = uErr;
2187 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
2188 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
2189 uStackFrame.pu64[2] = fEfl;
2190 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
2191 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
2192 rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfoStackFrame);
2193 if (rcStrict != VINF_SUCCESS)
2194 return rcStrict;
2195
2196    /* Mark the CS selector 'accessed' (hope this is the correct time). */
2197    /** @todo testcase: exactly _when_ are the accessed bits set - before or
2198 * after pushing the stack frame? (Write protect the gdt + stack to
2199 * find out.) */
2200 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2201 {
2202 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
2203 if (rcStrict != VINF_SUCCESS)
2204 return rcStrict;
2205 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2206 }
2207
2208 /*
2209     * Start committing the register changes.
2210 */
2211 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
2212 * hidden registers when interrupting 32-bit or 16-bit code! */
2213 if (uNewCpl != uOldCpl)
2214 {
2215 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
2216 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
2217 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2218 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
2219 pVCpu->cpum.GstCtx.ss.u64Base = 0;
2220 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
2221 }
2222 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
2223 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2224 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2225 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2226 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
2227 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2228 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2229 pVCpu->cpum.GstCtx.rip = uNewRip;
2230
2231 fEfl &= ~fEflToClear;
2232 IEMMISC_SET_EFL(pVCpu, fEfl);
2233
2234 if (fFlags & IEM_XCPT_FLAGS_CR2)
2235 pVCpu->cpum.GstCtx.cr2 = uCr2;
2236
2237 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
2238 iemRaiseXcptAdjustState(pVCpu, u8Vector);
2239
2240 iemRecalcExecModeAndCplAndAcFlags(pVCpu);
2241
2242 /*
2243     * Deal with debug events that follow the exception and clear inhibit flags.
2244 */
2245 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2246 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK))
2247 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2248 else
2249 {
2250 Log(("iemRaiseXcptOrIntInLongMode: Raising #DB after %#x; pending=%#x\n",
2251 u8Vector, pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK));
2252 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2253 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2254 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2255 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_INHIBIT_SHADOW);
2256 return iemRaiseDebugException(pVCpu);
2257 }
2258
2259 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2260}
2261
2262
2263/**
2264 * Implements exceptions and interrupts.
2265 *
2266 * All exceptions and interrupts go through this function!
2267 *
2268 * @returns VBox strict status code.
2269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2270 * @param cbInstr The number of bytes to offset rIP by in the return
2271 * address.
2272 * @param u8Vector The interrupt / exception vector number.
2273 * @param fFlags The flags.
2274 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2275 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2276 */
2277VBOXSTRICTRC
2278iemRaiseXcptOrInt(PVMCPUCC pVCpu,
2279 uint8_t cbInstr,
2280 uint8_t u8Vector,
2281 uint32_t fFlags,
2282 uint16_t uErr,
2283 uint64_t uCr2) RT_NOEXCEPT
2284{
2285 /*
2286 * Get all the state that we might need here.
2287 */
2288 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2289 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2290
2291#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
2292 /*
2293 * Flush prefetch buffer
2294 */
2295 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
2296#endif
2297
2298 /*
2299 * Perform the V8086 IOPL check and upgrade the fault without nesting.
2300 */
2301 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
2302 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
2303 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
2304 | IEM_XCPT_FLAGS_BP_INSTR
2305 | IEM_XCPT_FLAGS_ICEBP_INSTR
2306 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
2307 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
2308 {
2309 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
2310 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2311 u8Vector = X86_XCPT_GP;
2312 uErr = 0;
2313 }
2314
2315 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
2316#ifdef DBGFTRACE_ENABLED
2317 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
2318 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
2319 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
2320#endif
2321
2322 /*
2323 * Check if DBGF wants to intercept the exception.
2324 */
2325 if ( (fFlags & (IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_T_SOFT_INT))
2326 || !DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector)) )
2327 { /* likely */ }
2328 else
2329 {
2330 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + u8Vector),
2331 DBGFEVENTCTX_INVALID, 1, (uint64_t)uErr);
2332 if (rcStrict != VINF_SUCCESS)
2333 return rcStrict;
2334 }
2335
2336 /*
2337 * Evaluate whether NMI blocking should be in effect.
2338 * Normally, NMI blocking is in effect whenever we inject an NMI.
2339 */
2340 bool fBlockNmi = u8Vector == X86_XCPT_NMI
2341 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
2342
2343#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2344 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2345 {
2346 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
2347 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2348 return rcStrict0;
2349
2350 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
2351 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
2352 {
2353 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
2354 fBlockNmi = false;
2355 }
2356 }
2357#endif
2358
2359#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2360 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
2361 {
2362 /*
2363 * If the event is being injected as part of VMRUN, it isn't subject to event
2364 * intercepts in the nested-guest. However, secondary exceptions that occur
2365 * during injection of any event -are- subject to exception intercepts.
2366 *
2367 * See AMD spec. 15.20 "Event Injection".
2368 */
2369 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
2370 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
2371 else
2372 {
2373 /*
2374 * Check and handle if the event being raised is intercepted.
2375 */
2376 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2377 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
2378 return rcStrict0;
2379 }
2380 }
2381#endif
2382
2383 /*
2384 * Set NMI blocking if necessary.
2385 */
2386 if (fBlockNmi)
2387 CPUMSetInterruptInhibitingByNmi(&pVCpu->cpum.GstCtx);
2388
2389 /*
2390 * Do recursion accounting.
2391 */
2392 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
2393 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
2394 if (pVCpu->iem.s.cXcptRecursions == 0)
2395 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
2396 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
2397 else
2398 {
2399 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
2400 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
2401 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
2402
2403 if (pVCpu->iem.s.cXcptRecursions >= 4)
2404 {
2405#ifdef DEBUG_bird
2406 AssertFailed();
2407#endif
2408 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
2409 }
2410
2411 /*
2412 * Evaluate the sequence of recurring events.
2413 */
2414 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
2415 NULL /* pXcptRaiseInfo */);
2416 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
2417 { /* likely */ }
2418 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
2419 {
2420 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
2421 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
2422 u8Vector = X86_XCPT_DF;
2423 uErr = 0;
2424#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2425 /* VMX nested-guest #DF intercept needs to be checked here. */
2426 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2427 {
2428 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
2429 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2430 return rcStrict0;
2431 }
2432#endif
2433 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
2434 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
2435 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
2436 }
2437 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
2438 {
2439 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
2440 return iemInitiateCpuShutdown(pVCpu);
2441 }
2442 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
2443 {
2444 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
2445 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
2446 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
2447 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
2448 return VERR_EM_GUEST_CPU_HANG;
2449 }
2450 else
2451 {
2452 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
2453 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
2454 return VERR_IEM_IPE_9;
2455 }
2456
2457 /*
2458         * The 'EXT' bit is set when an exception occurs during delivery of an external
2459         * event (such as an interrupt or earlier exception)[1]. The privileged software
2460         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
2461         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
2462 *
2463 * [1] - Intel spec. 6.13 "Error Code"
2464 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
2465 * [3] - Intel Instruction reference for INT n.
2466 */
2467 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
2468 && (fFlags & IEM_XCPT_FLAGS_ERR)
2469 && u8Vector != X86_XCPT_PF
2470 && u8Vector != X86_XCPT_DF)
2471 {
2472 uErr |= X86_TRAP_ERR_EXTERNAL;
2473 }
2474 }
2475
2476 pVCpu->iem.s.cXcptRecursions++;
2477 pVCpu->iem.s.uCurXcpt = u8Vector;
2478 pVCpu->iem.s.fCurXcpt = fFlags;
2479 pVCpu->iem.s.uCurXcptErr = uErr;
2480 pVCpu->iem.s.uCurXcptCr2 = uCr2;
2481
2482 /*
2483 * Extensive logging.
2484 */
2485#if defined(LOG_ENABLED) && defined(IN_RING3)
2486 if (LogIs3Enabled())
2487 {
2488 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
2489 char szRegs[4096];
2490 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
2491 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
2492 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
2493 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
2494 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
2495 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
2496 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
2497 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
2498 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
2499 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
2500 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
2501 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
2502 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
2503 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
2504 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
2505 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
2506 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
2507 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
2508 " efer=%016VR{efer}\n"
2509 " pat=%016VR{pat}\n"
2510 " sf_mask=%016VR{sf_mask}\n"
2511 "krnl_gs_base=%016VR{krnl_gs_base}\n"
2512 " lstar=%016VR{lstar}\n"
2513 " star=%016VR{star} cstar=%016VR{cstar}\n"
2514 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
2515 );
2516
2517 char szInstr[256];
2518 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
2519 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
2520 szInstr, sizeof(szInstr), NULL);
2521 Log3(("%s%s\n", szRegs, szInstr));
2522 }
2523#endif /* LOG_ENABLED */
2524
2525 /*
2526 * Stats.
2527 */
2528 uint64_t const uTimestamp = ASMReadTSC();
2529 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
2530 {
2531 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
2532 EMHistoryAddExit(pVCpu,
2533 fFlags & IEM_XCPT_FLAGS_T_EXT_INT
2534 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector)
2535 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_IEM, u8Vector | 0x100),
2536 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
2537 IEMTLBTRACE_IRQ(pVCpu, u8Vector, fFlags, pVCpu->cpum.GstCtx.rflags.uBoth);
2538 }
2539 else
2540 {
2541 if (u8Vector < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts))
2542 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
2543 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
2544 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, uTimestamp);
2545 if (fFlags & IEM_XCPT_FLAGS_ERR)
2546 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_ERRCD), uErr, uTimestamp);
2547 if (fFlags & IEM_XCPT_FLAGS_CR2)
2548 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector | EMEXIT_F_XCPT_CR2), uCr2, uTimestamp);
2549 IEMTLBTRACE_XCPT(pVCpu, u8Vector, fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0, fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0, fFlags);
2550 }
2551
2552 /*
2553     * Hack alert! Convert incoming debug events to silent ones on Intel.
2554 * See the dbg+inhibit+ringxfer test in bs3-cpu-weird-1.
2555 */
2556 if ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2557 || !(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2558 || !IEM_IS_GUEST_CPU_INTEL(pVCpu))
2559 { /* ignore */ }
2560 else
2561 {
2562 Log(("iemRaiseXcptOrInt: Converting pending %#x debug events to a silent one (intel hack); vec=%#x\n",
2563 pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK, u8Vector));
2564 pVCpu->cpum.GstCtx.eflags.uBoth = (pVCpu->cpum.GstCtx.eflags.uBoth & ~CPUMCTX_DBG_HIT_DRX_MASK)
2565 | CPUMCTX_DBG_HIT_DRX_SILENT;
2566 }
2567
2568 /*
2569     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
2570 * to ensure that a stale TLB or paging cache entry will only cause one
2571 * spurious #PF.
2572 */
2573 if ( u8Vector == X86_XCPT_PF
2574 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
2575 IEMTlbInvalidatePage(pVCpu, uCr2);
2576
2577 /*
2578 * Call the mode specific worker function.
2579 */
2580 VBOXSTRICTRC rcStrict;
2581 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
2582 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2583 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
2584 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2585 else
2586 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2587
2588 /* Flush the prefetch buffer. */
2589 iemOpcodeFlushHeavy(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
2590
2591 /*
2592 * Unwind.
2593 */
2594 pVCpu->iem.s.cXcptRecursions--;
2595 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
2596 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
2597 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
2598 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel,
2599 pVCpu->cpum.GstCtx.esp, IEM_GET_CPL(pVCpu), pVCpu->iem.s.cXcptRecursions + 1));
2600 return rcStrict;
2601}
2602
2603/**
2604 * See iemRaiseXcptOrInt. Will not return.
2605 */
2606DECL_NO_RETURN(void)
2607iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
2608 uint8_t cbInstr,
2609 uint8_t u8Vector,
2610 uint32_t fFlags,
2611 uint16_t uErr,
2612 uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP
2613{
2614 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
2615 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
2616}
2617
2618
2619/** \#DE - 00. */
2620VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
2621{
2622 if (GCMIsInterceptingXcptDE(pVCpu))
2623 {
2624 int rc = GCMXcptDE(pVCpu, &pVCpu->cpum.GstCtx);
2625 if (rc == VINF_SUCCESS)
2626 {
2627 Log(("iemRaiseDivideError: Restarting instruction because of GCMXcptDE\n"));
2628            return VINF_IEM_RAISED_XCPT; /* must return non-zero status here to cause an instruction restart */
2629 }
2630 }
2631 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2632}
2633
2634
2635/** \#DE - 00. */
2636DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2637{
2638 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2639}
2640
2641
2642/** \#DB - 01.
2643 * @note This automatically clears DR7.GD. */
2644VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
2645{
2646 /* This always clears RF (via IEM_XCPT_FLAGS_DRx_INSTR_BP). */
2647 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
2648 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_DRx_INSTR_BP, 0, 0);
2649}
2650
2651
2652/** \#BR - 05. */
2653VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
2654{
2655 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2656}
2657
2658
2659/** \#UD - 06. */
2660VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
2661{
2662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2663}
2664
2665
2666/** \#UD - 06. */
2667DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2668{
2669 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2670}
2671
2672
2673/** \#NM - 07. */
2674VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
2675{
2676 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2677}
2678
2679
2680/** \#NM - 07. */
2681DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2682{
2683 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2684}
2685
2686
2687/** \#TS(err) - 0a. */
2688VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2689{
2690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2691}
2692
2693
2694/** \#TS(tr) - 0a. */
2695VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
2696{
2697 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2698 pVCpu->cpum.GstCtx.tr.Sel, 0);
2699}
2700
2701
2702/** \#TS(0) - 0a. */
2703VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
2704{
2705 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2706 0, 0);
2707}
2708
2709
2710/** \#TS(err) - 0a. */
2711VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2712{
2713 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2714 uSel & X86_SEL_MASK_OFF_RPL, 0);
2715}
2716
2717
2718/** \#NP(err) - 0b. */
2719VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2720{
2721 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2722}
2723
2724
2725/** \#NP(sel) - 0b. */
2726VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2727{
2728 Log(("iemRaiseSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
2729 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
2730 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2731 uSel & ~X86_SEL_RPL, 0);
2732}
2733
2734
2735/** \#SS(seg) - 0c. */
2736VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
2737{
2738 Log(("iemRaiseStackSelectorNotPresentBySelector: cs:rip=%04x:%RX64 uSel=%#x\n",
2739 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uSel));
2740 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2741 uSel & ~X86_SEL_RPL, 0);
2742}
2743
2744
2745/** \#SS(err) - 0c. */
2746VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2747{
2748 Log(("iemRaiseStackSelectorNotPresentWithErr: cs:rip=%04x:%RX64 uErr=%#x\n",
2749 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
2750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2751}
2752
2753
2754/** \#GP(n) - 0d. */
2755VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
2756{
2757 Log(("iemRaiseGeneralProtectionFault: cs:rip=%04x:%RX64 uErr=%#x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, uErr));
2758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
2759}
2760
2761
2762/** \#GP(0) - 0d. */
2763VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
2764{
2765 Log(("iemRaiseGeneralProtectionFault0: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2766 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2767}
2768
2769
2770/** \#GP(0) - 0d. */
2771DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2772{
2773 Log(("iemRaiseGeneralProtectionFault0Jmp: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2774 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2775}
2776
2777
2778/** \#GP(sel) - 0d. */
2779VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
2780{
2781 Log(("iemRaiseGeneralProtectionFaultBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
2782 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2783 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
2784 Sel & ~X86_SEL_RPL, 0);
2785}
2786
2787
2788/** \#GP(0) - 0d. */
2789VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
2790{
2791 Log(("iemRaiseNotCanonical: cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
2792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2793}
2794
2795
2796/** \#GP(sel) - 0d. */
2797VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
2798{
2799 Log(("iemRaiseSelectorBounds: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2800 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2801 NOREF(iSegReg); NOREF(fAccess);
2802 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2803 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2804}
2805
2806
2807/** \#GP(sel) - 0d, longjmp. */
2808DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
2809{
2810 Log(("iemRaiseSelectorBoundsJmp: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2811 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2812 NOREF(iSegReg); NOREF(fAccess);
2813 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
2814 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2815}
2816
2817
2818/** \#GP(sel) - 0d. */
2819VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
2820{
2821 Log(("iemRaiseSelectorBoundsBySelector: cs:rip=%04x:%RX64 Sel=%#x\n",
2822 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2823 NOREF(Sel);
2824 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2825}
2826
2827
2828/** \#GP(sel) - 0d, longjmp. */
2829DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP
2830{
2831 Log(("iemRaiseSelectorBoundsBySelectorJmp: cs:rip=%04x:%RX64 Sel=%#x\n",
2832 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, Sel));
2833 NOREF(Sel);
2834 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2835}
2836
2837
2838/** \#GP(sel) - 0d. */
2839VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
2840{
2841 Log(("iemRaiseSelectorInvalidAccess: cs:rip=%04x:%RX64 iSegReg=%d fAccess=%#x\n",
2842 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, iSegReg, fAccess));
2843 NOREF(iSegReg); NOREF(fAccess);
2844 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2845}
2846
2847
2848/** \#GP(sel) - 0d, longjmp. */
2849DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP
2850{
2851 NOREF(iSegReg); NOREF(fAccess);
2852 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2853}
2854
2855
2856/** \#PF(n) - 0e. */
2857VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT
2858{
2859 uint16_t uErr;
2860 switch (rc)
2861 {
2862 case VERR_PAGE_NOT_PRESENT:
2863 case VERR_PAGE_TABLE_NOT_PRESENT:
2864 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
2865 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
2866 uErr = 0;
2867 break;
2868
2869 case VERR_RESERVED_PAGE_TABLE_BITS:
2870 uErr = X86_TRAP_PF_P | X86_TRAP_PF_RSVD;
2871 break;
2872
2873 default:
2874 AssertMsgFailed(("%Rrc\n", rc));
2875 RT_FALL_THRU();
2876 case VERR_ACCESS_DENIED:
2877 uErr = X86_TRAP_PF_P;
2878 break;
2879 }
2880
2881 if (IEM_GET_CPL(pVCpu) == 3)
2882 uErr |= X86_TRAP_PF_US;
2883
2884 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
2885 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
2886 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
2887 uErr |= X86_TRAP_PF_ID;
2888
2889#if 0 /* This is so much nonsense, really. Why was it done like that? */
2890 /* Note! RW access callers reporting a WRITE protection fault will clear
2891 the READ flag before calling. So, read-modify-write accesses (RW)
2892 can safely be reported as READ faults. */
2893 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
2894 uErr |= X86_TRAP_PF_RW;
2895#else
2896 if (fAccess & IEM_ACCESS_TYPE_WRITE)
2897 {
2898 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
2899 /// (regardless of outcome of the comparison in the latter case).
2900 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
2901 uErr |= X86_TRAP_PF_RW;
2902 }
2903#endif
2904
2905 /* For FXSAVE and FRSTOR the #PF is typically reported at the max address
2906 of the memory operand rather than at the start of it. (Not sure what
2907 happens if it crosses a page boundary.) The current heuristic for
2908 this is to report the #PF for the last byte if the access is more than
2909 64 bytes. This is probably not correct, but we can work that out later;
2910 the main objective now is to get FXSAVE to work like on real hardware and
2911 make bs3-cpu-basic2 work. */
2912 if (cbAccess <= 64)
2913 { /* likely */ }
2914 else
2915 GCPtrWhere += cbAccess - 1;
2916
2917 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
2918 uErr, GCPtrWhere);
2919}
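/* Worked examples of the error-code composition above (illustrative only,
   not part of the original source):
   - A CPL-3 write that failed with VERR_ACCESS_DENIED yields
     uErr = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW (0x07).
   - A CPL-0 instruction fetch from a non-present page with CR4.PAE and
     EFER.NXE set yields uErr = X86_TRAP_PF_ID (0x10).
   For the FXSAVE/FRSTOR heuristic, a 512-byte access starting at 0x1000
   that faults would be reported with CR2 = 0x11ff (the last byte). */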
2920
2921
2922/** \#PF(n) - 0e, longjmp. */
2923DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess,
2924 uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP
2925{
2926 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, cbAccess, fAccess, rc)));
2927}
2928
2929
2930/** \#MF(0) - 10. */
2931VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
2932{
2933 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_NE)
2934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2935
2936 /* Convert a #MF into a FERR -> IRQ 13. See @bugref{6117}. */
2937 PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */);
2938 return iemRegUpdateRipAndFinishClearingRF(pVCpu);
2939}
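/* Illustrative note (assumption about typical callers, not taken from this
   file): x87 helpers are expected to call iemRaiseMathFault when they find a
   pending unmasked FPU exception (FSW.ES set).  With CR0.NE set this is a
   regular \#MF; with CR0.NE clear the error is instead signalled the legacy
   way via FERR#, modelled above as ISA IRQ 13, and the instruction is then
   retired normally by advancing RIP. */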
2940
2941
2942/** \#MF(0) - 10, longjmp. */
2943DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2944{
2945 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseMathFault(pVCpu)));
2946}
2947
2948
2949/** \#AC(0) - 11. */
2950VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
2951{
2952 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
2953}
2954
2955
2956/** \#AC(0) - 11, longjmp. */
2957DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2958{
2959 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
2960}
2961
2962
2963/** \#XF(0)/\#XM(0) - 19. */
2964VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
2965{
2966 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2967}
2968
2969
2970/** \#XF(0)/\#XM(0) - 19, longjmp. */
2971DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
2972{
2973 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(iemRaiseSimdFpException(pVCpu)));
2974}
2975
2976
2977/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
2978IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
2979{
2980 NOREF(cbInstr);
2981 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2982}
2983
2984
2985/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
2986IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
2987{
2988 NOREF(cbInstr);
2989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2990}
2991
2992
2993/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
2994IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
2995{
2996 NOREF(cbInstr);
2997 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
2998}
2999
3000
3001/**
3002 * Checks if IEM is in the process of delivering an event (interrupt or
3003 * exception).
3004 *
3005 * @returns true if we're in the process of raising an interrupt or exception,
3006 * false otherwise.
3007 * @param pVCpu The cross context virtual CPU structure.
3008 * @param puVector Where to store the vector associated with the
3009 * currently delivered event, optional.
3010 * @param pfFlags Where to store th event delivery flags (see
3011 * IEM_XCPT_FLAGS_XXX), optional.
3012 * @param puErr Where to store the error code associated with the
3013 * event, optional.
3014 * @param puCr2 Where to store the CR2 associated with the event,
3015 * optional.
3016 * @remarks The caller should check the flags to determine if the error code and
3017 * CR2 are valid for the event.
3018 */
3019VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
3020{
3021 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
3022 if (fRaisingXcpt)
3023 {
3024 if (puVector)
3025 *puVector = pVCpu->iem.s.uCurXcpt;
3026 if (pfFlags)
3027 *pfFlags = pVCpu->iem.s.fCurXcpt;
3028 if (puErr)
3029 *puErr = pVCpu->iem.s.uCurXcptErr;
3030 if (puCr2)
3031 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
3032 }
3033 return fRaisingXcpt;
3034}
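/*
 * Minimal usage sketch for IEMGetCurrentXcpt (illustrative only; the calling
 * context below is made up and not taken from this file):
 *
 *     uint8_t  uVector  = 0;
 *     uint32_t fFlags   = 0;
 *     uint32_t uErrCode = 0;
 *     uint64_t uCr2     = 0;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *     {
 *         // Only consume the error code / CR2 when the flags say they are valid.
 *         if (fFlags & IEM_XCPT_FLAGS_ERR)
 *             Log(("Delivering vector %#x, err=%#x\n", uVector, uErrCode));
 *         if (fFlags & IEM_XCPT_FLAGS_CR2)
 *             Log(("CR2=%RX64\n", uCr2));
 *     }
 */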
3035
3036/** @} */
3037