VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 70582

Last change on this file since 70582 was 70448, checked in by vboxsync, 7 years ago

VMM/IEM: build fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 636.3 KB
1/* $Id: IEMAll.cpp 70448 2018-01-03 05:27:54Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
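/*
 * Illustrative sketch (not part of the original sources): roughly how the
 * level conventions listed above map onto the VBox logging macros from
 * VBox/log.h (included further down). The function and messages are invented.
 */
#if 0
static void iemExampleLogUsage(uint8_t bOpcode, uint64_t uEip)
{
    LogFlow(("iemExample: enter\n"));                            /* Flow   : basic enter/exit info.     */
    Log(("iemExample: raising #GP(0)\n"));                       /* Level 1: exceptions & major events. */
    Log4(("iemExample: opcode %#x at %#RX64\n", bOpcode, uEip)); /* Level 4: decoding mnemonics w/ EIP. */
    Log8(("iemExample: memory write\n"));                        /* Level 8: memory writes.             */
}
#endif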
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
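/*
 * Illustrative sketch (not part of the original sources): what decoder
 * functions defined with the macros above look like. The names and bodies
 * are invented; the real opcode handlers live elsewhere in IEM.
 */
#if 0
FNIEMOP_DEF(iemOp_example)
{
    /* pVCpu is the implicit parameter declared by FNIEMOP_DEF. */
    return VINF_SUCCESS;
}

FNIEMOPRM_DEF(iemOp_example_rm)
{
    /* bRm is the ModR/M byte added by FNIEMOPRM_DEF (via FNIEMOP_DEF_1). */
    RT_NOREF(bRm);
    return VINF_SUCCESS;
}
#endif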
204
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
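/*
 * Illustrative sketch (not part of the original sources) of the two status
 * handling styles IEM_WITH_SETJMP selects between. The helper names
 * (iemExampleFetchU8*) are hypothetical.
 */
#if 0
/* Without IEM_WITH_SETJMP: every helper returns a strict status code that
   the caller must check and propagate. */
VBOXSTRICTRC rcStrict = iemExampleFetchU8(pVCpu, &bByte);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;

/* With IEM_WITH_SETJMP: the top level does one setjmp() and failing helpers
   longjmp() back to it, so the happy path needs no status checks and the
   value can be returned in a register. */
bByte = iemExampleFetchU8Jmp(pVCpu);
#endif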
244
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
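/*
 * Illustrative sketch (not part of the original sources): typical use of the
 * macros above when an instruction implementation hits an aspect that is not
 * handled yet. The condition is invented.
 */
#if 0
if (fUnsupportedCombination)
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("prefixes %#x not handled\n", pVCpu->iem.s.fPrefixes));
#endif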
288
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
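/*
 * Illustrative sketch (not part of the original sources): how the call macros
 * pair with the FNIEMOP_DEF* definitions when dispatching an opcode byte via
 * the one-byte map declared under "Global Variables" below. bOpcode stands in
 * for whatever opcode byte the decoder has just fetched.
 */
#if 0
return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]); /* expands to g_apfnOneByteMap[bOpcode](pVCpu) */
#endif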
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
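/*
 * Illustrative sketch (not part of the original sources) of what
 * IEM_USE_UNALIGNED_DATA_ACCESS selects between when reading a 32-bit value
 * from a byte buffer pb.
 */
#if 0
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
uint32_t const u32 = *(uint32_t const *)pb;                           /* x86/AMD64 tolerate unaligned loads */
# else
uint32_t const u32 = RT_MAKE_U32_FROM_U8(pb[0], pb[1], pb[2], pb[3]); /* elaborate byte assembly */
# endif
#endif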
389
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
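/*
 * Illustrative sketch (not part of the original sources): the common checks
 * macro is meant to sit at the top of an SVM instruction implementation,
 * before any instruction specific work. Function name and body are invented.
 */
#if 0
static VBOXSTRICTRC iemExampleSvmInstr(PVMCPU pVCpu)
{
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, clgi);   /* shared EFER.SVME / mode / CPL checks */
    /* ... instruction specific work ... */
    return VINF_SUCCESS;
}
#endif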
412
413/**
414 * Check if SVM is enabled.
415 */
416# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
417
418/**
419 * Check if an SVM control/instruction intercept is set.
420 */
421# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
422
423/**
424 * Check if an SVM read CRx intercept is set.
425 */
426# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
427
428/**
429 * Check if an SVM write CRx intercept is set.
430 */
431# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
432
433/**
434 * Check if an SVM read DRx intercept is set.
435 */
436# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
437
438/**
439 * Check if an SVM write DRx intercept is set.
440 */
441# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
442
443/**
444 * Check if an SVM exception intercept is set.
445 */
446# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
447
448/**
449 * Invokes the SVM \#VMEXIT handler for the nested-guest.
450 */
451# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
452 do \
453 { \
454 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
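/*
 * Illustrative sketch (not part of the original sources): how a MOV CRx
 * implementation might combine the intercept test and the \#VMEXIT macro
 * above. SVM_EXIT_WRITE_CR0 is assumed to be the matching exit code from
 * hm_svm.h; iGReg is the general register operand.
 */
#if 0
if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, 0 /*CR0*/))
    IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, IEMACCESSCRX_MOV_CRX, iGReg);
#endif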
472
473#else
474# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
483# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
484
485#endif /* VBOX_WITH_NESTED_HWVIRT */
486
487
488/*********************************************************************************************************************************
489* Global Variables *
490*********************************************************************************************************************************/
491extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
492
493
494/** Function table for the ADD instruction. */
495IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
496{
497 iemAImpl_add_u8, iemAImpl_add_u8_locked,
498 iemAImpl_add_u16, iemAImpl_add_u16_locked,
499 iemAImpl_add_u32, iemAImpl_add_u32_locked,
500 iemAImpl_add_u64, iemAImpl_add_u64_locked
501};
502
503/** Function table for the ADC instruction. */
504IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
505{
506 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
507 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
508 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
509 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
510};
511
512/** Function table for the SUB instruction. */
513IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
514{
515 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
516 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
517 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
518 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
519};
520
521/** Function table for the SBB instruction. */
522IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
523{
524 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
525 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
526 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
527 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
528};
529
530/** Function table for the OR instruction. */
531IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
532{
533 iemAImpl_or_u8, iemAImpl_or_u8_locked,
534 iemAImpl_or_u16, iemAImpl_or_u16_locked,
535 iemAImpl_or_u32, iemAImpl_or_u32_locked,
536 iemAImpl_or_u64, iemAImpl_or_u64_locked
537};
538
539/** Function table for the XOR instruction. */
540IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
541{
542 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
543 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
544 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
545 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
546};
547
548/** Function table for the AND instruction. */
549IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
550{
551 iemAImpl_and_u8, iemAImpl_and_u8_locked,
552 iemAImpl_and_u16, iemAImpl_and_u16_locked,
553 iemAImpl_and_u32, iemAImpl_and_u32_locked,
554 iemAImpl_and_u64, iemAImpl_and_u64_locked
555};
556
557/** Function table for the CMP instruction.
558 * @remarks Making operand order ASSUMPTIONS.
559 */
560IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
561{
562 iemAImpl_cmp_u8, NULL,
563 iemAImpl_cmp_u16, NULL,
564 iemAImpl_cmp_u32, NULL,
565 iemAImpl_cmp_u64, NULL
566};
567
568/** Function table for the TEST instruction.
569 * @remarks Making operand order ASSUMPTIONS.
570 */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
572{
573 iemAImpl_test_u8, NULL,
574 iemAImpl_test_u16, NULL,
575 iemAImpl_test_u32, NULL,
576 iemAImpl_test_u64, NULL
577};
578
579/** Function table for the BT instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
581{
582 NULL, NULL,
583 iemAImpl_bt_u16, NULL,
584 iemAImpl_bt_u32, NULL,
585 iemAImpl_bt_u64, NULL
586};
587
588/** Function table for the BTC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
590{
591 NULL, NULL,
592 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
593 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
594 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
595};
596
597/** Function table for the BTR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
599{
600 NULL, NULL,
601 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
602 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
603 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
604};
605
606/** Function table for the BTS instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
608{
609 NULL, NULL,
610 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
611 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
612 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
613};
614
615/** Function table for the BSF instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
617{
618 NULL, NULL,
619 iemAImpl_bsf_u16, NULL,
620 iemAImpl_bsf_u32, NULL,
621 iemAImpl_bsf_u64, NULL
622};
623
624/** Function table for the BSR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
626{
627 NULL, NULL,
628 iemAImpl_bsr_u16, NULL,
629 iemAImpl_bsr_u32, NULL,
630 iemAImpl_bsr_u64, NULL
631};
632
633/** Function table for the IMUL instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
635{
636 NULL, NULL,
637 iemAImpl_imul_two_u16, NULL,
638 iemAImpl_imul_two_u32, NULL,
639 iemAImpl_imul_two_u64, NULL
640};
641
642/** Group 1 /r lookup table. */
643IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
644{
645 &g_iemAImpl_add,
646 &g_iemAImpl_or,
647 &g_iemAImpl_adc,
648 &g_iemAImpl_sbb,
649 &g_iemAImpl_and,
650 &g_iemAImpl_sub,
651 &g_iemAImpl_xor,
652 &g_iemAImpl_cmp
653};
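/*
 * Illustrative sketch (not part of the original sources): the group 1 table
 * above is indexed with the ModR/M reg field, which for opcodes 0x80..0x83
 * encodes /0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB, /6=XOR, /7=CMP.
 */
#if 0
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];  /* reg field of the ModR/M byte */
#endif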
654
655/** Function table for the INC instruction. */
656IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
657{
658 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
659 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
660 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
661 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
662};
663
664/** Function table for the DEC instruction. */
665IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
666{
667 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
668 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
669 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
670 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
671};
672
673/** Function table for the NEG instruction. */
674IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
675{
676 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
677 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
678 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
679 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
680};
681
682/** Function table for the NOT instruction. */
683IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
684{
685 iemAImpl_not_u8, iemAImpl_not_u8_locked,
686 iemAImpl_not_u16, iemAImpl_not_u16_locked,
687 iemAImpl_not_u32, iemAImpl_not_u32_locked,
688 iemAImpl_not_u64, iemAImpl_not_u64_locked
689};
690
691
692/** Function table for the ROL instruction. */
693IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
694{
695 iemAImpl_rol_u8,
696 iemAImpl_rol_u16,
697 iemAImpl_rol_u32,
698 iemAImpl_rol_u64
699};
700
701/** Function table for the ROR instruction. */
702IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
703{
704 iemAImpl_ror_u8,
705 iemAImpl_ror_u16,
706 iemAImpl_ror_u32,
707 iemAImpl_ror_u64
708};
709
710/** Function table for the RCL instruction. */
711IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
712{
713 iemAImpl_rcl_u8,
714 iemAImpl_rcl_u16,
715 iemAImpl_rcl_u32,
716 iemAImpl_rcl_u64
717};
718
719/** Function table for the RCR instruction. */
720IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
721{
722 iemAImpl_rcr_u8,
723 iemAImpl_rcr_u16,
724 iemAImpl_rcr_u32,
725 iemAImpl_rcr_u64
726};
727
728/** Function table for the SHL instruction. */
729IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
730{
731 iemAImpl_shl_u8,
732 iemAImpl_shl_u16,
733 iemAImpl_shl_u32,
734 iemAImpl_shl_u64
735};
736
737/** Function table for the SHR instruction. */
738IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
739{
740 iemAImpl_shr_u8,
741 iemAImpl_shr_u16,
742 iemAImpl_shr_u32,
743 iemAImpl_shr_u64
744};
745
746/** Function table for the SAR instruction. */
747IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
748{
749 iemAImpl_sar_u8,
750 iemAImpl_sar_u16,
751 iemAImpl_sar_u32,
752 iemAImpl_sar_u64
753};
754
755
756/** Function table for the MUL instruction. */
757IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
758{
759 iemAImpl_mul_u8,
760 iemAImpl_mul_u16,
761 iemAImpl_mul_u32,
762 iemAImpl_mul_u64
763};
764
765/** Function table for the IMUL instruction working implicitly on rAX. */
766IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
767{
768 iemAImpl_imul_u8,
769 iemAImpl_imul_u16,
770 iemAImpl_imul_u32,
771 iemAImpl_imul_u64
772};
773
774/** Function table for the DIV instruction. */
775IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
776{
777 iemAImpl_div_u8,
778 iemAImpl_div_u16,
779 iemAImpl_div_u32,
780 iemAImpl_div_u64
781};
782
783/** Function table for the IDIV instruction. */
784IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
785{
786 iemAImpl_idiv_u8,
787 iemAImpl_idiv_u16,
788 iemAImpl_idiv_u32,
789 iemAImpl_idiv_u64
790};
791
792/** Function table for the SHLD instruction */
793IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
794{
795 iemAImpl_shld_u16,
796 iemAImpl_shld_u32,
797 iemAImpl_shld_u64,
798};
799
800/** Function table for the SHRD instruction */
801IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
802{
803 iemAImpl_shrd_u16,
804 iemAImpl_shrd_u32,
805 iemAImpl_shrd_u64,
806};
807
808
809/** Function table for the PUNPCKLBW instruction */
810IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
812/** Function table for the PUNPCKLWD instruction */
812IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
813/** Function table for the PUNPCKLDQ instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
815/** Function table for the PUNPCKLQDQ instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
817
818/** Function table for the PUNPCKHBW instruction */
819IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
821/** Function table for the PUNPCKHWD instruction */
821IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
822/** Function table for the PUNPCKHDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
824/** Function table for the PUNPCKHQDQ instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
826
827/** Function table for the PXOR instruction */
828IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
829/** Function table for the PCMPEQB instruction */
830IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
831/** Function table for the PCMPEQW instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
833/** Function table for the PCMPEQD instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
835
836
837#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
838/** What IEM just wrote. */
839uint8_t g_abIemWrote[256];
840/** How much IEM just wrote. */
841size_t g_cbIemWrote;
842#endif
843
844
845/*********************************************************************************************************************************
846* Internal Functions *
847*********************************************************************************************************************************/
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
852/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
863IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
865#ifdef IEM_WITH_SETJMP
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871#endif
872
873IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
887IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
888IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
889
890#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
891IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
892#endif
893IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
894IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
895
896#ifdef VBOX_WITH_NESTED_HWVIRT
897IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
898 uint64_t uExitInfo2);
899IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
900 uint32_t uErr, uint64_t uCr2);
901#endif
902
903/**
904 * Sets the pass up status.
905 *
906 * @returns VINF_SUCCESS.
907 * @param pVCpu The cross context virtual CPU structure of the
908 * calling thread.
909 * @param rcPassUp The pass up status. Must be informational.
910 * VINF_SUCCESS is not allowed.
911 */
912IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
913{
914 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
915
916 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
917 if (rcOldPassUp == VINF_SUCCESS)
918 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
919 /* If both are EM scheduling codes, use EM priority rules. */
920 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
921 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
922 {
923 if (rcPassUp < rcOldPassUp)
924 {
925 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
926 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
927 }
928 else
929 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
930 }
931 /* Override EM scheduling with specific status code. */
932 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
933 {
934 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 }
937 /* Don't override specific status code, first come first served. */
938 else
939 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
940 return VINF_SUCCESS;
941}
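/*
 * Illustrative sketch (not part of the original sources): the typical calling
 * pattern for iemSetPassUpStatus, mirroring the PGMPhysRead handling further
 * down in this file - informational statuses are remembered while the local
 * code path carries on as success.
 */
#if 0
if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif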
942
943
944/**
945 * Calculates the CPU mode.
946 *
947 * This is mainly for updating IEMCPU::enmCpuMode.
948 *
949 * @returns CPU mode.
950 * @param pCtx The register context for the CPU.
951 */
952DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
953{
954 if (CPUMIsGuestIn64BitCodeEx(pCtx))
955 return IEMMODE_64BIT;
956 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
957 return IEMMODE_32BIT;
958 return IEMMODE_16BIT;
959}
960
961
962/**
963 * Initializes the execution state.
964 *
965 * @param pVCpu The cross context virtual CPU structure of the
966 * calling thread.
967 * @param fBypassHandlers Whether to bypass access handlers.
968 *
969 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
970 * side-effects in strict builds.
971 */
972DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
973{
974 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
975
976 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
977
978#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
987#endif
988
989#ifdef VBOX_WITH_RAW_MODE_NOT_R0
990 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
991#endif
992 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
993 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
994#ifdef VBOX_STRICT
995 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
996 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1000 pVCpu->iem.s.uRexReg = 127;
1001 pVCpu->iem.s.uRexB = 127;
1002 pVCpu->iem.s.uRexIndex = 127;
1003 pVCpu->iem.s.iEffSeg = 127;
1004 pVCpu->iem.s.idxPrefix = 127;
1005 pVCpu->iem.s.uVex3rdReg = 127;
1006 pVCpu->iem.s.uVexLength = 127;
1007 pVCpu->iem.s.fEvexStuff = 127;
1008 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1009# ifdef IEM_WITH_CODE_TLB
1010 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1011 pVCpu->iem.s.pbInstrBuf = NULL;
1012 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1013 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1014 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1015 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1016# else
1017 pVCpu->iem.s.offOpcode = 127;
1018 pVCpu->iem.s.cbOpcode = 127;
1019# endif
1020#endif
1021
1022 pVCpu->iem.s.cActiveMappings = 0;
1023 pVCpu->iem.s.iNextMapping = 0;
1024 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1025 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1026#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1027 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1028 && pCtx->cs.u64Base == 0
1029 && pCtx->cs.u32Limit == UINT32_MAX
1030 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1031 if (!pVCpu->iem.s.fInPatchCode)
1032 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1033#endif
1034
1035#ifdef IEM_VERIFICATION_MODE_FULL
1036 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1037 pVCpu->iem.s.fNoRem = true;
1038#endif
1039}
1040
1041#ifdef VBOX_WITH_NESTED_HWVIRT
1042/**
1043 * Performs a minimal reinitialization of the execution state.
1044 *
1045 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1046 * 'world-switch' type operations on the CPU. Currently only nested
1047 * hardware-virtualization uses it.
1048 *
1049 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1050 */
1051IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1052{
1053 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1054 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1055 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1056
1057 pVCpu->iem.s.uCpl = uCpl;
1058 pVCpu->iem.s.enmCpuMode = enmMode;
1059 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1060 pVCpu->iem.s.enmEffAddrMode = enmMode;
1061 if (enmMode != IEMMODE_64BIT)
1062 {
1063 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1064 pVCpu->iem.s.enmEffOpSize = enmMode;
1065 }
1066 else
1067 {
1068 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1069 pVCpu->iem.s.enmEffOpSize = enmMode;
1070 }
1071 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1072#ifndef IEM_WITH_CODE_TLB
1073 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1074 pVCpu->iem.s.offOpcode = 0;
1075 pVCpu->iem.s.cbOpcode = 0;
1076#endif
1077}
1078#endif
1079
1080/**
1081 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1082 *
1083 * @param pVCpu The cross context virtual CPU structure of the
1084 * calling thread.
1085 */
1086DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1087{
1088 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1089#ifdef IEM_VERIFICATION_MODE_FULL
1090 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1091#endif
1092#ifdef VBOX_STRICT
1093# ifdef IEM_WITH_CODE_TLB
1094 NOREF(pVCpu);
1095# else
1096 pVCpu->iem.s.cbOpcode = 0;
1097# endif
1098#else
1099 NOREF(pVCpu);
1100#endif
1101}
1102
1103
1104/**
1105 * Initializes the decoder state.
1106 *
1107 * iemReInitDecoder is mostly a copy of this function.
1108 *
1109 * @param pVCpu The cross context virtual CPU structure of the
1110 * calling thread.
1111 * @param fBypassHandlers Whether to bypass access handlers.
1112 */
1113DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1114{
1115 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1116
1117 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1118
1119#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1128#endif
1129
1130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1131 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1132#endif
1133 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1134#ifdef IEM_VERIFICATION_MODE_FULL
1135 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1136 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1137#endif
1138 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1139 pVCpu->iem.s.enmCpuMode = enmMode;
1140 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1141 pVCpu->iem.s.enmEffAddrMode = enmMode;
1142 if (enmMode != IEMMODE_64BIT)
1143 {
1144 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffOpSize = enmMode;
1146 }
1147 else
1148 {
1149 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1150 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1151 }
1152 pVCpu->iem.s.fPrefixes = 0;
1153 pVCpu->iem.s.uRexReg = 0;
1154 pVCpu->iem.s.uRexB = 0;
1155 pVCpu->iem.s.uRexIndex = 0;
1156 pVCpu->iem.s.idxPrefix = 0;
1157 pVCpu->iem.s.uVex3rdReg = 0;
1158 pVCpu->iem.s.uVexLength = 0;
1159 pVCpu->iem.s.fEvexStuff = 0;
1160 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1161#ifdef IEM_WITH_CODE_TLB
1162 pVCpu->iem.s.pbInstrBuf = NULL;
1163 pVCpu->iem.s.offInstrNextByte = 0;
1164 pVCpu->iem.s.offCurInstrStart = 0;
1165# ifdef VBOX_STRICT
1166 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1167 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1168 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1169# endif
1170#else
1171 pVCpu->iem.s.offOpcode = 0;
1172 pVCpu->iem.s.cbOpcode = 0;
1173#endif
1174 pVCpu->iem.s.cActiveMappings = 0;
1175 pVCpu->iem.s.iNextMapping = 0;
1176 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1177 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1178#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1179 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1180 && pCtx->cs.u64Base == 0
1181 && pCtx->cs.u32Limit == UINT32_MAX
1182 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1183 if (!pVCpu->iem.s.fInPatchCode)
1184 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1185#endif
1186
1187#ifdef DBGFTRACE_ENABLED
1188 switch (enmMode)
1189 {
1190 case IEMMODE_64BIT:
1191 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1192 break;
1193 case IEMMODE_32BIT:
1194 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1195 break;
1196 case IEMMODE_16BIT:
1197 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1198 break;
1199 }
1200#endif
1201}
1202
1203
1204/**
1205 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1206 *
1207 * This is mostly a copy of iemInitDecoder.
1208 *
1209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1210 */
1211DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1212{
1213 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1214
1215 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1216
1217#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1226#endif
1227
1228 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1229#ifdef IEM_VERIFICATION_MODE_FULL
1230 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1231 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1232#endif
1233 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1234 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1235 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1236 pVCpu->iem.s.enmEffAddrMode = enmMode;
1237 if (enmMode != IEMMODE_64BIT)
1238 {
1239 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffOpSize = enmMode;
1241 }
1242 else
1243 {
1244 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1245 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1246 }
1247 pVCpu->iem.s.fPrefixes = 0;
1248 pVCpu->iem.s.uRexReg = 0;
1249 pVCpu->iem.s.uRexB = 0;
1250 pVCpu->iem.s.uRexIndex = 0;
1251 pVCpu->iem.s.idxPrefix = 0;
1252 pVCpu->iem.s.uVex3rdReg = 0;
1253 pVCpu->iem.s.uVexLength = 0;
1254 pVCpu->iem.s.fEvexStuff = 0;
1255 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1256#ifdef IEM_WITH_CODE_TLB
1257 if (pVCpu->iem.s.pbInstrBuf)
1258 {
1259 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1260 - pVCpu->iem.s.uInstrBufPc;
1261 if (off < pVCpu->iem.s.cbInstrBufTotal)
1262 {
1263 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1264 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1265 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1266 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1267 else
1268 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1269 }
1270 else
1271 {
1272 pVCpu->iem.s.pbInstrBuf = NULL;
1273 pVCpu->iem.s.offInstrNextByte = 0;
1274 pVCpu->iem.s.offCurInstrStart = 0;
1275 pVCpu->iem.s.cbInstrBuf = 0;
1276 pVCpu->iem.s.cbInstrBufTotal = 0;
1277 }
1278 }
1279 else
1280 {
1281 pVCpu->iem.s.offInstrNextByte = 0;
1282 pVCpu->iem.s.offCurInstrStart = 0;
1283 pVCpu->iem.s.cbInstrBuf = 0;
1284 pVCpu->iem.s.cbInstrBufTotal = 0;
1285 }
1286#else
1287 pVCpu->iem.s.cbOpcode = 0;
1288 pVCpu->iem.s.offOpcode = 0;
1289#endif
1290 Assert(pVCpu->iem.s.cActiveMappings == 0);
1291 pVCpu->iem.s.iNextMapping = 0;
1292 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1293 Assert(pVCpu->iem.s.fBypassHandlers == false);
1294#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1295 if (!pVCpu->iem.s.fInPatchCode)
1296 { /* likely */ }
1297 else
1298 {
1299 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1300 && pCtx->cs.u64Base == 0
1301 && pCtx->cs.u32Limit == UINT32_MAX
1302 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1303 if (!pVCpu->iem.s.fInPatchCode)
1304 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1305 }
1306#endif
1307
1308#ifdef DBGFTRACE_ENABLED
1309 switch (enmMode)
1310 {
1311 case IEMMODE_64BIT:
1312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1313 break;
1314 case IEMMODE_32BIT:
1315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1316 break;
1317 case IEMMODE_16BIT:
1318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1319 break;
1320 }
1321#endif
1322}
1323
1324
1325
1326/**
1327 * Prefetches opcodes the first time execution is started.
1328 *
1329 * @returns Strict VBox status code.
1330 * @param pVCpu The cross context virtual CPU structure of the
1331 * calling thread.
1332 * @param fBypassHandlers Whether to bypass access handlers.
1333 */
1334IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1335{
1336#ifdef IEM_VERIFICATION_MODE_FULL
1337 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1338#endif
1339 iemInitDecoder(pVCpu, fBypassHandlers);
1340
1341#ifdef IEM_WITH_CODE_TLB
1342 /** @todo Do ITLB lookup here. */
1343
1344#else /* !IEM_WITH_CODE_TLB */
1345
1346 /*
1347 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1348 *
1349 * First translate CS:rIP to a physical address.
1350 */
1351 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1352 uint32_t cbToTryRead;
1353 RTGCPTR GCPtrPC;
1354 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1355 {
1356 cbToTryRead = PAGE_SIZE;
1357 GCPtrPC = pCtx->rip;
1358 if (IEM_IS_CANONICAL(GCPtrPC))
1359 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1360 else
1361 return iemRaiseGeneralProtectionFault0(pVCpu);
1362 }
1363 else
1364 {
1365 uint32_t GCPtrPC32 = pCtx->eip;
1366 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1367 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1368 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1369 else
1370 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1371 if (cbToTryRead) { /* likely */ }
1372 else /* overflowed */
1373 {
1374 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1375 cbToTryRead = UINT32_MAX;
1376 }
1377 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1378 Assert(GCPtrPC <= UINT32_MAX);
1379 }
1380
1381# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1382 /* Allow interpretation of patch manager code blocks since they can for
1383 instance throw #PFs for perfectly good reasons. */
1384 if (pVCpu->iem.s.fInPatchCode)
1385 {
1386 size_t cbRead = 0;
1387 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1388 AssertRCReturn(rc, rc);
1389 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1390 return VINF_SUCCESS;
1391 }
1392# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1393
1394 RTGCPHYS GCPhys;
1395 uint64_t fFlags;
1396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1397 if (RT_SUCCESS(rc)) { /* probable */ }
1398 else
1399 {
1400 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1401 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1402 }
1403 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1404 else
1405 {
1406 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1407 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1408 }
1409 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1410 else
1411 {
1412 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1413 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1414 }
1415 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1416 /** @todo Check reserved bits and such stuff. PGM is better at doing
1417 * that, so do it when implementing the guest virtual address
1418 * TLB... */
1419
1420# ifdef IEM_VERIFICATION_MODE_FULL
1421 /*
1422 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1423 * instruction.
1424 */
1425 /** @todo optimize this differently by not using PGMPhysRead. */
1426 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1427 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1428 if ( offPrevOpcodes < cbOldOpcodes
1429 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1430 {
1431 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1432 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1433 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1434 pVCpu->iem.s.cbOpcode = cbNew;
1435 return VINF_SUCCESS;
1436 }
1437# endif
1438
1439 /*
1440 * Read the bytes at this address.
1441 */
1442 PVM pVM = pVCpu->CTX_SUFF(pVM);
1443# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1444 size_t cbActual;
1445 if ( PATMIsEnabled(pVM)
1446 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1447 {
1448 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1449 Assert(cbActual > 0);
1450 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1451 }
1452 else
1453# endif
1454 {
1455 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1456 if (cbToTryRead > cbLeftOnPage)
1457 cbToTryRead = cbLeftOnPage;
1458 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1459 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1460
1461 if (!pVCpu->iem.s.fBypassHandlers)
1462 {
1463 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1464 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1465 { /* likely */ }
1466 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1467 {
1468 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1469 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1470 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1471 }
1472 else
1473 {
1474 Log((RT_SUCCESS(rcStrict)
1475 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1476 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1477 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480 }
1481 else
1482 {
1483 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1484 if (RT_SUCCESS(rc))
1485 { /* likely */ }
1486 else
1487 {
1488 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1489 GCPtrPC, GCPhys, cbToTryRead, rc));
1490 return rc;
1491 }
1492 }
1493 pVCpu->iem.s.cbOpcode = cbToTryRead;
1494 }
1495#endif /* !IEM_WITH_CODE_TLB */
1496 return VINF_SUCCESS;
1497}
1498
1499
1500/**
1501 * Invalidates the IEM TLBs.
1502 *
1503 * This is called internally as well as by PGM when moving GC mappings.
1504 *
1505 *
1506 * @param pVCpu The cross context virtual CPU structure of the calling
1507 * thread.
1508 * @param fVmm Set when PGM calls us with a remapping.
1509 */
1510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1511{
1512#ifdef IEM_WITH_CODE_TLB
1513 pVCpu->iem.s.cbInstrBufTotal = 0;
1514 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1515 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1516 { /* very likely */ }
1517 else
1518 {
1519 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1520 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1521 while (i-- > 0)
1522 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1523 }
1524#endif
1525
1526#ifdef IEM_WITH_DATA_TLB
1527 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1528 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1529 { /* very likely */ }
1530 else
1531 {
1532 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1533 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1534 while (i-- > 0)
1535 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1536 }
1537#endif
1538 NOREF(pVCpu); NOREF(fVmm);
1539}
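/*
 * Illustrative sketch (not part of the original source): the "invalidate all"
 * above relies on a revision counter rather than touching all 256 entries on
 * every call.  Each entry tag is (page number | revision); bumping the
 * revision makes every existing tag compare stale, and only on the rare
 * wrap-around to zero is the array actually scrubbed.  The types and the
 * revision increment below are simplified stand-ins invented for the example.
 */
#if 0 /* standalone illustration only */
typedef struct EXAMPLETLBENTRY { uint64_t uTag; } EXAMPLETLBENTRY;
typedef struct EXAMPLETLB
{
    uint64_t        uRevision;                  /* lives in bits above the page number */
    EXAMPLETLBENTRY aEntries[256];
} EXAMPLETLB;
# define EXAMPLE_REV_INCR   RT_BIT_64(36)       /* any bit above the widest page number works */

static void exampleTlbInvalidateAll(EXAMPLETLB *pTlb)
{
    pTlb->uRevision += EXAMPLE_REV_INCR;
    if (pTlb->uRevision != 0)
    { /* likely: every existing tag now compares stale, nothing more to do */ }
    else
    {
        /* Wrapped around: restart the revision and scrub the tags so no old
           entry can accidentally match the reused revision value. */
        pTlb->uRevision = EXAMPLE_REV_INCR;
        unsigned i = RT_ELEMENTS(pTlb->aEntries);
        while (i-- > 0)
            pTlb->aEntries[i].uTag = 0;
    }
}
#endif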
1540
1541
1542/**
1543 * Invalidates a page in the TLBs.
1544 *
1545 * @param pVCpu The cross context virtual CPU structure of the calling
1546 * thread.
1547 * @param   GCPtr       The address of the page to invalidate.
1548 */
1549VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1550{
1551#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1552 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1553 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1555 uintptr_t idx = (uint8_t)GCPtr;
1556
1557# ifdef IEM_WITH_CODE_TLB
1558 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1559 {
1560 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1561 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1562 pVCpu->iem.s.cbInstrBufTotal = 0;
1563 }
1564# endif
1565
1566# ifdef IEM_WITH_DATA_TLB
1567 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1568 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1569# endif
1570#else
1571 NOREF(pVCpu); NOREF(GCPtr);
1572#endif
1573}
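/*
 * Illustrative sketch (not part of the original source): single-page
 * invalidation above exploits the direct-mapped layout - the low 8 bits of the
 * page number select the only slot the page can occupy, and that slot is only
 * cleared when its tag matches (page number | current revision).  Reuses the
 * simplified EXAMPLETLB types from the previous sketch.
 */
#if 0 /* standalone illustration only */
static void exampleTlbInvalidatePage(EXAMPLETLB *pTlb, uint64_t GCPtr)
{
    uint64_t const  uPage = GCPtr >> X86_PAGE_SHIFT;    /* page number */
    uintptr_t const idx   = (uint8_t)uPage;             /* direct-mapped slot */
    if (pTlb->aEntries[idx].uTag == (uPage | pTlb->uRevision))
        pTlb->aEntries[idx].uTag = 0;
}
#endif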
1574
1575
1576/**
1577 * Invalidates the host physical aspects of the IEM TLBs.
1578 *
1579 * This is called internally as well as by PGM when moving GC mappings.
1580 *
1581 * @param pVCpu The cross context virtual CPU structure of the calling
1582 * thread.
1583 */
1584VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1585{
1586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1587    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1588
1589# ifdef IEM_WITH_CODE_TLB
1590 pVCpu->iem.s.cbInstrBufTotal = 0;
1591# endif
1592 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1593 if (uTlbPhysRev != 0)
1594 {
1595 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1596 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1597 }
1598 else
1599 {
1600 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1601 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602
1603 unsigned i;
1604# ifdef IEM_WITH_CODE_TLB
1605 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1606 while (i-- > 0)
1607 {
1608 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1609 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1610 }
1611# endif
1612# ifdef IEM_WITH_DATA_TLB
1613 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1614 while (i-- > 0)
1615 {
1616 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1617 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1618 }
1619# endif
1620 }
1621#else
1622 NOREF(pVCpu);
1623#endif
1624}
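/*
 * Illustrative sketch (not part of the original source): the physical revision
 * uses the same idea as the tag revision, but lives in the high bits of each
 * entry's fFlagsAndPhysRev field.  Cached host-mapping information is only
 * trusted while those bits equal the TLB's current physical revision, so a
 * single increment lazily invalidates all cached physical info.  The fields
 * and mask below are simplified stand-ins invented for the example.
 */
#if 0 /* standalone illustration only */
typedef struct EXAMPLEPHYSTLBENTRY
{
    uint64_t fFlagsAndPhysRev;  /* low bits: access flags, high bits: phys revision */
    uint8_t *pbMappingR3;       /* cached host mapping, only valid while fresh */
} EXAMPLEPHYSTLBENTRY;

static bool exampleIsPhysInfoFresh(EXAMPLEPHYSTLBENTRY const *pEntry, uint64_t uTlbPhysRev, uint64_t fPhysRevMask)
{
    return (pEntry->fFlagsAndPhysRev & fPhysRevMask) == uTlbPhysRev;
}
#endif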
1625
1626
1627/**
1628 * Invalidates the host physical aspects of the IEM TLBs.
1629 *
1630 * This is called internally as well as by PGM when moving GC mappings.
1631 *
1632 * @param pVM The cross context VM structure.
1633 *
1634 * @remarks Caller holds the PGM lock.
1635 */
1636VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1637{
1638 RT_NOREF_PV(pVM);
1639}
1640
1641#ifdef IEM_WITH_CODE_TLB
1642
1643/**
1644 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1645 * failure and longjmp'ing out.
1646 *
1647 * We end up here for a number of reasons:
1648 * - pbInstrBuf isn't yet initialized.
1649 *      - Advancing beyond the buffer boundary (e.g. cross page).
1650 * - Advancing beyond the CS segment limit.
1651 * - Fetching from non-mappable page (e.g. MMIO).
1652 *
1653 * @param pVCpu The cross context virtual CPU structure of the
1654 * calling thread.
1655 * @param pvDst Where to return the bytes.
1656 * @param cbDst Number of bytes to read.
1657 *
1658 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1659 */
1660IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1661{
1662#ifdef IN_RING3
1663//__debugbreak();
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684                pVCpu->iem.s.offInstrNextByte += cbCopy; /* advance past the bytes just copied */
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1696 RTGCPTR GCPtrFirst;
1697 uint32_t cbMaxRead;
1698 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1699 {
1700 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1701 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1702 { /* likely */ }
1703 else
1704 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1705 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1706 }
1707 else
1708 {
1709 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1710 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1711 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1712 { /* likely */ }
1713 else
1714 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1715 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1716 if (cbMaxRead != 0)
1717 { /* likely */ }
1718 else
1719 {
1720 /* Overflowed because address is 0 and limit is max. */
1721 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1722 cbMaxRead = X86_PAGE_SIZE;
1723 }
1724 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1725 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1726 if (cbMaxRead2 < cbMaxRead)
1727 cbMaxRead = cbMaxRead2;
1728 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1729 }
1730
1731 /*
1732 * Get the TLB entry for this piece of code.
1733 */
1734 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1735 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1736 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1737 if (pTlbe->uTag == uTag)
1738 {
1739 /* likely when executing lots of code, otherwise unlikely */
1740# ifdef VBOX_WITH_STATISTICS
1741 pVCpu->iem.s.CodeTlb.cTlbHits++;
1742# endif
1743 }
1744 else
1745 {
1746 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1747# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1748 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1749 {
1750 pTlbe->uTag = uTag;
1751 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1752                                      | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1753 pTlbe->GCPhys = NIL_RTGCPHYS;
1754 pTlbe->pbMappingR3 = NULL;
1755 }
1756 else
1757# endif
1758 {
1759 RTGCPHYS GCPhys;
1760 uint64_t fFlags;
1761 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1762 if (RT_FAILURE(rc))
1763 {
1764 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1765 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1766 }
1767
1768 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1769 pTlbe->uTag = uTag;
1770 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1771 pTlbe->GCPhys = GCPhys;
1772 pTlbe->pbMappingR3 = NULL;
1773 }
1774 }
1775
1776 /*
1777 * Check TLB page table level access flags.
1778 */
1779 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1780 {
1781 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1782 {
1783 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1784 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1785 }
1786 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1787 {
1788 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1789 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1790 }
1791 }
1792
1793# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1794 /*
1795 * Allow interpretation of patch manager code blocks since they can for
1796 * instance throw #PFs for perfectly good reasons.
1797 */
1798 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1799 { /* no unlikely */ }
1800 else
1801 {
1802            /** @todo This could be optimized a little in ring-3 if we liked. */
1803 size_t cbRead = 0;
1804 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1805 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1806 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1807 return;
1808 }
1809# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1810
1811 /*
1812 * Look up the physical page info if necessary.
1813 */
1814 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1815 { /* not necessary */ }
1816 else
1817 {
1818 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1820 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1821 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1822 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1823 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1824 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1825 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1826 }
1827
1828# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1829 /*
1830 * Try do a direct read using the pbMappingR3 pointer.
1831 */
1832 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1833 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1834 {
1835 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1836 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1837 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1838 {
1839 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1840 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1841 }
1842 else
1843 {
1844 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1845 Assert(cbInstr < cbMaxRead);
1846 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1847 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1848 }
1849 if (cbDst <= cbMaxRead)
1850 {
1851 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1852 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1853 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1854 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1855 return;
1856 }
1857 pVCpu->iem.s.pbInstrBuf = NULL;
1858
1859 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1860 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1861 }
1862 else
1863# endif
1864#if 0
1865 /*
1866     * If there is no special read handling, we can read a bit more and
1867 * put it in the prefetch buffer.
1868 */
1869 if ( cbDst < cbMaxRead
1870 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1871 {
1872 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1873 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1874 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1875 { /* likely */ }
1876 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1877 {
1878 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1879 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1880 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1881            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1882 }
1883 else
1884 {
1885 Log((RT_SUCCESS(rcStrict)
1886 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1887 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1888 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1889 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1890 }
1891 }
1892 /*
1893 * Special read handling, so only read exactly what's needed.
1894 * This is a highly unlikely scenario.
1895 */
1896 else
1897#endif
1898 {
1899 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1900 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1901 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1902 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1903 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1904 { /* likely */ }
1905 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1906 {
1907 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1908                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1909 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1910 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1911 }
1912 else
1913 {
1914 Log((RT_SUCCESS(rcStrict)
1915 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1916 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1917                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1918 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1919 }
1920 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1921 if (cbToRead == cbDst)
1922 return;
1923 }
1924
1925 /*
1926 * More to read, loop.
1927 */
1928 cbDst -= cbMaxRead;
1929 pvDst = (uint8_t *)pvDst + cbMaxRead;
1930 }
1931#else
1932 RT_NOREF(pvDst, cbDst);
1933 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1934#endif
1935}
1936
1937#else
1938
1939/**
1940 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1941 * exception if it fails.
1942 *
1943 * @returns Strict VBox status code.
1944 * @param pVCpu The cross context virtual CPU structure of the
1945 * calling thread.
1946 * @param   cbMin   The minimum number of bytes relative to offOpcode
1947 * that must be read.
1948 */
1949IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1950{
1951 /*
1952 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1953 *
1954 * First translate CS:rIP to a physical address.
1955 */
1956 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1957 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1958 uint32_t cbToTryRead;
1959 RTGCPTR GCPtrNext;
1960 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1961 {
1962 cbToTryRead = PAGE_SIZE;
1963 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1964 if (!IEM_IS_CANONICAL(GCPtrNext))
1965 return iemRaiseGeneralProtectionFault0(pVCpu);
1966 }
1967 else
1968 {
1969 uint32_t GCPtrNext32 = pCtx->eip;
1970 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1971 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1972 if (GCPtrNext32 > pCtx->cs.u32Limit)
1973 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1974 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1975 if (!cbToTryRead) /* overflowed */
1976 {
1977 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1978 cbToTryRead = UINT32_MAX;
1979 /** @todo check out wrapping around the code segment. */
1980 }
1981 if (cbToTryRead < cbMin - cbLeft)
1982 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1983 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1984 }
1985
1986 /* Only read up to the end of the page, and make sure we don't read more
1987 than the opcode buffer can hold. */
1988 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1989 if (cbToTryRead > cbLeftOnPage)
1990 cbToTryRead = cbLeftOnPage;
1991 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1992 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1993/** @todo r=bird: Convert assertion into undefined opcode exception? */
1994 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1995
1996# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1997 /* Allow interpretation of patch manager code blocks since they can for
1998 instance throw #PFs for perfectly good reasons. */
1999 if (pVCpu->iem.s.fInPatchCode)
2000 {
2001 size_t cbRead = 0;
2002 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2003 AssertRCReturn(rc, rc);
2004 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2005 return VINF_SUCCESS;
2006 }
2007# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2008
2009 RTGCPHYS GCPhys;
2010 uint64_t fFlags;
2011 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2012 if (RT_FAILURE(rc))
2013 {
2014 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2015 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2016 }
2017 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2018 {
2019 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2020 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2021 }
2022 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2023 {
2024 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2025 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2026 }
2027 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2028 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2029 /** @todo Check reserved bits and such stuff. PGM is better at doing
2030 * that, so do it when implementing the guest virtual address
2031 * TLB... */
2032
2033 /*
2034 * Read the bytes at this address.
2035 *
2036 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2037 * and since PATM should only patch the start of an instruction there
2038 * should be no need to check again here.
2039 */
2040 if (!pVCpu->iem.s.fBypassHandlers)
2041 {
2042 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2043 cbToTryRead, PGMACCESSORIGIN_IEM);
2044 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2045 { /* likely */ }
2046 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2047 {
2048 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2049                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2050 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2051 }
2052 else
2053 {
2054 Log((RT_SUCCESS(rcStrict)
2055 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2056 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2057                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2058 return rcStrict;
2059 }
2060 }
2061 else
2062 {
2063 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2064 if (RT_SUCCESS(rc))
2065 { /* likely */ }
2066 else
2067 {
2068 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2069 return rc;
2070 }
2071 }
2072 pVCpu->iem.s.cbOpcode += cbToTryRead;
2073 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2074
2075 return VINF_SUCCESS;
2076}
2077
2078#endif /* !IEM_WITH_CODE_TLB */
2079#ifndef IEM_WITH_SETJMP
2080
2081/**
2082 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2083 *
2084 * @returns Strict VBox status code.
2085 * @param pVCpu The cross context virtual CPU structure of the
2086 * calling thread.
2087 * @param pb Where to return the opcode byte.
2088 */
2089DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2090{
2091 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2092 if (rcStrict == VINF_SUCCESS)
2093 {
2094 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2095 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2096 pVCpu->iem.s.offOpcode = offOpcode + 1;
2097 }
2098 else
2099 *pb = 0;
2100 return rcStrict;
2101}
2102
2103
2104/**
2105 * Fetches the next opcode byte.
2106 *
2107 * @returns Strict VBox status code.
2108 * @param pVCpu The cross context virtual CPU structure of the
2109 * calling thread.
2110 * @param pu8 Where to return the opcode byte.
2111 */
2112DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2113{
2114 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2115 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2116 {
2117 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2118 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2119 return VINF_SUCCESS;
2120 }
2121 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2122}
2123
2124#else /* IEM_WITH_SETJMP */
2125
2126/**
2127 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2128 *
2129 * @returns The opcode byte.
2130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2131 */
2132DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2133{
2134# ifdef IEM_WITH_CODE_TLB
2135 uint8_t u8;
2136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2137 return u8;
2138# else
2139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2140 if (rcStrict == VINF_SUCCESS)
2141 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2142 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2143# endif
2144}
2145
2146
2147/**
2148 * Fetches the next opcode byte, longjmp on error.
2149 *
2150 * @returns The opcode byte.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 */
2153DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2154{
2155# ifdef IEM_WITH_CODE_TLB
2156 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2157 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2158 if (RT_LIKELY( pbBuf != NULL
2159 && offBuf < pVCpu->iem.s.cbInstrBuf))
2160 {
2161 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2162 return pbBuf[offBuf];
2163 }
2164# else
2165 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2166 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2167 {
2168 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2169 return pVCpu->iem.s.abOpcode[offOpcode];
2170 }
2171# endif
2172 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2173}
2174
2175#endif /* IEM_WITH_SETJMP */
2176
2177/**
2178 * Fetches the next opcode byte, returns automatically on failure.
2179 *
2180 * @param a_pu8 Where to return the opcode byte.
2181 * @remark Implicitly references pVCpu.
2182 */
2183#ifndef IEM_WITH_SETJMP
2184# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2185 do \
2186 { \
2187 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2188 if (rcStrict2 == VINF_SUCCESS) \
2189 { /* likely */ } \
2190 else \
2191 return rcStrict2; \
2192 } while (0)
2193#else
2194# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2195#endif /* IEM_WITH_SETJMP */
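/*
 * Illustrative sketch (not part of the original source): how a decoder helper
 * typically consumes the stream with this macro.  In the non-setjmp build the
 * macro hides the status plumbing by returning from the *caller* on failure,
 * which is why it may only be used in functions returning VBOXSTRICTRC; in the
 * setjmp build it longjmps instead.  The function below is a made-up example
 * (iemExampleDecodeTwoBytes is not a real IEM routine).
 */
#if 0 /* standalone illustration only */
IEM_STATIC VBOXSTRICTRC iemExampleDecodeTwoBytes(PVMCPU pVCpu)
{
    uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);  /* returns/longjmps on fetch failure */
    uint8_t bRm;     IEM_OPCODE_GET_NEXT_U8(&bRm);
    Log5(("example: opcode=%#x modrm=%#x\n", bOpcode, bRm));
    return VINF_SUCCESS;
}
#endif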
2196
2197
2198#ifndef IEM_WITH_SETJMP
2199/**
2200 * Fetches the next signed byte from the opcode stream.
2201 *
2202 * @returns Strict VBox status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 * @param pi8 Where to return the signed byte.
2205 */
2206DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2207{
2208 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2209}
2210#endif /* !IEM_WITH_SETJMP */
2211
2212
2213/**
2214 * Fetches the next signed byte from the opcode stream, returning automatically
2215 * on failure.
2216 *
2217 * @param a_pi8 Where to return the signed byte.
2218 * @remark Implicitly references pVCpu.
2219 */
2220#ifndef IEM_WITH_SETJMP
2221# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2222 do \
2223 { \
2224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2225 if (rcStrict2 != VINF_SUCCESS) \
2226 return rcStrict2; \
2227 } while (0)
2228#else /* IEM_WITH_SETJMP */
2229# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2230
2231#endif /* IEM_WITH_SETJMP */
2232
2233#ifndef IEM_WITH_SETJMP
2234
2235/**
2236 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2237 *
2238 * @returns Strict VBox status code.
2239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2240 * @param   pu16        Where to return the opcode word.
2241 */
2242DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2243{
2244 uint8_t u8;
2245 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2246 if (rcStrict == VINF_SUCCESS)
2247 *pu16 = (int8_t)u8;
2248 return rcStrict;
2249}
2250
2251
2252/**
2253 * Fetches the next signed byte from the opcode stream, extending it to
2254 * unsigned 16-bit.
2255 *
2256 * @returns Strict VBox status code.
2257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2258 * @param pu16 Where to return the unsigned word.
2259 */
2260DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2261{
2262 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2263 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2264 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2265
2266 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2267 pVCpu->iem.s.offOpcode = offOpcode + 1;
2268 return VINF_SUCCESS;
2269}
2270
2271#endif /* !IEM_WITH_SETJMP */
2272
2273/**
2274 * Fetches the next signed byte from the opcode stream, sign-extending it to
2275 * a word and returning automatically on failure.
2276 *
2277 * @param a_pu16 Where to return the word.
2278 * @remark Implicitly references pVCpu.
2279 */
2280#ifndef IEM_WITH_SETJMP
2281# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2282 do \
2283 { \
2284 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2285 if (rcStrict2 != VINF_SUCCESS) \
2286 return rcStrict2; \
2287 } while (0)
2288#else
2289# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2290#endif
2291
2292#ifndef IEM_WITH_SETJMP
2293
2294/**
2295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2296 *
2297 * @returns Strict VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2299 * @param pu32 Where to return the opcode dword.
2300 */
2301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2302{
2303 uint8_t u8;
2304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2305 if (rcStrict == VINF_SUCCESS)
2306 *pu32 = (int8_t)u8;
2307 return rcStrict;
2308}
2309
2310
2311/**
2312 * Fetches the next signed byte from the opcode stream, extending it to
2313 * unsigned 32-bit.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu32 Where to return the unsigned dword.
2318 */
2319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2320{
2321 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2322 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2323 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2324
2325 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2326 pVCpu->iem.s.offOpcode = offOpcode + 1;
2327 return VINF_SUCCESS;
2328}
2329
2330#endif /* !IEM_WITH_SETJMP */
2331
2332/**
2333 * Fetches the next signed byte from the opcode stream and sign-extending it to
2334 * a word, returning automatically on failure.
2335 *
2336 * @param a_pu32 Where to return the word.
2337 * @remark Implicitly references pVCpu.
2338 */
2339#ifndef IEM_WITH_SETJMP
2340# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2341 do \
2342 { \
2343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2344 if (rcStrict2 != VINF_SUCCESS) \
2345 return rcStrict2; \
2346 } while (0)
2347#else
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2349#endif
2350
2351#ifndef IEM_WITH_SETJMP
2352
2353/**
2354 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2355 *
2356 * @returns Strict VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param pu64 Where to return the opcode qword.
2359 */
2360DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2361{
2362 uint8_t u8;
2363 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2364 if (rcStrict == VINF_SUCCESS)
2365 *pu64 = (int8_t)u8;
2366 return rcStrict;
2367}
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, extending it to
2372 * unsigned 64-bit.
2373 *
2374 * @returns Strict VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param pu64 Where to return the unsigned qword.
2377 */
2378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2379{
2380 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2381 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2382 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2383
2384 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2385 pVCpu->iem.s.offOpcode = offOpcode + 1;
2386 return VINF_SUCCESS;
2387}
2388
2389#endif /* !IEM_WITH_SETJMP */
2390
2391
2392/**
2393 * Fetches the next signed byte from the opcode stream, sign-extending it to
2394 * a quad word and returning automatically on failure.
2395 *
2396 * @param   a_pu64      Where to return the quad word.
2397 * @remark Implicitly references pVCpu.
2398 */
2399#ifndef IEM_WITH_SETJMP
2400# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2401 do \
2402 { \
2403 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2404 if (rcStrict2 != VINF_SUCCESS) \
2405 return rcStrict2; \
2406 } while (0)
2407#else
2408# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2409#endif
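/*
 * Illustrative note (not part of the original source): the S8->U16/U32/U64
 * fetchers above rely on standard C integer conversions for the sign
 * extension - casting the byte to int8_t and assigning it to a wider unsigned
 * type replicates the sign bit, e.g.:
 */
#if 0 /* standalone illustration only */
static void exampleSignExtend(void)
{
    uint8_t  const b   = 0xfe;          /* -2 when viewed as a signed byte */
    uint16_t const u16 = (int8_t)b;     /* 0xfffe */
    uint64_t const u64 = (int8_t)b;     /* 0xfffffffffffffffe */
    Assert(u16 == UINT16_C(0xfffe) && u64 == UINT64_C(0xfffffffffffffffe));
}
#endif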
2410
2411
2412#ifndef IEM_WITH_SETJMP
2413
2414/**
2415 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2416 *
2417 * @returns Strict VBox status code.
2418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2419 * @param pu16 Where to return the opcode word.
2420 */
2421DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2422{
2423 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2424 if (rcStrict == VINF_SUCCESS)
2425 {
2426 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2427# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2428 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2429# else
2430 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2431# endif
2432 pVCpu->iem.s.offOpcode = offOpcode + 2;
2433 }
2434 else
2435 *pu16 = 0;
2436 return rcStrict;
2437}
2438
2439
2440/**
2441 * Fetches the next opcode word.
2442 *
2443 * @returns Strict VBox status code.
2444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2445 * @param pu16 Where to return the opcode word.
2446 */
2447DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2448{
2449 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2450 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2451 {
2452 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2453# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2454 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2455# else
2456 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2457# endif
2458 return VINF_SUCCESS;
2459 }
2460 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2461}
2462
2463#else /* IEM_WITH_SETJMP */
2464
2465/**
2466 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2467 *
2468 * @returns The opcode word.
2469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2470 */
2471DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2472{
2473# ifdef IEM_WITH_CODE_TLB
2474 uint16_t u16;
2475 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2476 return u16;
2477# else
2478 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2479 if (rcStrict == VINF_SUCCESS)
2480 {
2481 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2482 pVCpu->iem.s.offOpcode += 2;
2483# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2484 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2485# else
2486 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2487# endif
2488 }
2489 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2490# endif
2491}
2492
2493
2494/**
2495 * Fetches the next opcode word, longjmp on error.
2496 *
2497 * @returns The opcode word.
2498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2499 */
2500DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2501{
2502# ifdef IEM_WITH_CODE_TLB
2503 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2504 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2505 if (RT_LIKELY( pbBuf != NULL
2506 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2507 {
2508 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2509# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2510 return *(uint16_t const *)&pbBuf[offBuf];
2511# else
2512 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2513# endif
2514 }
2515# else
2516 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2517 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2518 {
2519 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2520# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2521 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2522# else
2523 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2524# endif
2525 }
2526# endif
2527 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2528}
2529
2530#endif /* IEM_WITH_SETJMP */
2531
2532
2533/**
2534 * Fetches the next opcode word, returns automatically on failure.
2535 *
2536 * @param a_pu16 Where to return the opcode word.
2537 * @remark Implicitly references pVCpu.
2538 */
2539#ifndef IEM_WITH_SETJMP
2540# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2541 do \
2542 { \
2543 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2544 if (rcStrict2 != VINF_SUCCESS) \
2545 return rcStrict2; \
2546 } while (0)
2547#else
2548# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2549#endif
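/*
 * Illustrative note (not part of the original source): both word-fetch paths
 * above yield the same value because x86 immediates are little endian - the
 * first opcode byte is the least significant.  RT_MAKE_U16(ab[0], ab[1])
 * therefore matches the direct unaligned uint16_t read on little-endian hosts.
 */
#if 0 /* standalone illustration only */
static uint16_t exampleFetchU16(uint8_t const *pb)
{
    uint16_t const uPortable = RT_MAKE_U16(pb[0], pb[1]);   /* pb[0] is the low byte */
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    Assert(uPortable == *(uint16_t const *)pb);              /* equivalent fast path */
# endif
    return uPortable;
}
#endif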
2550
2551#ifndef IEM_WITH_SETJMP
2552
2553/**
2554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2555 *
2556 * @returns Strict VBox status code.
2557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2558 * @param pu32 Where to return the opcode double word.
2559 */
2560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2561{
2562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2563 if (rcStrict == VINF_SUCCESS)
2564 {
2565 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2566 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2567 pVCpu->iem.s.offOpcode = offOpcode + 2;
2568 }
2569 else
2570 *pu32 = 0;
2571 return rcStrict;
2572}
2573
2574
2575/**
2576 * Fetches the next opcode word, zero extending it to a double word.
2577 *
2578 * @returns Strict VBox status code.
2579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2580 * @param pu32 Where to return the opcode double word.
2581 */
2582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2583{
2584 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2585 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2586 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2587
2588 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2589 pVCpu->iem.s.offOpcode = offOpcode + 2;
2590 return VINF_SUCCESS;
2591}
2592
2593#endif /* !IEM_WITH_SETJMP */
2594
2595
2596/**
2597 * Fetches the next opcode word and zero extends it to a double word, returns
2598 * automatically on failure.
2599 *
2600 * @param a_pu32 Where to return the opcode double word.
2601 * @remark Implicitly references pVCpu.
2602 */
2603#ifndef IEM_WITH_SETJMP
2604# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2605 do \
2606 { \
2607 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2608 if (rcStrict2 != VINF_SUCCESS) \
2609 return rcStrict2; \
2610 } while (0)
2611#else
2612# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2613#endif
2614
2615#ifndef IEM_WITH_SETJMP
2616
2617/**
2618 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2619 *
2620 * @returns Strict VBox status code.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 * @param pu64 Where to return the opcode quad word.
2623 */
2624DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2625{
2626 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2627 if (rcStrict == VINF_SUCCESS)
2628 {
2629 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2630 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2631 pVCpu->iem.s.offOpcode = offOpcode + 2;
2632 }
2633 else
2634 *pu64 = 0;
2635 return rcStrict;
2636}
2637
2638
2639/**
2640 * Fetches the next opcode word, zero extending it to a quad word.
2641 *
2642 * @returns Strict VBox status code.
2643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2644 * @param pu64 Where to return the opcode quad word.
2645 */
2646DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2647{
2648 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2649 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2650 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2651
2652 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2653 pVCpu->iem.s.offOpcode = offOpcode + 2;
2654 return VINF_SUCCESS;
2655}
2656
2657#endif /* !IEM_WITH_SETJMP */
2658
2659/**
2660 * Fetches the next opcode word and zero extends it to a quad word, returns
2661 * automatically on failure.
2662 *
2663 * @param a_pu64 Where to return the opcode quad word.
2664 * @remark Implicitly references pVCpu.
2665 */
2666#ifndef IEM_WITH_SETJMP
2667# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2668 do \
2669 { \
2670 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2671 if (rcStrict2 != VINF_SUCCESS) \
2672 return rcStrict2; \
2673 } while (0)
2674#else
2675# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2676#endif
2677
2678
2679#ifndef IEM_WITH_SETJMP
2680/**
2681 * Fetches the next signed word from the opcode stream.
2682 *
2683 * @returns Strict VBox status code.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 * @param pi16 Where to return the signed word.
2686 */
2687DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2688{
2689 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2690}
2691#endif /* !IEM_WITH_SETJMP */
2692
2693
2694/**
2695 * Fetches the next signed word from the opcode stream, returning automatically
2696 * on failure.
2697 *
2698 * @param a_pi16 Where to return the signed word.
2699 * @remark Implicitly references pVCpu.
2700 */
2701#ifndef IEM_WITH_SETJMP
2702# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2703 do \
2704 { \
2705 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2706 if (rcStrict2 != VINF_SUCCESS) \
2707 return rcStrict2; \
2708 } while (0)
2709#else
2710# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2711#endif
2712
2713#ifndef IEM_WITH_SETJMP
2714
2715/**
2716 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2717 *
2718 * @returns Strict VBox status code.
2719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2720 * @param pu32 Where to return the opcode dword.
2721 */
2722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2723{
2724 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2725 if (rcStrict == VINF_SUCCESS)
2726 {
2727 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2728# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2729 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2730# else
2731 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2732 pVCpu->iem.s.abOpcode[offOpcode + 1],
2733 pVCpu->iem.s.abOpcode[offOpcode + 2],
2734 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2735# endif
2736 pVCpu->iem.s.offOpcode = offOpcode + 4;
2737 }
2738 else
2739 *pu32 = 0;
2740 return rcStrict;
2741}
2742
2743
2744/**
2745 * Fetches the next opcode dword.
2746 *
2747 * @returns Strict VBox status code.
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param pu32 Where to return the opcode double word.
2750 */
2751DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2752{
2753 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2754 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2755 {
2756 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2757# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2758 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2759# else
2760 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2761 pVCpu->iem.s.abOpcode[offOpcode + 1],
2762 pVCpu->iem.s.abOpcode[offOpcode + 2],
2763 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2764# endif
2765 return VINF_SUCCESS;
2766 }
2767 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2768}
2769
2770#else  /* IEM_WITH_SETJMP */
2771
2772/**
2773 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2774 *
2775 * @returns The opcode dword.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 */
2778DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2779{
2780# ifdef IEM_WITH_CODE_TLB
2781 uint32_t u32;
2782 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2783 return u32;
2784# else
2785 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2786 if (rcStrict == VINF_SUCCESS)
2787 {
2788 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2789 pVCpu->iem.s.offOpcode = offOpcode + 4;
2790# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2791 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2792# else
2793 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2794 pVCpu->iem.s.abOpcode[offOpcode + 1],
2795 pVCpu->iem.s.abOpcode[offOpcode + 2],
2796 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2797# endif
2798 }
2799 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2800# endif
2801}
2802
2803
2804/**
2805 * Fetches the next opcode dword, longjmp on error.
2806 *
2807 * @returns The opcode dword.
2808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2809 */
2810DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2811{
2812# ifdef IEM_WITH_CODE_TLB
2813 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2814 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2815 if (RT_LIKELY( pbBuf != NULL
2816 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2817 {
2818 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2819# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2820 return *(uint32_t const *)&pbBuf[offBuf];
2821# else
2822 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2823 pbBuf[offBuf + 1],
2824 pbBuf[offBuf + 2],
2825 pbBuf[offBuf + 3]);
2826# endif
2827 }
2828# else
2829 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2830 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2831 {
2832 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2833# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2834 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2835# else
2836 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2837 pVCpu->iem.s.abOpcode[offOpcode + 1],
2838 pVCpu->iem.s.abOpcode[offOpcode + 2],
2839 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2840# endif
2841 }
2842# endif
2843 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2844}
2845
2846#endif /* IEM_WITH_SETJMP */
2847
2848
2849/**
2850 * Fetches the next opcode dword, returns automatically on failure.
2851 *
2852 * @param a_pu32 Where to return the opcode dword.
2853 * @remark Implicitly references pVCpu.
2854 */
2855#ifndef IEM_WITH_SETJMP
2856# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2857 do \
2858 { \
2859 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2860 if (rcStrict2 != VINF_SUCCESS) \
2861 return rcStrict2; \
2862 } while (0)
2863#else
2864# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2865#endif
2866
2867#ifndef IEM_WITH_SETJMP
2868
2869/**
2870 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2871 *
2872 * @returns Strict VBox status code.
2873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2874 * @param   pu64        Where to return the opcode quad word.
2875 */
2876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2877{
2878 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2879 if (rcStrict == VINF_SUCCESS)
2880 {
2881 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2882 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2883 pVCpu->iem.s.abOpcode[offOpcode + 1],
2884 pVCpu->iem.s.abOpcode[offOpcode + 2],
2885 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2886 pVCpu->iem.s.offOpcode = offOpcode + 4;
2887 }
2888 else
2889 *pu64 = 0;
2890 return rcStrict;
2891}
2892
2893
2894/**
2895 * Fetches the next opcode dword, zero extending it to a quad word.
2896 *
2897 * @returns Strict VBox status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param pu64 Where to return the opcode quad word.
2900 */
2901DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2902{
2903 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2904 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2905 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2906
2907 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2911 pVCpu->iem.s.offOpcode = offOpcode + 4;
2912 return VINF_SUCCESS;
2913}
2914
2915#endif /* !IEM_WITH_SETJMP */
2916
2917
2918/**
2919 * Fetches the next opcode dword and zero extends it to a quad word, returns
2920 * automatically on failure.
2921 *
2922 * @param a_pu64 Where to return the opcode quad word.
2923 * @remark Implicitly references pVCpu.
2924 */
2925#ifndef IEM_WITH_SETJMP
2926# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2927 do \
2928 { \
2929 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2930 if (rcStrict2 != VINF_SUCCESS) \
2931 return rcStrict2; \
2932 } while (0)
2933#else
2934# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2935#endif
2936
2937
2938#ifndef IEM_WITH_SETJMP
2939/**
2940 * Fetches the next signed double word from the opcode stream.
2941 *
2942 * @returns Strict VBox status code.
2943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2944 * @param pi32 Where to return the signed double word.
2945 */
2946DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2947{
2948 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2949}
2950#endif
2951
2952/**
2953 * Fetches the next signed double word from the opcode stream, returning
2954 * automatically on failure.
2955 *
2956 * @param a_pi32 Where to return the signed double word.
2957 * @remark Implicitly references pVCpu.
2958 */
2959#ifndef IEM_WITH_SETJMP
2960# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2961 do \
2962 { \
2963 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2964 if (rcStrict2 != VINF_SUCCESS) \
2965 return rcStrict2; \
2966 } while (0)
2967#else
2968# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2969#endif
2970
2971#ifndef IEM_WITH_SETJMP
2972
2973/**
2974 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2975 *
2976 * @returns Strict VBox status code.
2977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2978 * @param pu64 Where to return the opcode qword.
2979 */
2980DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2981{
2982 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2983 if (rcStrict == VINF_SUCCESS)
2984 {
2985 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2986 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2987 pVCpu->iem.s.abOpcode[offOpcode + 1],
2988 pVCpu->iem.s.abOpcode[offOpcode + 2],
2989 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2990 pVCpu->iem.s.offOpcode = offOpcode + 4;
2991 }
2992 else
2993 *pu64 = 0;
2994 return rcStrict;
2995}
2996
2997
2998/**
2999 * Fetches the next opcode dword, sign extending it into a quad word.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3003 * @param pu64 Where to return the opcode quad word.
3004 */
3005DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3006{
3007 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3008 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3009 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3010
3011 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3012 pVCpu->iem.s.abOpcode[offOpcode + 1],
3013 pVCpu->iem.s.abOpcode[offOpcode + 2],
3014 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3015 *pu64 = i32;
3016 pVCpu->iem.s.offOpcode = offOpcode + 4;
3017 return VINF_SUCCESS;
3018}
3019
3020#endif /* !IEM_WITH_SETJMP */
3021
3022
3023/**
3024 * Fetches the next opcode double word and sign extends it to a quad word,
3025 * returns automatically on failure.
3026 *
3027 * @param a_pu64 Where to return the opcode quad word.
3028 * @remark Implicitly references pVCpu.
3029 */
3030#ifndef IEM_WITH_SETJMP
3031# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3032 do \
3033 { \
3034 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3035 if (rcStrict2 != VINF_SUCCESS) \
3036 return rcStrict2; \
3037 } while (0)
3038#else
3039# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3040#endif
3041
3042#ifndef IEM_WITH_SETJMP
3043
3044/**
3045 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3046 *
3047 * @returns Strict VBox status code.
3048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3049 * @param pu64 Where to return the opcode qword.
3050 */
3051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3052{
3053 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3054 if (rcStrict == VINF_SUCCESS)
3055 {
3056 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3059# else
3060 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3061 pVCpu->iem.s.abOpcode[offOpcode + 1],
3062 pVCpu->iem.s.abOpcode[offOpcode + 2],
3063 pVCpu->iem.s.abOpcode[offOpcode + 3],
3064 pVCpu->iem.s.abOpcode[offOpcode + 4],
3065 pVCpu->iem.s.abOpcode[offOpcode + 5],
3066 pVCpu->iem.s.abOpcode[offOpcode + 6],
3067 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3068# endif
3069 pVCpu->iem.s.offOpcode = offOpcode + 8;
3070 }
3071 else
3072 *pu64 = 0;
3073 return rcStrict;
3074}
3075
3076
3077/**
3078 * Fetches the next opcode qword.
3079 *
3080 * @returns Strict VBox status code.
3081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3082 * @param pu64 Where to return the opcode qword.
3083 */
3084DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3085{
3086 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3087 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3088 {
3089# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3090 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3091# else
3092 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3093 pVCpu->iem.s.abOpcode[offOpcode + 1],
3094 pVCpu->iem.s.abOpcode[offOpcode + 2],
3095 pVCpu->iem.s.abOpcode[offOpcode + 3],
3096 pVCpu->iem.s.abOpcode[offOpcode + 4],
3097 pVCpu->iem.s.abOpcode[offOpcode + 5],
3098 pVCpu->iem.s.abOpcode[offOpcode + 6],
3099 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3100# endif
3101 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3102 return VINF_SUCCESS;
3103 }
3104 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3105}
3106
3107#else /* IEM_WITH_SETJMP */
3108
3109/**
3110 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3111 *
3112 * @returns The opcode qword.
3113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3114 */
3115DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3116{
3117# ifdef IEM_WITH_CODE_TLB
3118 uint64_t u64;
3119 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3120 return u64;
3121# else
3122 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3123 if (rcStrict == VINF_SUCCESS)
3124 {
3125 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3126 pVCpu->iem.s.offOpcode = offOpcode + 8;
3127# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3128 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3129# else
3130 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3131 pVCpu->iem.s.abOpcode[offOpcode + 1],
3132 pVCpu->iem.s.abOpcode[offOpcode + 2],
3133 pVCpu->iem.s.abOpcode[offOpcode + 3],
3134 pVCpu->iem.s.abOpcode[offOpcode + 4],
3135 pVCpu->iem.s.abOpcode[offOpcode + 5],
3136 pVCpu->iem.s.abOpcode[offOpcode + 6],
3137 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3138# endif
3139 }
3140 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3141# endif
3142}
3143
3144
3145/**
3146 * Fetches the next opcode qword, longjmp on error.
3147 *
3148 * @returns The opcode qword.
3149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3150 */
3151DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3152{
3153# ifdef IEM_WITH_CODE_TLB
3154 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3155 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3156 if (RT_LIKELY( pbBuf != NULL
3157 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3158 {
3159 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3160# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3161 return *(uint64_t const *)&pbBuf[offBuf];
3162# else
3163 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3164 pbBuf[offBuf + 1],
3165 pbBuf[offBuf + 2],
3166 pbBuf[offBuf + 3],
3167 pbBuf[offBuf + 4],
3168 pbBuf[offBuf + 5],
3169 pbBuf[offBuf + 6],
3170 pbBuf[offBuf + 7]);
3171# endif
3172 }
3173# else
3174 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3175 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3176 {
3177 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3178# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3179 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3180# else
3181 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3182 pVCpu->iem.s.abOpcode[offOpcode + 1],
3183 pVCpu->iem.s.abOpcode[offOpcode + 2],
3184 pVCpu->iem.s.abOpcode[offOpcode + 3],
3185 pVCpu->iem.s.abOpcode[offOpcode + 4],
3186 pVCpu->iem.s.abOpcode[offOpcode + 5],
3187 pVCpu->iem.s.abOpcode[offOpcode + 6],
3188 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3189# endif
3190 }
3191# endif
3192 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3193}
3194
3195#endif /* IEM_WITH_SETJMP */
3196
3197/**
3198 * Fetches the next opcode quad word, returns automatically on failure.
3199 *
3200 * @param a_pu64 Where to return the opcode quad word.
3201 * @remark Implicitly references pVCpu.
3202 */
3203#ifndef IEM_WITH_SETJMP
3204# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3205 do \
3206 { \
3207 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3208 if (rcStrict2 != VINF_SUCCESS) \
3209 return rcStrict2; \
3210 } while (0)
3211#else
3212# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3213#endif
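/*
 * Illustrative usage sketch (not part of the original source): a decoder
 * worker would typically consume a 64-bit immediate through the macro above
 * so that the setjmp and non-setjmp builds share the same source.  This
 * assumes the surrounding function is a regular IEM opcode worker with pVCpu
 * in scope and a strict status return (required by the non-setjmp variant):
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // returns / longjmps on fetch error
 *     // ... continue decoding using u64Imm ...
 */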
3214
3215
3216/** @name Misc Worker Functions.
3217 * @{
3218 */
3219
3220/**
3221 * Gets the exception class for the specified exception vector.
3222 *
3223 * @returns The class of the specified exception.
3224 * @param uVector The exception vector.
3225 */
3226IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3227{
3228 Assert(uVector <= X86_XCPT_LAST);
3229 switch (uVector)
3230 {
3231 case X86_XCPT_DE:
3232 case X86_XCPT_TS:
3233 case X86_XCPT_NP:
3234 case X86_XCPT_SS:
3235 case X86_XCPT_GP:
3236 case X86_XCPT_SX: /* AMD only */
3237 return IEMXCPTCLASS_CONTRIBUTORY;
3238
3239 case X86_XCPT_PF:
3240 case X86_XCPT_VE: /* Intel only */
3241 return IEMXCPTCLASS_PAGE_FAULT;
3242
3243 case X86_XCPT_DF:
3244 return IEMXCPTCLASS_DOUBLE_FAULT;
3245 }
3246 return IEMXCPTCLASS_BENIGN;
3247}
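/*
 * Illustrative sketch (not part of the original source): the classification
 * above is what drives the double/triple fault rules further down, e.g.:
 *
 *     Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);
 *     Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);
 *     Assert(iemGetXcptClass(X86_XCPT_UD) == IEMXCPTCLASS_BENIGN);
 */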
3248
3249
3250/**
3251 * Evaluates how to handle an exception caused during delivery of another event
3252 * (exception / interrupt).
3253 *
3254 * @returns How to handle the recursive exception.
3255 * @param pVCpu The cross context virtual CPU structure of the
3256 * calling thread.
3257 * @param fPrevFlags The flags of the previous event.
3258 * @param uPrevVector The vector of the previous event.
3259 * @param fCurFlags The flags of the current exception.
3260 * @param uCurVector The vector of the current exception.
3261 * @param pfXcptRaiseInfo Where to store additional information about the
3262 * exception condition. Optional.
3263 */
3264VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3265 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3266{
3267 /*
3268 * Only CPU exceptions can be raised while delivering other events; software interrupt
3269 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3270 */
3271 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3272 Assert(pVCpu); RT_NOREF(pVCpu);
3273 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 {
3335 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3336 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3337 }
3338
3339 if (pfXcptRaiseInfo)
3340 *pfXcptRaiseInfo = fRaiseInfo;
3341 return enmRaise;
3342}
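/*
 * Illustrative sketch (not part of the original source): a caller that took a
 * #GP while delivering a #NP could query the recursion rules like this and
 * would get a double fault back (both vectors are contributory):
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 */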
3343
3344
3345/**
3346 * Enters the CPU shutdown state initiated by a triple fault or other
3347 * unrecoverable conditions.
3348 *
3349 * @returns Strict VBox status code.
3350 * @param pVCpu The cross context virtual CPU structure of the
3351 * calling thread.
3352 */
3353IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3354{
3355 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3356 {
3357 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3358 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3359 }
3360
3361 RT_NOREF(pVCpu);
3362 return VINF_EM_TRIPLE_FAULT;
3363}
3364
3365
3366/**
3367 * Validates a new SS segment.
3368 *
3369 * @returns VBox strict status code.
3370 * @param pVCpu The cross context virtual CPU structure of the
3371 * calling thread.
3372 * @param pCtx The CPU context.
3373 * @param NewSS The new SS selector.
3374 * @param uCpl The CPL to load the stack for.
3375 * @param pDesc Where to return the descriptor.
3376 */
3377IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3378{
3379 NOREF(pCtx);
3380
3381 /* Null selectors are not allowed (we're not called for dispatching
3382 interrupts with SS=0 in long mode). */
3383 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3384 {
3385 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3386 return iemRaiseTaskSwitchFault0(pVCpu);
3387 }
3388
3389 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3390 if ((NewSS & X86_SEL_RPL) != uCpl)
3391 {
3392 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3393 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3394 }
3395
3396 /*
3397 * Read the descriptor.
3398 */
3399 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402
3403 /*
3404 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3405 */
3406 if (!pDesc->Legacy.Gen.u1DescType)
3407 {
3408 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3409 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3410 }
3411
3412 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3413 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3414 {
3415 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3416 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3417 }
3418 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3419 {
3420 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3421 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3422 }
3423
3424 /* Is it there? */
3425 /** @todo testcase: Is this checked before the canonical / limit check below? */
3426 if (!pDesc->Legacy.Gen.u1Present)
3427 {
3428 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3429 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3430 }
3431
3432 return VINF_SUCCESS;
3433}
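/*
 * Illustrative sketch (not part of the original source): stack switching code
 * (e.g. inter-privilege interrupt delivery) is expected to validate the SS
 * value it fetched from the TSS roughly like this before committing it:
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;            // #TS / #NP already raised by the helper
 *     // ... commit DescSS into pCtx->ss ...
 */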
3434
3435
3436/**
3437 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3438 * not.
3439 *
3440 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3441 * @param a_pCtx The CPU context.
3442 */
3443#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3444# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3445 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3446 ? (a_pCtx)->eflags.u \
3447 : CPUMRawGetEFlags(a_pVCpu) )
3448#else
3449# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3450 ( (a_pCtx)->eflags.u )
3451#endif
3452
3453/**
3454 * Updates the EFLAGS in the correct manner wrt. PATM.
3455 *
3456 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3457 * @param a_pCtx The CPU context.
3458 * @param a_fEfl The new EFLAGS.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3462 do { \
3463 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3464 (a_pCtx)->eflags.u = (a_fEfl); \
3465 else \
3466 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3467 } while (0)
3468#else
3469# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3470 do { \
3471 (a_pCtx)->eflags.u = (a_fEfl); \
3472 } while (0)
3473#endif
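/*
 * Illustrative sketch (not part of the original source): flag updates should
 * go through the two macros above so raw-mode (PATM) builds keep seeing a
 * consistent EFLAGS value.  A simple read-modify-write looks like:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;                    // e.g. mask interrupts
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */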
3474
3475
3476/** @} */
3477
3478/** @name Raising Exceptions.
3479 *
3480 * @{
3481 */
3482
3483
3484/**
3485 * Loads the specified stack far pointer from the TSS.
3486 *
3487 * @returns VBox strict status code.
3488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3489 * @param pCtx The CPU context.
3490 * @param uCpl The CPL to load the stack for.
3491 * @param pSelSS Where to return the new stack segment.
3492 * @param puEsp Where to return the new stack pointer.
3493 */
3494IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3495 PRTSEL pSelSS, uint32_t *puEsp)
3496{
3497 VBOXSTRICTRC rcStrict;
3498 Assert(uCpl < 4);
3499
3500 switch (pCtx->tr.Attr.n.u4Type)
3501 {
3502 /*
3503 * 16-bit TSS (X86TSS16).
3504 */
3505 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3506 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3507 {
3508 uint32_t off = uCpl * 4 + 2;
3509 if (off + 4 <= pCtx->tr.u32Limit)
3510 {
3511 /** @todo check actual access pattern here. */
3512 uint32_t u32Tmp = 0; /* gcc maybe... */
3513 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3514 if (rcStrict == VINF_SUCCESS)
3515 {
3516 *puEsp = RT_LOWORD(u32Tmp);
3517 *pSelSS = RT_HIWORD(u32Tmp);
3518 return VINF_SUCCESS;
3519 }
3520 }
3521 else
3522 {
3523 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3524 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526 break;
3527 }
3528
3529 /*
3530 * 32-bit TSS (X86TSS32).
3531 */
3532 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3533 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3534 {
3535 uint32_t off = uCpl * 8 + 4;
3536 if (off + 7 <= pCtx->tr.u32Limit)
3537 {
3538/** @todo check actual access pattern here. */
3539 uint64_t u64Tmp;
3540 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3541 if (rcStrict == VINF_SUCCESS)
3542 {
3543 *puEsp = u64Tmp & UINT32_MAX;
3544 *pSelSS = (RTSEL)(u64Tmp >> 32);
3545 return VINF_SUCCESS;
3546 }
3547 }
3548 else
3549 {
3550 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3551 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3552 }
3553 break;
3554 }
3555
3556 default:
3557 AssertFailed();
3558 rcStrict = VERR_IEM_IPE_4;
3559 break;
3560 }
3561
3562 *puEsp = 0; /* make gcc happy */
3563 *pSelSS = 0; /* make gcc happy */
3564 return rcStrict;
3565}
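/*
 * Illustrative sketch (not part of the original source): the offsets used
 * above follow directly from the TSS layouts.  For uCpl = 1:
 *
 *     16-bit TSS: off = 1 * 4 + 2 = 0x06  ->  sp1 (2 bytes) followed by ss1 (2 bytes)
 *     32-bit TSS: off = 1 * 8 + 4 = 0x0c  ->  esp1 (4 bytes) followed by ss1 (2 bytes + 2 padding)
 *
 * which is why a single 32-bit / 64-bit system read picks up both the stack
 * pointer and the stack segment for the target privilege level.
 */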
3566
3567
3568/**
3569 * Loads the specified stack pointer from the 64-bit TSS.
3570 *
3571 * @returns VBox strict status code.
3572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3573 * @param pCtx The CPU context.
3574 * @param uCpl The CPL to load the stack for.
3575 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3576 * @param puRsp Where to return the new stack pointer.
3577 */
3578IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3579{
3580 Assert(uCpl < 4);
3581 Assert(uIst < 8);
3582 *puRsp = 0; /* make gcc happy */
3583
3584 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3585
3586 uint32_t off;
3587 if (uIst)
3588 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3589 else
3590 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3591 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3592 {
3593 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3594 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596
3597 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3598}
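/*
 * Illustrative sketch (not part of the original source): the 64-bit TSS only
 * holds RSP0..RSP2 and IST1..IST7, so the offset computed above works out as
 * for instance:
 *
 *     uCpl=0, uIst=0   ->  off = RT_OFFSETOF(X86TSS64, rsp0)
 *     uIst=2 (any CPL) ->  off = RT_OFFSETOF(X86TSS64, ist1) + 1 * sizeof(uint64_t)   (i.e. ist2)
 *
 * and the new stack pointer is then fetched with a single 8-byte system read.
 */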
3599
3600
3601/**
3602 * Adjust the CPU state according to the exception being raised.
3603 *
3604 * @param pCtx The CPU context.
3605 * @param u8Vector The exception that has been raised.
3606 */
3607DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3608{
3609 switch (u8Vector)
3610 {
3611 case X86_XCPT_DB:
3612 pCtx->dr[7] &= ~X86_DR7_GD;
3613 break;
3614 /** @todo Read the AMD and Intel exception reference... */
3615 }
3616}
3617
3618
3619/**
3620 * Implements exceptions and interrupts for real mode.
3621 *
3622 * @returns VBox strict status code.
3623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3624 * @param pCtx The CPU context.
3625 * @param cbInstr The number of bytes to offset rIP by in the return
3626 * address.
3627 * @param u8Vector The interrupt / exception vector number.
3628 * @param fFlags The flags.
3629 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3630 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3631 */
3632IEM_STATIC VBOXSTRICTRC
3633iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3634 PCPUMCTX pCtx,
3635 uint8_t cbInstr,
3636 uint8_t u8Vector,
3637 uint32_t fFlags,
3638 uint16_t uErr,
3639 uint64_t uCr2)
3640{
3641 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3642 NOREF(uErr); NOREF(uCr2);
3643
3644 /*
3645 * Read the IDT entry.
3646 */
3647 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3648 {
3649 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3650 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3651 }
3652 RTFAR16 Idte;
3653 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3654 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3655 return rcStrict;
3656
3657 /*
3658 * Push the stack frame.
3659 */
3660 uint16_t *pu16Frame;
3661 uint64_t uNewRsp;
3662 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3663 if (rcStrict != VINF_SUCCESS)
3664 return rcStrict;
3665
3666 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3667#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3668 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3669 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3670 fEfl |= UINT16_C(0xf000);
3671#endif
3672 pu16Frame[2] = (uint16_t)fEfl;
3673 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3674 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3675 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3676 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3677 return rcStrict;
3678
3679 /*
3680 * Load the vector address into cs:ip and make exception specific state
3681 * adjustments.
3682 */
3683 pCtx->cs.Sel = Idte.sel;
3684 pCtx->cs.ValidSel = Idte.sel;
3685 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3686 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3687 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3688 pCtx->rip = Idte.off;
3689 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3690 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3691
3692 /** @todo do we actually do this in real mode? */
3693 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3694 iemRaiseXcptAdjustState(pCtx, u8Vector);
3695
3696 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3697}
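/*
 * Illustrative sketch (not part of the original source): in real mode the
 * "IDT" is the classic interrupt vector table of 4-byte offset:segment pairs
 * and the handler frame is the 6 bytes pushed above.  For vector 0x21:
 *
 *     IVT entry:   IDTR.base + 0x21 * 4  ->  { uint16_t off; uint16_t sel; }
 *     stack frame: [sp+4] = FLAGS, [sp+2] = CS, [sp+0] = IP (IP + cbInstr for INTn)
 *     new CS:IP:   sel:off, with CS.base = (uint32_t)sel << 4
 */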
3698
3699
3700/**
3701 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3702 *
3703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3704 * @param pSReg Pointer to the segment register.
3705 */
3706IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3707{
3708 pSReg->Sel = 0;
3709 pSReg->ValidSel = 0;
3710 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3711 {
3712 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3713 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3714 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3715 }
3716 else
3717 {
3718 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3719 /** @todo check this on AMD-V */
3720 pSReg->u64Base = 0;
3721 pSReg->u32Limit = 0;
3722 }
3723}
3724
3725
3726/**
3727 * Loads a segment selector during a task switch in V8086 mode.
3728 *
3729 * @param pSReg Pointer to the segment register.
3730 * @param uSel The selector value to load.
3731 */
3732IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3733{
3734 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3735 pSReg->Sel = uSel;
3736 pSReg->ValidSel = uSel;
3737 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3738 pSReg->u64Base = uSel << 4;
3739 pSReg->u32Limit = 0xffff;
3740 pSReg->Attr.u = 0xf3;
3741}
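/*
 * Illustrative sketch (not part of the original source): with the loader
 * above, a V8086 selector value of 0xb800 ends up as
 *
 *     base  = 0xb800 << 4 = 0x000b8000
 *     limit = 0xffff
 *     attr  = 0xf3   (present, DPL=3, accessed read/write data segment)
 *
 * which matches how the CPU treats segment registers in virtual-8086 mode.
 */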
3742
3743
3744/**
3745 * Loads a NULL data selector into a selector register, both the hidden and
3746 * visible parts, in protected mode.
3747 *
3748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3749 * @param pSReg Pointer to the segment register.
3750 * @param uRpl The RPL.
3751 */
3752IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3753{
3754 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3755 * data selector in protected mode. */
3756 pSReg->Sel = uRpl;
3757 pSReg->ValidSel = uRpl;
3758 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3759 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3760 {
3761 /* VT-x (Intel 3960x) observed doing something like this. */
3762 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3763 pSReg->u32Limit = UINT32_MAX;
3764 pSReg->u64Base = 0;
3765 }
3766 else
3767 {
3768 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3769 pSReg->u32Limit = 0;
3770 pSReg->u64Base = 0;
3771 }
3772}
3773
3774
3775/**
3776 * Loads a segment selector during a task switch in protected mode.
3777 *
3778 * In this task switch scenario, we would throw \#TS exceptions rather than
3779 * \#GPs.
3780 *
3781 * @returns VBox strict status code.
3782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3783 * @param pSReg Pointer to the segment register.
3784 * @param uSel The new selector value.
3785 *
3786 * @remarks This does _not_ handle CS or SS.
3787 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3788 */
3789IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3790{
3791 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3792
3793 /* Null data selector. */
3794 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3795 {
3796 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3797 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3798 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3799 return VINF_SUCCESS;
3800 }
3801
3802 /* Fetch the descriptor. */
3803 IEMSELDESC Desc;
3804 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3805 if (rcStrict != VINF_SUCCESS)
3806 {
3807 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3808 VBOXSTRICTRC_VAL(rcStrict)));
3809 return rcStrict;
3810 }
3811
3812 /* Must be a data segment or readable code segment. */
3813 if ( !Desc.Legacy.Gen.u1DescType
3814 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3817 Desc.Legacy.Gen.u4Type));
3818 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3819 }
3820
3821 /* Check privileges for data segments and non-conforming code segments. */
3822 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3823 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3824 {
3825 /* The RPL and the new CPL must be less than or equal to the DPL. */
3826 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3827 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3828 {
3829 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3830 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3831 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3832 }
3833 }
3834
3835 /* Is it there? */
3836 if (!Desc.Legacy.Gen.u1Present)
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3839 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3840 }
3841
3842 /* The base and limit. */
3843 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3844 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3845
3846 /*
3847 * Ok, everything checked out fine. Now set the accessed bit before
3848 * committing the result into the registers.
3849 */
3850 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3851 {
3852 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3853 if (rcStrict != VINF_SUCCESS)
3854 return rcStrict;
3855 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3856 }
3857
3858 /* Commit */
3859 pSReg->Sel = uSel;
3860 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3861 pSReg->u32Limit = cbLimit;
3862 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3863 pSReg->ValidSel = uSel;
3864 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3865 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3866 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3867
3868 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3869 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3870 return VINF_SUCCESS;
3871}
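/*
 * Illustrative sketch (not part of the original source): the task switch
 * worker below loads each data segment register from the incoming TSS image
 * through this helper, e.g.
 *
 *     rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;            // #TS(sel) / #NP(sel) already raised
 */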
3872
3873
3874/**
3875 * Performs a task switch.
3876 *
3877 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3878 * caller is responsible for performing the necessary checks (like DPL, TSS
3879 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3880 * reference for JMP, CALL, IRET.
3881 *
3882 * If the task switch is due to a software interrupt or hardware exception,
3883 * the caller is responsible for validating the TSS selector and descriptor. See
3884 * Intel Instruction reference for INT n.
3885 *
3886 * @returns VBox strict status code.
3887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3888 * @param pCtx The CPU context.
3889 * @param enmTaskSwitch What caused this task switch.
3890 * @param uNextEip The EIP effective after the task switch.
3891 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3892 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3893 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3894 * @param SelTSS The TSS selector of the new task.
3895 * @param pNewDescTSS Pointer to the new TSS descriptor.
3896 */
3897IEM_STATIC VBOXSTRICTRC
3898iemTaskSwitch(PVMCPU pVCpu,
3899 PCPUMCTX pCtx,
3900 IEMTASKSWITCH enmTaskSwitch,
3901 uint32_t uNextEip,
3902 uint32_t fFlags,
3903 uint16_t uErr,
3904 uint64_t uCr2,
3905 RTSEL SelTSS,
3906 PIEMSELDESC pNewDescTSS)
3907{
3908 Assert(!IEM_IS_REAL_MODE(pVCpu));
3909 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3910
3911 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3912 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3913 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3914 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3916
3917 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3918 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3919
3920 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3921 fIsNewTSS386, pCtx->eip, uNextEip));
3922
3923 /* Update CR2 in case it's a page-fault. */
3924 /** @todo This should probably be done much earlier in IEM/PGM. See
3925 * @bugref{5653#c49}. */
3926 if (fFlags & IEM_XCPT_FLAGS_CR2)
3927 pCtx->cr2 = uCr2;
3928
3929 /*
3930 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3931 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3932 */
3933 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3934 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3935 if (uNewTSSLimit < uNewTSSLimitMin)
3936 {
3937 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3938 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3939 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3940 }
3941
3942 /*
3943 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3944 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3945 */
3946 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3947 {
3948 uint32_t const uExitInfo1 = SelTSS;
3949 uint32_t uExitInfo2 = uErr;
3950 switch (enmTaskSwitch)
3951 {
3952 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3953 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3954 default: break;
3955 }
3956 if (fFlags & IEM_XCPT_FLAGS_ERR)
3957 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3958 if (pCtx->eflags.Bits.u1RF)
3959 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3960
3961 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3962 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3963 RT_NOREF2(uExitInfo1, uExitInfo2);
3964 }
3965 /** @todo Nested-VMX task-switch intercept. */
3966
3967 /*
3968 * Check the current TSS limit. The last data written to the current TSS during the
3969 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3970 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3971 *
3972 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3973 * end up with smaller than "legal" TSS limits.
3974 */
3975 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3976 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3977 if (uCurTSSLimit < uCurTSSLimitMin)
3978 {
3979 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3980 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3981 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3982 }
3983
3984 /*
3985 * Verify that the new TSS can be accessed and map it. Map only the required contents
3986 * and not the entire TSS.
3987 */
3988 void *pvNewTSS;
3989 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3990 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3991 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3992 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3993 * not perform correct translation if this happens. See Intel spec. 7.2.1
3994 * "Task-State Segment" */
3995 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3996 if (rcStrict != VINF_SUCCESS)
3997 {
3998 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3999 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4000 return rcStrict;
4001 }
4002
4003 /*
4004 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4005 */
4006 uint32_t u32EFlags = pCtx->eflags.u32;
4007 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4008 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4009 {
4010 PX86DESC pDescCurTSS;
4011 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4012 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4013 if (rcStrict != VINF_SUCCESS)
4014 {
4015 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4016 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4017 return rcStrict;
4018 }
4019
4020 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4021 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4022 if (rcStrict != VINF_SUCCESS)
4023 {
4024 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4025 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4026 return rcStrict;
4027 }
4028
4029 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4030 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4031 {
4032 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4033 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4034 u32EFlags &= ~X86_EFL_NT;
4035 }
4036 }
4037
4038 /*
4039 * Save the CPU state into the current TSS.
4040 */
4041 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4042 if (GCPtrNewTSS == GCPtrCurTSS)
4043 {
4044 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4045 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4046 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4047 }
4048 if (fIsNewTSS386)
4049 {
4050 /*
4051 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4052 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4053 */
4054 void *pvCurTSS32;
4055 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4056 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4057 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4058 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4059 if (rcStrict != VINF_SUCCESS)
4060 {
4061 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4062 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4063 return rcStrict;
4064 }
4065
4066 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4067 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4068 pCurTSS32->eip = uNextEip;
4069 pCurTSS32->eflags = u32EFlags;
4070 pCurTSS32->eax = pCtx->eax;
4071 pCurTSS32->ecx = pCtx->ecx;
4072 pCurTSS32->edx = pCtx->edx;
4073 pCurTSS32->ebx = pCtx->ebx;
4074 pCurTSS32->esp = pCtx->esp;
4075 pCurTSS32->ebp = pCtx->ebp;
4076 pCurTSS32->esi = pCtx->esi;
4077 pCurTSS32->edi = pCtx->edi;
4078 pCurTSS32->es = pCtx->es.Sel;
4079 pCurTSS32->cs = pCtx->cs.Sel;
4080 pCurTSS32->ss = pCtx->ss.Sel;
4081 pCurTSS32->ds = pCtx->ds.Sel;
4082 pCurTSS32->fs = pCtx->fs.Sel;
4083 pCurTSS32->gs = pCtx->gs.Sel;
4084
4085 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4086 if (rcStrict != VINF_SUCCESS)
4087 {
4088 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4089 VBOXSTRICTRC_VAL(rcStrict)));
4090 return rcStrict;
4091 }
4092 }
4093 else
4094 {
4095 /*
4096 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4097 */
4098 void *pvCurTSS16;
4099 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4100 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4101 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4102 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4103 if (rcStrict != VINF_SUCCESS)
4104 {
4105 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4106 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4107 return rcStrict;
4108 }
4109
4110 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4111 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4112 pCurTSS16->ip = uNextEip;
4113 pCurTSS16->flags = u32EFlags;
4114 pCurTSS16->ax = pCtx->ax;
4115 pCurTSS16->cx = pCtx->cx;
4116 pCurTSS16->dx = pCtx->dx;
4117 pCurTSS16->bx = pCtx->bx;
4118 pCurTSS16->sp = pCtx->sp;
4119 pCurTSS16->bp = pCtx->bp;
4120 pCurTSS16->si = pCtx->si;
4121 pCurTSS16->di = pCtx->di;
4122 pCurTSS16->es = pCtx->es.Sel;
4123 pCurTSS16->cs = pCtx->cs.Sel;
4124 pCurTSS16->ss = pCtx->ss.Sel;
4125 pCurTSS16->ds = pCtx->ds.Sel;
4126
4127 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4128 if (rcStrict != VINF_SUCCESS)
4129 {
4130 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4131 VBOXSTRICTRC_VAL(rcStrict)));
4132 return rcStrict;
4133 }
4134 }
4135
4136 /*
4137 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4138 */
4139 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4140 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4141 {
4142 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4143 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4144 pNewTSS->selPrev = pCtx->tr.Sel;
4145 }
4146
4147 /*
4148 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4149 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4150 */
4151 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4152 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4153 bool fNewDebugTrap;
4154 if (fIsNewTSS386)
4155 {
4156 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4157 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4158 uNewEip = pNewTSS32->eip;
4159 uNewEflags = pNewTSS32->eflags;
4160 uNewEax = pNewTSS32->eax;
4161 uNewEcx = pNewTSS32->ecx;
4162 uNewEdx = pNewTSS32->edx;
4163 uNewEbx = pNewTSS32->ebx;
4164 uNewEsp = pNewTSS32->esp;
4165 uNewEbp = pNewTSS32->ebp;
4166 uNewEsi = pNewTSS32->esi;
4167 uNewEdi = pNewTSS32->edi;
4168 uNewES = pNewTSS32->es;
4169 uNewCS = pNewTSS32->cs;
4170 uNewSS = pNewTSS32->ss;
4171 uNewDS = pNewTSS32->ds;
4172 uNewFS = pNewTSS32->fs;
4173 uNewGS = pNewTSS32->gs;
4174 uNewLdt = pNewTSS32->selLdt;
4175 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4176 }
4177 else
4178 {
4179 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4180 uNewCr3 = 0;
4181 uNewEip = pNewTSS16->ip;
4182 uNewEflags = pNewTSS16->flags;
4183 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4184 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4185 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4186 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4187 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4188 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4189 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4190 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4191 uNewES = pNewTSS16->es;
4192 uNewCS = pNewTSS16->cs;
4193 uNewSS = pNewTSS16->ss;
4194 uNewDS = pNewTSS16->ds;
4195 uNewFS = 0;
4196 uNewGS = 0;
4197 uNewLdt = pNewTSS16->selLdt;
4198 fNewDebugTrap = false;
4199 }
4200
4201 if (GCPtrNewTSS == GCPtrCurTSS)
4202 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4203 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4204
4205 /*
4206 * We're done accessing the new TSS.
4207 */
4208 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4209 if (rcStrict != VINF_SUCCESS)
4210 {
4211 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4212 return rcStrict;
4213 }
4214
4215 /*
4216 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4217 */
4218 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4219 {
4220 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4221 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4222 if (rcStrict != VINF_SUCCESS)
4223 {
4224 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4225 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4226 return rcStrict;
4227 }
4228
4229 /* Check that the descriptor indicates the new TSS is available (not busy). */
4230 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4231 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4232 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4233
4234 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4235 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4236 if (rcStrict != VINF_SUCCESS)
4237 {
4238 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4239 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4240 return rcStrict;
4241 }
4242 }
4243
4244 /*
4245 * From this point on, we're technically in the new task. We will defer exceptions
4246 * until the task switch has completed, but raise them before executing any instructions in the new task.
4247 */
4248 pCtx->tr.Sel = SelTSS;
4249 pCtx->tr.ValidSel = SelTSS;
4250 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4251 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4252 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4253 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4254 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4255
4256 /* Set the busy bit in TR. */
4257 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4258 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4259 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4260 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4261 {
4262 uNewEflags |= X86_EFL_NT;
4263 }
4264
4265 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4266 pCtx->cr0 |= X86_CR0_TS;
4267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4268
4269 pCtx->eip = uNewEip;
4270 pCtx->eax = uNewEax;
4271 pCtx->ecx = uNewEcx;
4272 pCtx->edx = uNewEdx;
4273 pCtx->ebx = uNewEbx;
4274 pCtx->esp = uNewEsp;
4275 pCtx->ebp = uNewEbp;
4276 pCtx->esi = uNewEsi;
4277 pCtx->edi = uNewEdi;
4278
4279 uNewEflags &= X86_EFL_LIVE_MASK;
4280 uNewEflags |= X86_EFL_RA1_MASK;
4281 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4282
4283 /*
4284 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4285 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4286 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4287 */
4288 pCtx->es.Sel = uNewES;
4289 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4290
4291 pCtx->cs.Sel = uNewCS;
4292 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4293
4294 pCtx->ss.Sel = uNewSS;
4295 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4296
4297 pCtx->ds.Sel = uNewDS;
4298 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4299
4300 pCtx->fs.Sel = uNewFS;
4301 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4302
4303 pCtx->gs.Sel = uNewGS;
4304 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4305 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4306
4307 pCtx->ldtr.Sel = uNewLdt;
4308 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4309 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4310 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4311
4312 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4313 {
4314 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4315 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4316 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4317 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4318 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4319 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4320 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4321 }
4322
4323 /*
4324 * Switch CR3 for the new task.
4325 */
4326 if ( fIsNewTSS386
4327 && (pCtx->cr0 & X86_CR0_PG))
4328 {
4329 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4330 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4331 {
4332 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4333 AssertRCSuccessReturn(rc, rc);
4334 }
4335 else
4336 pCtx->cr3 = uNewCr3;
4337
4338 /* Inform PGM. */
4339 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4340 {
4341 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4342 AssertRCReturn(rc, rc);
4343 /* ignore informational status codes */
4344 }
4345 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4346 }
4347
4348 /*
4349 * Switch LDTR for the new task.
4350 */
4351 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4352 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4353 else
4354 {
4355 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4356
4357 IEMSELDESC DescNewLdt;
4358 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4359 if (rcStrict != VINF_SUCCESS)
4360 {
4361 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4362 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4363 return rcStrict;
4364 }
4365 if ( !DescNewLdt.Legacy.Gen.u1Present
4366 || DescNewLdt.Legacy.Gen.u1DescType
4367 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4368 {
4369 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4370 uNewLdt, DescNewLdt.Legacy.u));
4371 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4372 }
4373
4374 pCtx->ldtr.ValidSel = uNewLdt;
4375 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4376 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4377 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4378 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4379 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4380 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4381 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4382 }
4383
4384 IEMSELDESC DescSS;
4385 if (IEM_IS_V86_MODE(pVCpu))
4386 {
4387 pVCpu->iem.s.uCpl = 3;
4388 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4389 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4390 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4391 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4392 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4393 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4394
4395 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4396 DescSS.Legacy.u = 0;
4397 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4398 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4399 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4400 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4401 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4402 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4403 DescSS.Legacy.Gen.u2Dpl = 3;
4404 }
4405 else
4406 {
4407 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4408
4409 /*
4410 * Load the stack segment for the new task.
4411 */
4412 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4413 {
4414 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4415 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4416 }
4417
4418 /* Fetch the descriptor. */
4419 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4420 if (rcStrict != VINF_SUCCESS)
4421 {
4422 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4423 VBOXSTRICTRC_VAL(rcStrict)));
4424 return rcStrict;
4425 }
4426
4427 /* SS must be a data segment and writable. */
4428 if ( !DescSS.Legacy.Gen.u1DescType
4429 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4430 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4431 {
4432 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4433 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4434 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4435 }
4436
4437 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4438 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4439 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4440 {
4441 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4442 uNewCpl));
4443 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4444 }
4445
4446 /* Is it there? */
4447 if (!DescSS.Legacy.Gen.u1Present)
4448 {
4449 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4450 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4451 }
4452
4453 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4454 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4455
4456 /* Set the accessed bit before committing the result into SS. */
4457 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4458 {
4459 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4460 if (rcStrict != VINF_SUCCESS)
4461 return rcStrict;
4462 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4463 }
4464
4465 /* Commit SS. */
4466 pCtx->ss.Sel = uNewSS;
4467 pCtx->ss.ValidSel = uNewSS;
4468 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4469 pCtx->ss.u32Limit = cbLimit;
4470 pCtx->ss.u64Base = u64Base;
4471 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4472 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4473
4474 /* CPL has changed, update IEM before loading rest of segments. */
4475 pVCpu->iem.s.uCpl = uNewCpl;
4476
4477 /*
4478 * Load the data segments for the new task.
4479 */
4480 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4481 if (rcStrict != VINF_SUCCESS)
4482 return rcStrict;
4483 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4484 if (rcStrict != VINF_SUCCESS)
4485 return rcStrict;
4486 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4487 if (rcStrict != VINF_SUCCESS)
4488 return rcStrict;
4489 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4490 if (rcStrict != VINF_SUCCESS)
4491 return rcStrict;
4492
4493 /*
4494 * Load the code segment for the new task.
4495 */
4496 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4497 {
4498 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4499 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4500 }
4501
4502 /* Fetch the descriptor. */
4503 IEMSELDESC DescCS;
4504 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4505 if (rcStrict != VINF_SUCCESS)
4506 {
4507 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4508 return rcStrict;
4509 }
4510
4511 /* CS must be a code segment. */
4512 if ( !DescCS.Legacy.Gen.u1DescType
4513 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4514 {
4515 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4516 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4517 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4518 }
4519
4520 /* For conforming CS, DPL must be less than or equal to the RPL. */
4521 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4522 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4523 {
4524 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4525 DescCS.Legacy.Gen.u2Dpl));
4526 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4527 }
4528
4529 /* For non-conforming CS, DPL must match RPL. */
4530 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4531 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4532 {
4533 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4534 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4535 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4536 }
4537
4538 /* Is it there? */
4539 if (!DescCS.Legacy.Gen.u1Present)
4540 {
4541 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4542 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4543 }
4544
4545 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4546 u64Base = X86DESC_BASE(&DescCS.Legacy);
4547
4548 /* Set the accessed bit before committing the result into CS. */
4549 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4550 {
4551 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4552 if (rcStrict != VINF_SUCCESS)
4553 return rcStrict;
4554 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4555 }
4556
4557 /* Commit CS. */
4558 pCtx->cs.Sel = uNewCS;
4559 pCtx->cs.ValidSel = uNewCS;
4560 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4561 pCtx->cs.u32Limit = cbLimit;
4562 pCtx->cs.u64Base = u64Base;
4563 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4564 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4565 }
4566
4567 /** @todo Debug trap. */
4568 if (fIsNewTSS386 && fNewDebugTrap)
4569 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4570
4571 /*
4572 * Construct the error code masks based on what caused this task switch.
4573 * See Intel Instruction reference for INT.
4574 */
4575 uint16_t uExt;
4576 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4577 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4578 {
4579 uExt = 1;
4580 }
4581 else
4582 uExt = 0;
4583
4584 /*
4585 * Push any error code on to the new stack.
4586 */
4587 if (fFlags & IEM_XCPT_FLAGS_ERR)
4588 {
4589 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4590 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4591 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4592
4593 /* Check that there is sufficient space on the stack. */
4594 /** @todo Factor out segment limit checking for normal/expand down segments
4595 * into a separate function. */
4596 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4597 {
4598 if ( pCtx->esp - 1 > cbLimitSS
4599 || pCtx->esp < cbStackFrame)
4600 {
4601 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4602 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4603 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4604 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4605 }
4606 }
4607 else
4608 {
4609 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4610 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4611 {
4612 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4613 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4614 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4615 }
4616 }
4617
4618
4619 if (fIsNewTSS386)
4620 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4621 else
4622 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4623 if (rcStrict != VINF_SUCCESS)
4624 {
4625 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4626 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4627 return rcStrict;
4628 }
4629 }
4630
4631 /* Check the new EIP against the new CS limit. */
4632 if (pCtx->eip > pCtx->cs.u32Limit)
4633 {
4634 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4635 pCtx->eip, pCtx->cs.u32Limit));
4636 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4637 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4638 }
4639
4640 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4641 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4642}
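/*
 * Illustrative sketch (not part of the original source): assuming the caller
 * has already fetched and validated the TSS descriptor referenced by a task
 * gate (DescTSS below is a hypothetical local), dispatching an exception via
 * a task switch boils down to a call like:
 *
 *     return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, uNextEip,
 *                          fFlags, uErr, uCr2, SelTSS, &DescTSS);
 */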
4643
4644
4645/**
4646 * Implements exceptions and interrupts for protected mode.
4647 *
4648 * @returns VBox strict status code.
4649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4650 * @param pCtx The CPU context.
4651 * @param cbInstr The number of bytes to offset rIP by in the return
4652 * address.
4653 * @param u8Vector The interrupt / exception vector number.
4654 * @param fFlags The flags.
4655 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4656 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4657 */
4658IEM_STATIC VBOXSTRICTRC
4659iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4660 PCPUMCTX pCtx,
4661 uint8_t cbInstr,
4662 uint8_t u8Vector,
4663 uint32_t fFlags,
4664 uint16_t uErr,
4665 uint64_t uCr2)
4666{
4667 /*
4668 * Read the IDT entry.
4669 */
4670 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4671 {
4672 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4673 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4674 }
4675 X86DESC Idte;
4676 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4677 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4678 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4679 return rcStrict;
4680 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4681 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4682 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4683
4684 /*
4685 * Check the descriptor type, DPL and such.
4686 * ASSUMES this is done in the same order as described for call-gate calls.
4687 */
4688 if (Idte.Gate.u1DescType)
4689 {
4690 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4691 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4692 }
4693 bool fTaskGate = false;
4694 uint8_t f32BitGate = true;
4695 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4696 switch (Idte.Gate.u4Type)
4697 {
4698 case X86_SEL_TYPE_SYS_UNDEFINED:
4699 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4700 case X86_SEL_TYPE_SYS_LDT:
4701 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4702 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4703 case X86_SEL_TYPE_SYS_UNDEFINED2:
4704 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4705 case X86_SEL_TYPE_SYS_UNDEFINED3:
4706 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4707 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4708 case X86_SEL_TYPE_SYS_UNDEFINED4:
4709 {
4710 /** @todo check what actually happens when the type is wrong...
4711 * esp. call gates. */
4712 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4713 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4714 }
4715
4716 case X86_SEL_TYPE_SYS_286_INT_GATE:
4717 f32BitGate = false;
4718 RT_FALL_THRU();
4719 case X86_SEL_TYPE_SYS_386_INT_GATE:
4720 fEflToClear |= X86_EFL_IF;
4721 break;
4722
4723 case X86_SEL_TYPE_SYS_TASK_GATE:
4724 fTaskGate = true;
4725#ifndef IEM_IMPLEMENTS_TASKSWITCH
4726 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4727#endif
4728 break;
4729
4730 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4731            f32BitGate = false;
                RT_FALL_THRU();
4732 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4733 break;
4734
4735 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4736 }
4737
4738 /* Check DPL against CPL if applicable. */
4739 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4740 {
4741 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4742 {
4743 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4744 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4745 }
4746 }
4747
4748 /* Is it there? */
4749 if (!Idte.Gate.u1Present)
4750 {
4751 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4752 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4753 }
4754
4755 /* Is it a task-gate? */
4756 if (fTaskGate)
4757 {
4758 /*
4759 * Construct the error code masks based on what caused this task switch.
4760 * See Intel Instruction reference for INT.
4761 */
4762 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4763 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4764 RTSEL SelTSS = Idte.Gate.u16Sel;
4765
4766 /*
4767 * Fetch the TSS descriptor in the GDT.
4768 */
4769 IEMSELDESC DescTSS;
4770 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4771 if (rcStrict != VINF_SUCCESS)
4772 {
4773 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4774 VBOXSTRICTRC_VAL(rcStrict)));
4775 return rcStrict;
4776 }
4777
4778 /* The TSS descriptor must be a system segment and be available (not busy). */
4779 if ( DescTSS.Legacy.Gen.u1DescType
4780 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4781 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4782 {
4783 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4784 u8Vector, SelTSS, DescTSS.Legacy.au64));
4785 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4786 }
4787
4788 /* The TSS must be present. */
4789 if (!DescTSS.Legacy.Gen.u1Present)
4790 {
4791 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4792 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4793 }
4794
4795 /* Do the actual task switch. */
4796 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4797 }
4798
4799 /* A null CS is bad. */
4800 RTSEL NewCS = Idte.Gate.u16Sel;
4801 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4802 {
4803 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4804 return iemRaiseGeneralProtectionFault0(pVCpu);
4805 }
4806
4807 /* Fetch the descriptor for the new CS. */
4808 IEMSELDESC DescCS;
4809 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4810 if (rcStrict != VINF_SUCCESS)
4811 {
4812 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4813 return rcStrict;
4814 }
4815
4816 /* Must be a code segment. */
4817 if (!DescCS.Legacy.Gen.u1DescType)
4818 {
4819 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4820 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4821 }
4822 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4823 {
4824 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4825 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4826 }
4827
4828 /* Don't allow lowering the privilege level. */
4829 /** @todo Does the lowering of privileges apply to software interrupts
4830 * only? This has bearings on the more-privileged or
4831 * same-privilege stack behavior further down. A testcase would
4832 * be nice. */
4833 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4834 {
4835 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4836 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4837 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4838 }
4839
4840 /* Make sure the selector is present. */
4841 if (!DescCS.Legacy.Gen.u1Present)
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4844 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4845 }
4846
4847 /* Check the new EIP against the new CS limit. */
4848 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4849 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4850 ? Idte.Gate.u16OffsetLow
4851 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
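     /* (286 gates carry only a 16-bit offset; the high offset word is not used.) */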
4852 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4853 if (uNewEip > cbLimitCS)
4854 {
4855 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4856 u8Vector, uNewEip, cbLimitCS, NewCS));
4857 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4858 }
4859 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4860
4861 /* Calc the flag image to push. */
4862 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4863 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4864 fEfl &= ~X86_EFL_RF;
4865 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4866 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4867
4868 /* From V8086 mode only go to CPL 0. */
4869 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4870 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4871 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4874 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4875 }
4876
4877 /*
4878 * If the privilege level changes, we need to get a new stack from the TSS.
4879     * This in turn means validating the new SS and ESP...
4880 */
4881 if (uNewCpl != pVCpu->iem.s.uCpl)
4882 {
4883 RTSEL NewSS;
4884 uint32_t uNewEsp;
4885 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4886 if (rcStrict != VINF_SUCCESS)
4887 return rcStrict;
4888
4889 IEMSELDESC DescSS;
4890 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4891 if (rcStrict != VINF_SUCCESS)
4892 return rcStrict;
4893 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4894 if (!DescSS.Legacy.Gen.u1DefBig)
4895 {
4896 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4897 uNewEsp = (uint16_t)uNewEsp;
4898 }
4899
4900 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4901
4902 /* Check that there is sufficient space for the stack frame. */
4903 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4904 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4905 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4906 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
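         /* Frame layout (pushed below): SS:ESP, EFLAGS, CS:EIP and optionally the error
            code; the V8086 variant additionally saves GS, FS, DS and ES.  Each entry is
            2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate, hence the shift. */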
4907
4908 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4909 {
4910 if ( uNewEsp - 1 > cbLimitSS
4911 || uNewEsp < cbStackFrame)
4912 {
4913 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4914 u8Vector, NewSS, uNewEsp, cbStackFrame));
4915 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4916 }
4917 }
4918 else
4919 {
4920 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4921 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4922 {
4923 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4924 u8Vector, NewSS, uNewEsp, cbStackFrame));
4925 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4926 }
4927 }
4928
4929 /*
4930 * Start making changes.
4931 */
4932
4933 /* Set the new CPL so that stack accesses use it. */
4934 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4935 pVCpu->iem.s.uCpl = uNewCpl;
4936
4937 /* Create the stack frame. */
4938 RTPTRUNION uStackFrame;
4939 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4940 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4941 if (rcStrict != VINF_SUCCESS)
4942 return rcStrict;
4943 void * const pvStackFrame = uStackFrame.pv;
4944 if (f32BitGate)
4945 {
4946 if (fFlags & IEM_XCPT_FLAGS_ERR)
4947 *uStackFrame.pu32++ = uErr;
4948 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4949 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4950 uStackFrame.pu32[2] = fEfl;
4951 uStackFrame.pu32[3] = pCtx->esp;
4952 uStackFrame.pu32[4] = pCtx->ss.Sel;
4953 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4954 if (fEfl & X86_EFL_VM)
4955 {
4956 uStackFrame.pu32[1] = pCtx->cs.Sel;
4957 uStackFrame.pu32[5] = pCtx->es.Sel;
4958 uStackFrame.pu32[6] = pCtx->ds.Sel;
4959 uStackFrame.pu32[7] = pCtx->fs.Sel;
4960 uStackFrame.pu32[8] = pCtx->gs.Sel;
4961 }
4962 }
4963 else
4964 {
4965 if (fFlags & IEM_XCPT_FLAGS_ERR)
4966 *uStackFrame.pu16++ = uErr;
4967 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4968 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4969 uStackFrame.pu16[2] = fEfl;
4970 uStackFrame.pu16[3] = pCtx->sp;
4971 uStackFrame.pu16[4] = pCtx->ss.Sel;
4972 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4973 if (fEfl & X86_EFL_VM)
4974 {
4975 uStackFrame.pu16[1] = pCtx->cs.Sel;
4976 uStackFrame.pu16[5] = pCtx->es.Sel;
4977 uStackFrame.pu16[6] = pCtx->ds.Sel;
4978 uStackFrame.pu16[7] = pCtx->fs.Sel;
4979 uStackFrame.pu16[8] = pCtx->gs.Sel;
4980 }
4981 }
4982 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4983 if (rcStrict != VINF_SUCCESS)
4984 return rcStrict;
4985
4986 /* Mark the selectors 'accessed' (hope this is the correct time). */
4987    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4988 * after pushing the stack frame? (Write protect the gdt + stack to
4989 * find out.) */
4990 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4991 {
4992 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4993 if (rcStrict != VINF_SUCCESS)
4994 return rcStrict;
4995 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4996 }
4997
4998 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4999 {
5000 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5001 if (rcStrict != VINF_SUCCESS)
5002 return rcStrict;
5003 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5004 }
5005
5006 /*
5007     * Start committing the register changes (joins with the DPL=CPL branch).
5008 */
5009 pCtx->ss.Sel = NewSS;
5010 pCtx->ss.ValidSel = NewSS;
5011 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5012 pCtx->ss.u32Limit = cbLimitSS;
5013 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5014 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5015 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5016 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5017 * SP is loaded).
5018 * Need to check the other combinations too:
5019 * - 16-bit TSS, 32-bit handler
5020 * - 32-bit TSS, 16-bit handler */
5021 if (!pCtx->ss.Attr.n.u1DefBig)
5022 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5023 else
5024 pCtx->rsp = uNewEsp - cbStackFrame;
5025
5026 if (fEfl & X86_EFL_VM)
5027 {
5028 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5029 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5030 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5031 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5032 }
5033 }
5034 /*
5035 * Same privilege, no stack change and smaller stack frame.
5036 */
5037 else
5038 {
5039 uint64_t uNewRsp;
5040 RTPTRUNION uStackFrame;
5041 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
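         /* No stack switch here, so only EFLAGS, CS:EIP and optionally the error code
            are pushed - 3 or 4 entries of 2 or 4 bytes depending on the gate size. */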
5042 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5043 if (rcStrict != VINF_SUCCESS)
5044 return rcStrict;
5045 void * const pvStackFrame = uStackFrame.pv;
5046
5047 if (f32BitGate)
5048 {
5049 if (fFlags & IEM_XCPT_FLAGS_ERR)
5050 *uStackFrame.pu32++ = uErr;
5051 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5052 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5053 uStackFrame.pu32[2] = fEfl;
5054 }
5055 else
5056 {
5057 if (fFlags & IEM_XCPT_FLAGS_ERR)
5058 *uStackFrame.pu16++ = uErr;
5059 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5060 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5061 uStackFrame.pu16[2] = fEfl;
5062 }
5063 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5064 if (rcStrict != VINF_SUCCESS)
5065 return rcStrict;
5066
5067 /* Mark the CS selector as 'accessed'. */
5068 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5069 {
5070 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5071 if (rcStrict != VINF_SUCCESS)
5072 return rcStrict;
5073 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5074 }
5075
5076 /*
5077 * Start committing the register changes (joins with the other branch).
5078 */
5079 pCtx->rsp = uNewRsp;
5080 }
5081
5082 /* ... register committing continues. */
5083 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5084 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5085 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5086 pCtx->cs.u32Limit = cbLimitCS;
5087 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5088 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5089
5090 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5091 fEfl &= ~fEflToClear;
5092 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5093
5094 if (fFlags & IEM_XCPT_FLAGS_CR2)
5095 pCtx->cr2 = uCr2;
5096
5097 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5098 iemRaiseXcptAdjustState(pCtx, u8Vector);
5099
5100 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5101}
5102
5103
5104/**
5105 * Implements exceptions and interrupts for long mode.
5106 *
5107 * @returns VBox strict status code.
5108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5109 * @param pCtx The CPU context.
5110 * @param cbInstr The number of bytes to offset rIP by in the return
5111 * address.
5112 * @param u8Vector The interrupt / exception vector number.
5113 * @param fFlags The flags.
5114 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5115 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5116 */
5117IEM_STATIC VBOXSTRICTRC
5118iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5119 PCPUMCTX pCtx,
5120 uint8_t cbInstr,
5121 uint8_t u8Vector,
5122 uint32_t fFlags,
5123 uint16_t uErr,
5124 uint64_t uCr2)
5125{
5126 /*
5127 * Read the IDT entry.
5128 */
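     /* Long mode IDT entries are 16 bytes, hence the shift by 4; the descriptor is
        fetched below as two 8-byte reads. */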
5129 uint16_t offIdt = (uint16_t)u8Vector << 4;
5130 if (pCtx->idtr.cbIdt < offIdt + 7)
5131 {
5132 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5133 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5134 }
5135 X86DESC64 Idte;
5136 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5137 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5138 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5139 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5140 return rcStrict;
5141 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5142 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5143 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5144
5145 /*
5146 * Check the descriptor type, DPL and such.
5147 * ASSUMES this is done in the same order as described for call-gate calls.
5148 */
5149 if (Idte.Gate.u1DescType)
5150 {
5151 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5152 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5153 }
5154 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5155 switch (Idte.Gate.u4Type)
5156 {
5157 case AMD64_SEL_TYPE_SYS_INT_GATE:
5158 fEflToClear |= X86_EFL_IF;
5159 break;
5160 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5161 break;
5162
5163 default:
5164 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5165 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5166 }
5167
5168 /* Check DPL against CPL if applicable. */
5169 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5170 {
5171 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5172 {
5173 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5174 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5175 }
5176 }
5177
5178 /* Is it there? */
5179 if (!Idte.Gate.u1Present)
5180 {
5181 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5182 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5183 }
5184
5185 /* A null CS is bad. */
5186 RTSEL NewCS = Idte.Gate.u16Sel;
5187 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5188 {
5189 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5190 return iemRaiseGeneralProtectionFault0(pVCpu);
5191 }
5192
5193 /* Fetch the descriptor for the new CS. */
5194 IEMSELDESC DescCS;
5195 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5196 if (rcStrict != VINF_SUCCESS)
5197 {
5198 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5199 return rcStrict;
5200 }
5201
5202 /* Must be a 64-bit code segment. */
5203 if (!DescCS.Long.Gen.u1DescType)
5204 {
5205 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5206 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5207 }
5208 if ( !DescCS.Long.Gen.u1Long
5209 || DescCS.Long.Gen.u1DefBig
5210 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5211 {
5212 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5213 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5214 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5215 }
5216
5217 /* Don't allow lowering the privilege level. For non-conforming CS
5218 selectors, the CS.DPL sets the privilege level the trap/interrupt
5219 handler runs at. For conforming CS selectors, the CPL remains
5220 unchanged, but the CS.DPL must be <= CPL. */
5221 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5222 * when CPU in Ring-0. Result \#GP? */
5223 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5224 {
5225 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5226 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5227 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5228 }
5229
5230
5231 /* Make sure the selector is present. */
5232 if (!DescCS.Legacy.Gen.u1Present)
5233 {
5234 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5235 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5236 }
5237
5238 /* Check that the new RIP is canonical. */
5239 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5240 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5241 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5242 if (!IEM_IS_CANONICAL(uNewRip))
5243 {
5244 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5245 return iemRaiseGeneralProtectionFault0(pVCpu);
5246 }
5247
5248 /*
5249 * If the privilege level changes or if the IST isn't zero, we need to get
5250 * a new stack from the TSS.
5251 */
5252 uint64_t uNewRsp;
5253 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5254 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5255 if ( uNewCpl != pVCpu->iem.s.uCpl
5256 || Idte.Gate.u3IST != 0)
5257 {
5258 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5259 if (rcStrict != VINF_SUCCESS)
5260 return rcStrict;
5261 }
5262 else
5263 uNewRsp = pCtx->rsp;
5264 uNewRsp &= ~(uint64_t)0xf;
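     /* In 64-bit mode the CPU aligns the new stack pointer down to a 16 byte boundary
        before pushing the interrupt/exception frame, which the masking above implements. */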
5265
5266 /*
5267 * Calc the flag image to push.
5268 */
5269 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5270 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5271 fEfl &= ~X86_EFL_RF;
5272 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5273 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5274
5275 /*
5276 * Start making changes.
5277 */
5278 /* Set the new CPL so that stack accesses use it. */
5279 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5280 pVCpu->iem.s.uCpl = uNewCpl;
5281
5282 /* Create the stack frame. */
5283 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
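     /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (5 qwords), plus an
        optional error code qword. */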
5284 RTPTRUNION uStackFrame;
5285 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5286 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5287 if (rcStrict != VINF_SUCCESS)
5288 return rcStrict;
5289 void * const pvStackFrame = uStackFrame.pv;
5290
5291 if (fFlags & IEM_XCPT_FLAGS_ERR)
5292 *uStackFrame.pu64++ = uErr;
5293 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5294 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5295 uStackFrame.pu64[2] = fEfl;
5296 uStackFrame.pu64[3] = pCtx->rsp;
5297 uStackFrame.pu64[4] = pCtx->ss.Sel;
5298 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5299 if (rcStrict != VINF_SUCCESS)
5300 return rcStrict;
5301
5302    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5303    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5304 * after pushing the stack frame? (Write protect the gdt + stack to
5305 * find out.) */
5306 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5307 {
5308 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5309 if (rcStrict != VINF_SUCCESS)
5310 return rcStrict;
5311 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5312 }
5313
5314 /*
5315     * Start committing the register changes.
5316 */
5317 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5318 * hidden registers when interrupting 32-bit or 16-bit code! */
5319 if (uNewCpl != uOldCpl)
5320 {
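         /* On a long mode CPL change SS is loaded with a NULL selector carrying the new
            CPL (the 0 | uNewCpl below) and the hidden parts are flagged unusable, matching
            the architectural behavior for 64-bit stack switches. */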
5321 pCtx->ss.Sel = 0 | uNewCpl;
5322 pCtx->ss.ValidSel = 0 | uNewCpl;
5323 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5324 pCtx->ss.u32Limit = UINT32_MAX;
5325 pCtx->ss.u64Base = 0;
5326 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5327 }
5328 pCtx->rsp = uNewRsp - cbStackFrame;
5329 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5330 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5331 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5332 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5333 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5334 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5335 pCtx->rip = uNewRip;
5336
5337 fEfl &= ~fEflToClear;
5338 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5339
5340 if (fFlags & IEM_XCPT_FLAGS_CR2)
5341 pCtx->cr2 = uCr2;
5342
5343 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5344 iemRaiseXcptAdjustState(pCtx, u8Vector);
5345
5346 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5347}
5348
5349
5350/**
5351 * Implements exceptions and interrupts.
5352 *
5353 * All exceptions and interrupts go through this function!
5354 *
5355 * @returns VBox strict status code.
5356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5357 * @param cbInstr The number of bytes to offset rIP by in the return
5358 * address.
5359 * @param u8Vector The interrupt / exception vector number.
5360 * @param fFlags The flags.
5361 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5362 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5363 */
5364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5365iemRaiseXcptOrInt(PVMCPU pVCpu,
5366 uint8_t cbInstr,
5367 uint8_t u8Vector,
5368 uint32_t fFlags,
5369 uint16_t uErr,
5370 uint64_t uCr2)
5371{
5372 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5373#ifdef IN_RING0
5374 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5375 AssertRCReturn(rc, rc);
5376#endif
5377
5378#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5379 /*
5380 * Flush prefetch buffer
5381 */
5382 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5383#endif
5384
5385 /*
5386 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5387 */
5388 if ( pCtx->eflags.Bits.u1VM
5389 && pCtx->eflags.Bits.u2IOPL != 3
5390 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5391 && (pCtx->cr0 & X86_CR0_PE) )
5392 {
5393 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5394 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5395 u8Vector = X86_XCPT_GP;
5396 uErr = 0;
5397 }
5398#ifdef DBGFTRACE_ENABLED
5399 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5400 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5401 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5402#endif
5403
5404#ifdef VBOX_WITH_NESTED_HWVIRT
5405 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5406 {
5407 /*
5408 * If the event is being injected as part of VMRUN, it isn't subject to event
5409 * intercepts in the nested-guest. However, secondary exceptions that occur
5410 * during injection of any event -are- subject to exception intercepts.
5411 * See AMD spec. 15.20 "Event Injection".
5412 */
5413 if (!pCtx->hwvirt.svm.fInterceptEvents)
5414 pCtx->hwvirt.svm.fInterceptEvents = 1;
5415 else
5416 {
5417 /*
5418 * Check and handle if the event being raised is intercepted.
5419 */
5420 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5421 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5422 return rcStrict0;
5423 }
5424 }
5425#endif /* VBOX_WITH_NESTED_HWVIRT */
5426
5427 /*
5428 * Do recursion accounting.
5429 */
5430 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5431 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5432 if (pVCpu->iem.s.cXcptRecursions == 0)
5433 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5434 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5435 else
5436 {
5437 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5438 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5439 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5440
5441 if (pVCpu->iem.s.cXcptRecursions >= 3)
5442 {
5443#ifdef DEBUG_bird
5444 AssertFailed();
5445#endif
5446 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5447 }
5448
5449 /*
5450 * Evaluate the sequence of recurring events.
5451 */
5452 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5453 NULL /* pXcptRaiseInfo */);
5454 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5455 { /* likely */ }
5456 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5457 {
5458 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5459 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5460 u8Vector = X86_XCPT_DF;
5461 uErr = 0;
5462 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5463 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5464 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5465 }
5466 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5467 {
5468 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5469 return iemInitiateCpuShutdown(pVCpu);
5470 }
5471 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5472 {
5473 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5474 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5475 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5476 return VERR_EM_GUEST_CPU_HANG;
5477 }
5478 else
5479 {
5480 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5481 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5482 return VERR_IEM_IPE_9;
5483 }
5484
5485 /*
5486         * The 'EXT' bit is set when an exception occurs during delivery of an external
5487         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5488         * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5489         * software interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
5490 *
5491 * [1] - Intel spec. 6.13 "Error Code"
5492 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5493 * [3] - Intel Instruction reference for INT n.
5494 */
5495 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5496 && (fFlags & IEM_XCPT_FLAGS_ERR)
5497 && u8Vector != X86_XCPT_PF
5498 && u8Vector != X86_XCPT_DF)
5499 {
5500 uErr |= X86_TRAP_ERR_EXTERNAL;
5501 }
5502 }
5503
5504 pVCpu->iem.s.cXcptRecursions++;
5505 pVCpu->iem.s.uCurXcpt = u8Vector;
5506 pVCpu->iem.s.fCurXcpt = fFlags;
5507 pVCpu->iem.s.uCurXcptErr = uErr;
5508 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5509
5510 /*
5511 * Extensive logging.
5512 */
5513#if defined(LOG_ENABLED) && defined(IN_RING3)
5514 if (LogIs3Enabled())
5515 {
5516 PVM pVM = pVCpu->CTX_SUFF(pVM);
5517 char szRegs[4096];
5518 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5519 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5520 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5521 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5522 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5523 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5524 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5525 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5526 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5527 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5528 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5529 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5530 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5531 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5532 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5533 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5534 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5535 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5536 " efer=%016VR{efer}\n"
5537 " pat=%016VR{pat}\n"
5538 " sf_mask=%016VR{sf_mask}\n"
5539 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5540 " lstar=%016VR{lstar}\n"
5541 " star=%016VR{star} cstar=%016VR{cstar}\n"
5542 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5543 );
5544
5545 char szInstr[256];
5546 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5547 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5548 szInstr, sizeof(szInstr), NULL);
5549 Log3(("%s%s\n", szRegs, szInstr));
5550 }
5551#endif /* LOG_ENABLED */
5552
5553 /*
5554 * Call the mode specific worker function.
5555 */
5556 VBOXSTRICTRC rcStrict;
5557 if (!(pCtx->cr0 & X86_CR0_PE))
5558 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5559 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5560 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5561 else
5562 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5563
5564 /* Flush the prefetch buffer. */
5565#ifdef IEM_WITH_CODE_TLB
5566 pVCpu->iem.s.pbInstrBuf = NULL;
5567#else
5568 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5569#endif
5570
5571 /*
5572 * Unwind.
5573 */
5574 pVCpu->iem.s.cXcptRecursions--;
5575 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5576 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5577 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5578 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5579 return rcStrict;
5580}
5581
5582#ifdef IEM_WITH_SETJMP
5583/**
5584 * See iemRaiseXcptOrInt. Will not return.
5585 */
5586IEM_STATIC DECL_NO_RETURN(void)
5587iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5588 uint8_t cbInstr,
5589 uint8_t u8Vector,
5590 uint32_t fFlags,
5591 uint16_t uErr,
5592 uint64_t uCr2)
5593{
5594 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5595 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5596}
5597#endif
5598
5599
5600/** \#DE - 00. */
5601DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5602{
5603 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5604}
5605
5606
5607/** \#DB - 01.
5608 * @note This automatically clears DR7.GD. */
5609DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5610{
5611 /** @todo set/clear RF. */
5612 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5613 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5614}
5615
5616
5617/** \#BR - 05. */
5618DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5619{
5620 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5621}
5622
5623
5624/** \#UD - 06. */
5625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5626{
5627 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5628}
5629
5630
5631/** \#NM - 07. */
5632DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5633{
5634 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5635}
5636
5637
5638/** \#TS(err) - 0a. */
5639DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5640{
5641 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5642}
5643
5644
5645/** \#TS(tr) - 0a. */
5646DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5647{
5648 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5649 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5650}
5651
5652
5653/** \#TS(0) - 0a. */
5654DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5655{
5656 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5657 0, 0);
5658}
5659
5660
5661/** \#TS(err) - 0a. */
5662DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5663{
5664 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5665 uSel & X86_SEL_MASK_OFF_RPL, 0);
5666}
5667
5668
5669/** \#NP(err) - 0b. */
5670DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5671{
5672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5673}
5674
5675
5676/** \#NP(sel) - 0b. */
5677DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5678{
5679 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5680 uSel & ~X86_SEL_RPL, 0);
5681}
5682
5683
5684/** \#SS(seg) - 0c. */
5685DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5686{
5687 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5688 uSel & ~X86_SEL_RPL, 0);
5689}
5690
5691
5692/** \#SS(err) - 0c. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5696}
5697
5698
5699/** \#GP(n) - 0d. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5703}
5704
5705
5706/** \#GP(0) - 0d. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5708{
5709 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5710}
5711
5712#ifdef IEM_WITH_SETJMP
5713/** \#GP(0) - 0d. */
5714DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5715{
5716 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5717}
5718#endif
5719
5720
5721/** \#GP(sel) - 0d. */
5722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5723{
5724 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5725 Sel & ~X86_SEL_RPL, 0);
5726}
5727
5728
5729/** \#GP(0) - 0d. */
5730DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5731{
5732 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5733}
5734
5735
5736/** \#GP(sel) - 0d. */
5737DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5738{
5739 NOREF(iSegReg); NOREF(fAccess);
5740 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5741 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5742}
5743
5744#ifdef IEM_WITH_SETJMP
5745/** \#GP(sel) - 0d, longjmp. */
5746DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5747{
5748 NOREF(iSegReg); NOREF(fAccess);
5749 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5750 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5751}
5752#endif
5753
5754/** \#GP(sel) - 0d. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5756{
5757 NOREF(Sel);
5758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5759}
5760
5761#ifdef IEM_WITH_SETJMP
5762/** \#GP(sel) - 0d, longjmp. */
5763DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5764{
5765 NOREF(Sel);
5766 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5767}
5768#endif
5769
5770
5771/** \#GP(sel) - 0d. */
5772DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5773{
5774 NOREF(iSegReg); NOREF(fAccess);
5775 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5776}
5777
5778#ifdef IEM_WITH_SETJMP
5779/** \#GP(sel) - 0d, longjmp. */
5780DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5781 uint32_t fAccess)
5782{
5783 NOREF(iSegReg); NOREF(fAccess);
5784 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5785}
5786#endif
5787
5788
5789/** \#PF(n) - 0e. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5791{
5792 uint16_t uErr;
5793 switch (rc)
5794 {
5795 case VERR_PAGE_NOT_PRESENT:
5796 case VERR_PAGE_TABLE_NOT_PRESENT:
5797 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5798 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5799 uErr = 0;
5800 break;
5801
5802 default:
5803 AssertMsgFailed(("%Rrc\n", rc));
5804 RT_FALL_THRU();
5805 case VERR_ACCESS_DENIED:
5806 uErr = X86_TRAP_PF_P;
5807 break;
5808
5809 /** @todo reserved */
5810 }
5811
5812 if (pVCpu->iem.s.uCpl == 3)
5813 uErr |= X86_TRAP_PF_US;
5814
5815 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5816 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5817 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5818 uErr |= X86_TRAP_PF_ID;
5819
5820#if 0 /* This is so much non-sense, really. Why was it done like that? */
5821    /* Note! RW access callers reporting a WRITE protection fault will clear
5822 the READ flag before calling. So, read-modify-write accesses (RW)
5823 can safely be reported as READ faults. */
5824 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5825 uErr |= X86_TRAP_PF_RW;
5826#else
5827 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5828 {
5829 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5830 uErr |= X86_TRAP_PF_RW;
5831 }
5832#endif
5833
5834 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5835 uErr, GCPtrWhere);
5836}
5837
5838#ifdef IEM_WITH_SETJMP
5839/** \#PF(n) - 0e, longjmp. */
5840IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5841{
5842 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5843}
5844#endif
5845
5846
5847/** \#MF(0) - 10. */
5848DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5849{
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5851}
5852
5853
5854/** \#AC(0) - 11. */
5855DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5856{
5857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5858}
5859
5860
5861/**
5862 * Macro for calling iemCImplRaiseDivideError().
5863 *
5864 * This enables us to add/remove arguments and force different levels of
5865 * inlining as we wish.
5866 *
5867 * @return Strict VBox status code.
5868 */
5869#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5870IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5871{
5872 NOREF(cbInstr);
5873 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5874}
5875
5876
5877/**
5878 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5879 *
5880 * This enables us to add/remove arguments and force different levels of
5881 * inlining as we wish.
5882 *
5883 * @return Strict VBox status code.
5884 */
5885#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5886IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5887{
5888 NOREF(cbInstr);
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5890}
5891
5892
5893/**
5894 * Macro for calling iemCImplRaiseInvalidOpcode().
5895 *
5896 * This enables us to add/remove arguments and force different levels of
5897 * inlining as we wish.
5898 *
5899 * @return Strict VBox status code.
5900 */
5901#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5902IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5903{
5904 NOREF(cbInstr);
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5906}
5907
5908
5909/** @} */
5910
5911
5912/*
5913 *
5914 * Helper routines.
5915 * Helper routines.
5916 * Helper routines.
5917 *
5918 */
5919
5920/**
5921 * Recalculates the effective operand size.
5922 *
5923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5924 */
5925IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5926{
5927 switch (pVCpu->iem.s.enmCpuMode)
5928 {
5929 case IEMMODE_16BIT:
5930 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5931 break;
5932 case IEMMODE_32BIT:
5933 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5934 break;
5935 case IEMMODE_64BIT:
5936 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5937 {
5938 case 0:
5939 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5940 break;
5941 case IEM_OP_PRF_SIZE_OP:
5942 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5943 break;
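                /* REX.W takes precedence over the 0x66 operand size prefix. */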
5944 case IEM_OP_PRF_SIZE_REX_W:
5945 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5946 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5947 break;
5948 }
5949 break;
5950 default:
5951 AssertFailed();
5952 }
5953}
5954
5955
5956/**
5957 * Sets the default operand size to 64-bit and recalculates the effective
5958 * operand size.
5959 *
5960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5961 */
5962IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5963{
5964 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5965 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5966 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5967 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5968 else
5969 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5970}
5971
5972
5973/*
5974 *
5975 * Common opcode decoders.
5976 * Common opcode decoders.
5977 * Common opcode decoders.
5978 *
5979 */
5980//#include <iprt/mem.h>
5981
5982/**
5983 * Used to add extra details about a stub case.
5984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5985 */
5986IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5987{
5988#if defined(LOG_ENABLED) && defined(IN_RING3)
5989 PVM pVM = pVCpu->CTX_SUFF(pVM);
5990 char szRegs[4096];
5991 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5992 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5993 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5994 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5995 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5996 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5997 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5998 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5999 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6000 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6001 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6002 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6003 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6004 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6005 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6006 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6007 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6008 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6009 " efer=%016VR{efer}\n"
6010 " pat=%016VR{pat}\n"
6011 " sf_mask=%016VR{sf_mask}\n"
6012 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6013 " lstar=%016VR{lstar}\n"
6014 " star=%016VR{star} cstar=%016VR{cstar}\n"
6015 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6016 );
6017
6018 char szInstr[256];
6019 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6020 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6021 szInstr, sizeof(szInstr), NULL);
6022
6023 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6024#else
6025    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6026#endif
6027}
6028
6029/**
6030 * Complains about a stub.
6031 *
6032 * Providing two versions of this macro, one for daily use and one for use when
6033 * working on IEM.
6034 */
6035#if 0
6036# define IEMOP_BITCH_ABOUT_STUB() \
6037 do { \
6038 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6039 iemOpStubMsg2(pVCpu); \
6040 RTAssertPanic(); \
6041 } while (0)
6042#else
6043# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6044#endif
6045
6046/** Stubs an opcode. */
6047#define FNIEMOP_STUB(a_Name) \
6048 FNIEMOP_DEF(a_Name) \
6049 { \
6050 RT_NOREF_PV(pVCpu); \
6051 IEMOP_BITCH_ABOUT_STUB(); \
6052 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6053 } \
6054 typedef int ignore_semicolon
6055
6056/** Stubs an opcode. */
6057#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6058 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6059 { \
6060 RT_NOREF_PV(pVCpu); \
6061 RT_NOREF_PV(a_Name0); \
6062 IEMOP_BITCH_ABOUT_STUB(); \
6063 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6064 } \
6065 typedef int ignore_semicolon
6066
6067/** Stubs an opcode which currently should raise \#UD. */
6068#define FNIEMOP_UD_STUB(a_Name) \
6069 FNIEMOP_DEF(a_Name) \
6070 { \
6071 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6072 return IEMOP_RAISE_INVALID_OPCODE(); \
6073 } \
6074 typedef int ignore_semicolon
6075
6076/** Stubs an opcode which currently should raise \#UD. */
6077#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6078 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6079 { \
6080 RT_NOREF_PV(pVCpu); \
6081 RT_NOREF_PV(a_Name0); \
6082 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6083 return IEMOP_RAISE_INVALID_OPCODE(); \
6084 } \
6085 typedef int ignore_semicolon
6086
6087
6088
6089/** @name Register Access.
6090 * @{
6091 */
6092
6093/**
6094 * Gets a reference (pointer) to the specified hidden segment register.
6095 *
6096 * @returns Hidden register reference.
6097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6098 * @param iSegReg The segment register.
6099 */
6100IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6101{
6102 Assert(iSegReg < X86_SREG_COUNT);
6103 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6104 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6105
6106#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6107 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6108 { /* likely */ }
6109 else
6110 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6111#else
6112 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6113#endif
6114 return pSReg;
6115}
6116
6117
6118/**
6119 * Ensures that the given hidden segment register is up to date.
6120 *
6121 * @returns Hidden register reference.
6122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6123 * @param pSReg The segment register.
6124 */
6125IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6126{
6127#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6128 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6129 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6130#else
6131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6132 NOREF(pVCpu);
6133#endif
6134 return pSReg;
6135}
6136
6137
6138/**
6139 * Gets a reference (pointer) to the specified segment register (the selector
6140 * value).
6141 *
6142 * @returns Pointer to the selector variable.
6143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6144 * @param iSegReg The segment register.
6145 */
6146DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6147{
6148 Assert(iSegReg < X86_SREG_COUNT);
6149 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6150 return &pCtx->aSRegs[iSegReg].Sel;
6151}
6152
6153
6154/**
6155 * Fetches the selector value of a segment register.
6156 *
6157 * @returns The selector value.
6158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6159 * @param iSegReg The segment register.
6160 */
6161DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6162{
6163 Assert(iSegReg < X86_SREG_COUNT);
6164 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6165}
6166
6167
6168/**
6169 * Gets a reference (pointer) to the specified general purpose register.
6170 *
6171 * @returns Register reference.
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param iReg The general purpose register.
6174 */
6175DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6176{
6177 Assert(iReg < 16);
6178 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6179 return &pCtx->aGRegs[iReg];
6180}
6181
6182
6183/**
6184 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6185 *
6186 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6187 *
6188 * @returns Register reference.
6189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6190 * @param iReg The register.
6191 */
6192DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6193{
6194 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6195 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6196 {
6197 Assert(iReg < 16);
6198 return &pCtx->aGRegs[iReg].u8;
6199 }
6200 /* high 8-bit register. */
6201 Assert(iReg < 8);
6202 return &pCtx->aGRegs[iReg & 3].bHi;
6203}
6204
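/**
 * Illustrative sketch (ad hoc helper name, disabled with #if 0, not part of
 * the build): shows why the 8-bit accessor cannot simply index aGRegs[].u8.
 * Without a REX prefix, encodings 4-7 name the high bytes AH/CH/DH/BH of the
 * first four GPRs; with any REX prefix present they name SPL/BPL/SIL/DIL.
 */
#if 0
static void iemExampleGRegRefU8(PVMCPU pVCpu)
{
    uint8_t *pbReg = iemGRegRefU8(pVCpu, 4 /* AH without REX, SPL with REX */);
    *pbReg = 0x42;  /* Modifies only the byte selected above. */
}
#endif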
6205
6206/**
6207 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6208 *
6209 * @returns Register reference.
6210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6211 * @param iReg The register.
6212 */
6213DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6214{
6215 Assert(iReg < 16);
6216 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6217 return &pCtx->aGRegs[iReg].u16;
6218}
6219
6220
6221/**
6222 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6223 *
6224 * @returns Register reference.
6225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6226 * @param iReg The register.
6227 */
6228DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6229{
6230 Assert(iReg < 16);
6231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6232 return &pCtx->aGRegs[iReg].u32;
6233}
6234
6235
6236/**
6237 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6238 *
6239 * @returns Register reference.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6244{
6245    Assert(iReg < 16);
6246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6247 return &pCtx->aGRegs[iReg].u64;
6248}
6249
6250
6251/**
6252 * Fetches the value of an 8-bit general purpose register.
6253 *
6254 * @returns The register value.
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 * @param iReg The register.
6257 */
6258DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6259{
6260 return *iemGRegRefU8(pVCpu, iReg);
6261}
6262
6263
6264/**
6265 * Fetches the value of a 16-bit general purpose register.
6266 *
6267 * @returns The register value.
6268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6269 * @param iReg The register.
6270 */
6271DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6272{
6273 Assert(iReg < 16);
6274 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6275}
6276
6277
6278/**
6279 * Fetches the value of a 32-bit general purpose register.
6280 *
6281 * @returns The register value.
6282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6283 * @param iReg The register.
6284 */
6285DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6286{
6287 Assert(iReg < 16);
6288 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6289}
6290
6291
6292/**
6293 * Fetches the value of a 64-bit general purpose register.
6294 *
6295 * @returns The register value.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The register.
6298 */
6299DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6303}
6304
6305
6306/**
6307 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6308 *
6309 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6310 * segment limit.
6311 *
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param offNextInstr The offset of the next instruction.
6314 */
6315IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6316{
6317 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6318 switch (pVCpu->iem.s.enmEffOpSize)
6319 {
6320 case IEMMODE_16BIT:
6321 {
6322 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6323 if ( uNewIp > pCtx->cs.u32Limit
6324 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6325 return iemRaiseGeneralProtectionFault0(pVCpu);
6326 pCtx->rip = uNewIp;
6327 break;
6328 }
6329
6330 case IEMMODE_32BIT:
6331 {
6332 Assert(pCtx->rip <= UINT32_MAX);
6333 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6334
6335 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6336 if (uNewEip > pCtx->cs.u32Limit)
6337 return iemRaiseGeneralProtectionFault0(pVCpu);
6338 pCtx->rip = uNewEip;
6339 break;
6340 }
6341
6342 case IEMMODE_64BIT:
6343 {
6344 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6345
6346 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6347 if (!IEM_IS_CANONICAL(uNewRip))
6348 return iemRaiseGeneralProtectionFault0(pVCpu);
6349 pCtx->rip = uNewRip;
6350 break;
6351 }
6352
6353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6354 }
6355
6356 pCtx->eflags.Bits.u1RF = 0;
6357
6358#ifndef IEM_WITH_CODE_TLB
6359 /* Flush the prefetch buffer. */
6360 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6361#endif
6362
6363 return VINF_SUCCESS;
6364}
6365
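/**
 * Illustrative sketch (ad hoc helper name, disabled with #if 0, not part of
 * the build): a short-jump style opcode handler would fetch the signed 8-bit
 * displacement and hand it to iemRegRipRelativeJumpS8, which adds both the
 * displacement and the instruction length and performs the limit/canonical
 * checks.
 */
#if 0
static VBOXSTRICTRC iemExampleJmpShort(PVMCPU pVCpu, int8_t i8Disp)
{
    /* New IP/EIP/RIP = current RIP + instruction length + i8Disp. */
    return iemRegRipRelativeJumpS8(pVCpu, i8Disp);
}
#endif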
6366
6367/**
6368 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6369 *
6370 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6371 * segment limit.
6372 *
6373 * @returns Strict VBox status code.
6374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6375 * @param offNextInstr The offset of the next instruction.
6376 */
6377IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6378{
6379 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6380 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6381
6382 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6383 if ( uNewIp > pCtx->cs.u32Limit
6384 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6385 return iemRaiseGeneralProtectionFault0(pVCpu);
6386 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6387 pCtx->rip = uNewIp;
6388 pCtx->eflags.Bits.u1RF = 0;
6389
6390#ifndef IEM_WITH_CODE_TLB
6391 /* Flush the prefetch buffer. */
6392 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6393#endif
6394
6395 return VINF_SUCCESS;
6396}
6397
6398
6399/**
6400 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6401 *
6402 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6403 * segment limit.
6404 *
6405 * @returns Strict VBox status code.
6406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6407 * @param offNextInstr The offset of the next instruction.
6408 */
6409IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6410{
6411 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6412 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6413
6414 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6415 {
6416 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6417
6418 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6419 if (uNewEip > pCtx->cs.u32Limit)
6420 return iemRaiseGeneralProtectionFault0(pVCpu);
6421 pCtx->rip = uNewEip;
6422 }
6423 else
6424 {
6425 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6426
6427 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6428 if (!IEM_IS_CANONICAL(uNewRip))
6429 return iemRaiseGeneralProtectionFault0(pVCpu);
6430 pCtx->rip = uNewRip;
6431 }
6432 pCtx->eflags.Bits.u1RF = 0;
6433
6434#ifndef IEM_WITH_CODE_TLB
6435 /* Flush the prefetch buffer. */
6436 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6437#endif
6438
6439 return VINF_SUCCESS;
6440}
6441
6442
6443/**
6444 * Performs a near jump to the specified address.
6445 *
6446 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6447 * segment limit.
6448 *
6449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6450 * @param uNewRip The new RIP value.
6451 */
6452IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6453{
6454 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6455 switch (pVCpu->iem.s.enmEffOpSize)
6456 {
6457 case IEMMODE_16BIT:
6458 {
6459 Assert(uNewRip <= UINT16_MAX);
6460 if ( uNewRip > pCtx->cs.u32Limit
6461 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6462 return iemRaiseGeneralProtectionFault0(pVCpu);
6463 /** @todo Test 16-bit jump in 64-bit mode. */
6464 pCtx->rip = uNewRip;
6465 break;
6466 }
6467
6468 case IEMMODE_32BIT:
6469 {
6470 Assert(uNewRip <= UINT32_MAX);
6471 Assert(pCtx->rip <= UINT32_MAX);
6472 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6473
6474 if (uNewRip > pCtx->cs.u32Limit)
6475 return iemRaiseGeneralProtectionFault0(pVCpu);
6476 pCtx->rip = uNewRip;
6477 break;
6478 }
6479
6480 case IEMMODE_64BIT:
6481 {
6482 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6483
6484 if (!IEM_IS_CANONICAL(uNewRip))
6485 return iemRaiseGeneralProtectionFault0(pVCpu);
6486 pCtx->rip = uNewRip;
6487 break;
6488 }
6489
6490 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6491 }
6492
6493 pCtx->eflags.Bits.u1RF = 0;
6494
6495#ifndef IEM_WITH_CODE_TLB
6496 /* Flush the prefetch buffer. */
6497 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6498#endif
6499
6500 return VINF_SUCCESS;
6501}
6502
6503
6504/**
6505 * Gets the address of the top of the stack.
6506 *
6507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6508 * @param   pCtx                The CPU context from which SP/ESP/RSP should be
6509 * read.
6510 */
6511DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6512{
6513 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6514 return pCtx->rsp;
6515 if (pCtx->ss.Attr.n.u1DefBig)
6516 return pCtx->esp;
6517 return pCtx->sp;
6518}
6519
6520
6521/**
6522 * Updates the RIP/EIP/IP to point to the next instruction.
6523 *
6524 * This function leaves the EFLAGS.RF flag alone.
6525 *
6526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6527 * @param cbInstr The number of bytes to add.
6528 */
6529IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6530{
6531 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6532 switch (pVCpu->iem.s.enmCpuMode)
6533 {
6534 case IEMMODE_16BIT:
6535 Assert(pCtx->rip <= UINT16_MAX);
6536 pCtx->eip += cbInstr;
6537 pCtx->eip &= UINT32_C(0xffff);
6538 break;
6539
6540 case IEMMODE_32BIT:
6541 pCtx->eip += cbInstr;
6542 Assert(pCtx->rip <= UINT32_MAX);
6543 break;
6544
6545 case IEMMODE_64BIT:
6546 pCtx->rip += cbInstr;
6547 break;
6548 default: AssertFailed();
6549 }
6550}
6551
6552
6553#if 0
6554/**
6555 * Updates the RIP/EIP/IP to point to the next instruction.
6556 *
6557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6558 */
6559IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6560{
6561 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6562}
6563#endif
6564
6565
6566
6567/**
6568 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6569 *
6570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6571 * @param cbInstr The number of bytes to add.
6572 */
6573IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6574{
6575 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6576
6577 pCtx->eflags.Bits.u1RF = 0;
6578
6579 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6580#if ARCH_BITS >= 64
6581 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6582 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6583 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6584#else
6585 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6586 pCtx->rip += cbInstr;
6587 else
6588 pCtx->eip += cbInstr;
6589#endif
6590}
6591
6592
6593/**
6594 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6595 *
6596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6597 */
6598IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6599{
6600 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6601}
6602
6603
6604/**
6605 * Adds to the stack pointer.
6606 *
6607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6608 * @param   pCtx                The CPU context whose SP/ESP/RSP should be
6609 * updated.
6610 * @param cbToAdd The number of bytes to add (8-bit!).
6611 */
6612DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6613{
6614 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6615 pCtx->rsp += cbToAdd;
6616 else if (pCtx->ss.Attr.n.u1DefBig)
6617 pCtx->esp += cbToAdd;
6618 else
6619 pCtx->sp += cbToAdd;
6620}
6621
6622
6623/**
6624 * Subtracts from the stack pointer.
6625 *
6626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6627 * @param   pCtx                The CPU context whose SP/ESP/RSP should be
6628 * updated.
6629 * @param cbToSub The number of bytes to subtract (8-bit!).
6630 */
6631DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6632{
6633 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6634 pCtx->rsp -= cbToSub;
6635 else if (pCtx->ss.Attr.n.u1DefBig)
6636 pCtx->esp -= cbToSub;
6637 else
6638 pCtx->sp -= cbToSub;
6639}
6640
6641
6642/**
6643 * Adds to the temporary stack pointer.
6644 *
6645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6646 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6647 * @param cbToAdd The number of bytes to add (16-bit).
6648 * @param pCtx Where to get the current stack mode.
6649 */
6650DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6651{
6652 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6653 pTmpRsp->u += cbToAdd;
6654 else if (pCtx->ss.Attr.n.u1DefBig)
6655 pTmpRsp->DWords.dw0 += cbToAdd;
6656 else
6657 pTmpRsp->Words.w0 += cbToAdd;
6658}
6659
6660
6661/**
6662 * Subtracts from the temporary stack pointer.
6663 *
6664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6665 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6666 * @param cbToSub The number of bytes to subtract.
6667 * @param pCtx Where to get the current stack mode.
6668 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6669 * expecting that.
6670 */
6671DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6672{
6673 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6674 pTmpRsp->u -= cbToSub;
6675 else if (pCtx->ss.Attr.n.u1DefBig)
6676 pTmpRsp->DWords.dw0 -= cbToSub;
6677 else
6678 pTmpRsp->Words.w0 -= cbToSub;
6679}
6680
6681
6682/**
6683 * Calculates the effective stack address for a push of the specified size as
6684 * well as the new RSP value (upper bits may be masked).
6685 *
6686 * @returns Effective stack address for the push.
6687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6688 * @param pCtx Where to get the current stack mode.
6689 * @param   cbItem              The size of the stack item to push.
6690 * @param puNewRsp Where to return the new RSP value.
6691 */
6692DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6693{
6694 RTUINT64U uTmpRsp;
6695 RTGCPTR GCPtrTop;
6696 uTmpRsp.u = pCtx->rsp;
6697
6698 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6699 GCPtrTop = uTmpRsp.u -= cbItem;
6700 else if (pCtx->ss.Attr.n.u1DefBig)
6701 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6702 else
6703 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6704 *puNewRsp = uTmpRsp.u;
6705 return GCPtrTop;
6706}
6707
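/**
 * Worked example (illustrative only, ad hoc helper name, disabled with #if 0):
 * with a 16-bit stack (SS.B=0), SP=0x0002 and a 4-byte push, only the low
 * word wraps, so the returned effective address is 0xfffe while the upper
 * bits of the incoming RSP are preserved in the value returned through
 * puNewRsp.
 */
#if 0
static void iemExamplePush32(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
    /* ... write the 4 bytes at SS:GCPtrTop, then commit uNewRsp to pCtx->rsp. */
    RT_NOREF(GCPtrTop);
}
#endif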
6708
6709/**
6710 * Gets the current stack pointer and calculates the value after a pop of the
6711 * specified size.
6712 *
6713 * @returns Current stack pointer.
6714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6715 * @param pCtx Where to get the current stack mode.
6716 * @param cbItem The size of the stack item to pop.
6717 * @param puNewRsp Where to return the new RSP value.
6718 */
6719DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6720{
6721 RTUINT64U uTmpRsp;
6722 RTGCPTR GCPtrTop;
6723 uTmpRsp.u = pCtx->rsp;
6724
6725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6726 {
6727 GCPtrTop = uTmpRsp.u;
6728 uTmpRsp.u += cbItem;
6729 }
6730 else if (pCtx->ss.Attr.n.u1DefBig)
6731 {
6732 GCPtrTop = uTmpRsp.DWords.dw0;
6733 uTmpRsp.DWords.dw0 += cbItem;
6734 }
6735 else
6736 {
6737 GCPtrTop = uTmpRsp.Words.w0;
6738 uTmpRsp.Words.w0 += cbItem;
6739 }
6740 *puNewRsp = uTmpRsp.u;
6741 return GCPtrTop;
6742}
6743
6744
6745/**
6746 * Calculates the effective stack address for a push of the specified size as
6747 * well as the new temporary RSP value (upper bits may be masked).
6748 *
6749 * @returns Effective stack address for the push.
6750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6751 * @param pCtx Where to get the current stack mode.
6752 * @param pTmpRsp The temporary stack pointer. This is updated.
6753 * @param   cbItem              The size of the stack item to push.
6754 */
6755DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6756{
6757 RTGCPTR GCPtrTop;
6758
6759 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6760 GCPtrTop = pTmpRsp->u -= cbItem;
6761 else if (pCtx->ss.Attr.n.u1DefBig)
6762 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6763 else
6764 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6765 return GCPtrTop;
6766}
6767
6768
6769/**
6770 * Gets the effective stack address for a pop of the specified size and
6771 * calculates and updates the temporary RSP.
6772 *
6773 * @returns Current stack pointer.
6774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6775 * @param pCtx Where to get the current stack mode.
6776 * @param pTmpRsp The temporary stack pointer. This is updated.
6777 * @param cbItem The size of the stack item to pop.
6778 */
6779DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6780{
6781 RTGCPTR GCPtrTop;
6782 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6783 {
6784 GCPtrTop = pTmpRsp->u;
6785 pTmpRsp->u += cbItem;
6786 }
6787 else if (pCtx->ss.Attr.n.u1DefBig)
6788 {
6789 GCPtrTop = pTmpRsp->DWords.dw0;
6790 pTmpRsp->DWords.dw0 += cbItem;
6791 }
6792 else
6793 {
6794 GCPtrTop = pTmpRsp->Words.w0;
6795 pTmpRsp->Words.w0 += cbItem;
6796 }
6797 return GCPtrTop;
6798}
6799
6800/** @} */
6801
6802
6803/** @name FPU access and helpers.
6804 *
6805 * @{
6806 */
6807
6808
6809/**
6810 * Hook for preparing to use the host FPU.
6811 *
6812 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6813 *
6814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6815 */
6816DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6817{
6818#ifdef IN_RING3
6819 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6820#else
6821 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6822#endif
6823}
6824
6825
6826/**
6827 * Hook for preparing to use the host FPU for SSE.
6828 *
6829 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6830 *
6831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6832 */
6833DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6834{
6835 iemFpuPrepareUsage(pVCpu);
6836}
6837
6838
6839/**
6840 * Hook for preparing to use the host FPU for AVX.
6841 *
6842 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 */
6846DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6847{
6848 iemFpuPrepareUsage(pVCpu);
6849}
6850
6851
6852/**
6853 * Hook for actualizing the guest FPU state before the interpreter reads it.
6854 *
6855 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6856 *
6857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6858 */
6859DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6860{
6861#ifdef IN_RING3
6862 NOREF(pVCpu);
6863#else
6864 CPUMRZFpuStateActualizeForRead(pVCpu);
6865#endif
6866}
6867
6868
6869/**
6870 * Hook for actualizing the guest FPU state before the interpreter changes it.
6871 *
6872 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6873 *
6874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6875 */
6876DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6877{
6878#ifdef IN_RING3
6879 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6880#else
6881 CPUMRZFpuStateActualizeForChange(pVCpu);
6882#endif
6883}
6884
6885
6886/**
6887 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6888 * only.
6889 *
6890 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6891 *
6892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6893 */
6894DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6895{
6896#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6897 NOREF(pVCpu);
6898#else
6899 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6900#endif
6901}
6902
6903
6904/**
6905 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6906 * read+write.
6907 *
6908 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 */
6912DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6913{
6914#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6915 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6916#else
6917 CPUMRZFpuStateActualizeForChange(pVCpu);
6918#endif
6919}
6920
6921
6922/**
6923 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6924 * only.
6925 *
6926 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6927 *
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 */
6930DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6931{
6932#ifdef IN_RING3
6933 NOREF(pVCpu);
6934#else
6935 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6936#endif
6937}
6938
6939
6940/**
6941 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6942 * read+write.
6943 *
6944 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6945 *
6946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6947 */
6948DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6949{
6950#ifdef IN_RING3
6951 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6952#else
6953 CPUMRZFpuStateActualizeForChange(pVCpu);
6954#endif
6955}
6956
6957
6958/**
6959 * Stores a QNaN value into an FPU register.
6960 *
6961 * @param pReg Pointer to the register.
6962 */
6963DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6964{
6965 pReg->au32[0] = UINT32_C(0x00000000);
6966 pReg->au32[1] = UINT32_C(0xc0000000);
6967 pReg->au16[4] = UINT16_C(0xffff);
6968}
6969
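/**
 * For reference (worked example of the stores above): the result is the
 * 80-bit "real indefinite" QNaN, bytes FFFF C000 0000 0000 0000h, i.e. sign=1,
 * exponent=0x7fff and a mantissa with only the two top bits set - the value
 * the FPU itself delivers as the masked invalid-operation response.
 */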
6970
6971/**
6972 * Updates the FOP, FPU.CS and FPUIP registers.
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 * @param pCtx The CPU context.
6976 * @param pFpuCtx The FPU context.
6977 */
6978DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6979{
6980 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6981 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6982    /** @todo x87.CS and FPUIP need to be kept separately. */
6983 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6984 {
6985 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6986 * happens in real mode here based on the fnsave and fnstenv images. */
6987 pFpuCtx->CS = 0;
6988 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6989 }
6990 else
6991 {
6992 pFpuCtx->CS = pCtx->cs.Sel;
6993 pFpuCtx->FPUIP = pCtx->rip;
6994 }
6995}
6996
6997
6998/**
6999 * Updates the x87.DS and FPUDP registers.
7000 *
7001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7002 * @param pCtx The CPU context.
7003 * @param pFpuCtx The FPU context.
7004 * @param iEffSeg The effective segment register.
7005 * @param GCPtrEff The effective address relative to @a iEffSeg.
7006 */
7007DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7008{
7009 RTSEL sel;
7010 switch (iEffSeg)
7011 {
7012 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7013 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7014 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7015 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7016 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7017 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7018 default:
7019 AssertMsgFailed(("%d\n", iEffSeg));
7020 sel = pCtx->ds.Sel;
7021 }
7022    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7023 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7024 {
7025 pFpuCtx->DS = 0;
7026 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7027 }
7028 else
7029 {
7030 pFpuCtx->DS = sel;
7031 pFpuCtx->FPUDP = GCPtrEff;
7032 }
7033}
7034
7035
7036/**
7037 * Rotates the stack registers in the push direction.
7038 *
7039 * @param pFpuCtx The FPU context.
7040 * @remarks This is a complete waste of time, but fxsave stores the registers in
7041 * stack order.
7042 */
7043DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7044{
7045 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7046 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7047 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7048 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7049 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7050 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7051 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7052 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7053 pFpuCtx->aRegs[0].r80 = r80Tmp;
7054}
7055
7056
7057/**
7058 * Rotates the stack registers in the pop direction.
7059 *
7060 * @param pFpuCtx The FPU context.
7061 * @remarks This is a complete waste of time, but fxsave stores the registers in
7062 * stack order.
7063 */
7064DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7065{
7066 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7067 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7068 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7069 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7070 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7071 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7072 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7073 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7074 pFpuCtx->aRegs[7].r80 = r80Tmp;
7075}
7076
7077
7078/**
7079 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
7080 * exception prevents it.
7081 *
7082 * @param pResult The FPU operation result to push.
7083 * @param pFpuCtx The FPU context.
7084 */
7085IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7086{
7087 /* Update FSW and bail if there are pending exceptions afterwards. */
7088 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7089 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7090 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7091 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7092 {
7093 pFpuCtx->FSW = fFsw;
7094 return;
7095 }
7096
7097 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7098 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7099 {
7100 /* All is fine, push the actual value. */
7101 pFpuCtx->FTW |= RT_BIT(iNewTop);
7102 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7103 }
7104 else if (pFpuCtx->FCW & X86_FCW_IM)
7105 {
7106 /* Masked stack overflow, push QNaN. */
7107 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7108 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7109 }
7110 else
7111 {
7112 /* Raise stack overflow, don't push anything. */
7113 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7114 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7115 return;
7116 }
7117
7118 fFsw &= ~X86_FSW_TOP_MASK;
7119 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7120 pFpuCtx->FSW = fFsw;
7121
7122 iemFpuRotateStackPush(pFpuCtx);
7123}
7124
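/**
 * Note on the TOP arithmetic above and in the other push/pop workers
 * (illustrative restatement with an ad hoc helper, disabled with #if 0, no
 * new behaviour): the x87 stack grows downwards in the 3-bit TOP field, so a
 * push is TOP = (TOP - 1) & 7, which the code writes as
 * (TOP + 7) & X86_FSW_TOP_SMASK, and a pop is TOP = (TOP + 1) & 7.
 */
#if 0
static void iemExampleTopArithmetic(PCX86FXSTATE pFpuCtx)
{
    uint16_t const iTop     = X86_FSW_TOP_GET(pFpuCtx->FSW);   /* e.g. 0 */
    uint16_t const iPushTop = (iTop + 7) & X86_FSW_TOP_SMASK;  /* -> 7   */
    uint16_t const iPopTop  = (iTop + 1) & X86_FSW_TOP_SMASK;  /* -> 1   */
    RT_NOREF(iPushTop, iPopTop);
}
#endif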
7125
7126/**
7127 * Stores a result in an FPU register and updates the FSW and FTW.
7128 *
7129 * @param pFpuCtx The FPU context.
7130 * @param pResult The result to store.
7131 * @param iStReg Which FPU register to store it in.
7132 */
7133IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7134{
7135 Assert(iStReg < 8);
7136 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7137 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7138 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7139 pFpuCtx->FTW |= RT_BIT(iReg);
7140 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7141}
7142
7143
7144/**
7145 * Only updates the FPU status word (FSW) with the result of the current
7146 * instruction.
7147 *
7148 * @param pFpuCtx The FPU context.
7149 * @param u16FSW The FSW output of the current instruction.
7150 */
7151IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7152{
7153 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7154 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7155}
7156
7157
7158/**
7159 * Pops one item off the FPU stack if no pending exception prevents it.
7160 *
7161 * @param pFpuCtx The FPU context.
7162 */
7163IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7164{
7165 /* Check pending exceptions. */
7166 uint16_t uFSW = pFpuCtx->FSW;
7167 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7168 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7169 return;
7170
7171 /* TOP--. */
7172    /* TOP++, i.e. pop: adding 9 is congruent to +1 modulo 8 in the 3-bit TOP field. */
7173 uFSW &= ~X86_FSW_TOP_MASK;
7174 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7175 pFpuCtx->FSW = uFSW;
7176
7177 /* Mark the previous ST0 as empty. */
7178 iOldTop >>= X86_FSW_TOP_SHIFT;
7179 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7180
7181 /* Rotate the registers. */
7182 iemFpuRotateStackPop(pFpuCtx);
7183}
7184
7185
7186/**
7187 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
7188 *
7189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7190 * @param pResult The FPU operation result to push.
7191 */
7192IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7193{
7194 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7195 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7196 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7197 iemFpuMaybePushResult(pResult, pFpuCtx);
7198}
7199
7200
7201/**
7202 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
7203 * and sets FPUDP and FPUDS.
7204 *
7205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7206 * @param pResult The FPU operation result to push.
7207 * @param iEffSeg The effective segment register.
7208 * @param GCPtrEff The effective address relative to @a iEffSeg.
7209 */
7210IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7211{
7212 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7213 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7214 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7215 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7216 iemFpuMaybePushResult(pResult, pFpuCtx);
7217}
7218
7219
7220/**
7221 * Replace ST0 with the first value and push the second onto the FPU stack,
7222 * unless a pending exception prevents it.
7223 *
7224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7225 * @param pResult The FPU operation result to store and push.
7226 */
7227IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7228{
7229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7230 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7231 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7232
7233 /* Update FSW and bail if there are pending exceptions afterwards. */
7234 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7235 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7236 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7237 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7238 {
7239 pFpuCtx->FSW = fFsw;
7240 return;
7241 }
7242
7243 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7244 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7245 {
7246 /* All is fine, push the actual value. */
7247 pFpuCtx->FTW |= RT_BIT(iNewTop);
7248 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7249 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7250 }
7251 else if (pFpuCtx->FCW & X86_FCW_IM)
7252 {
7253 /* Masked stack overflow, push QNaN. */
7254 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7255 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7256 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7257 }
7258 else
7259 {
7260 /* Raise stack overflow, don't push anything. */
7261 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7262 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7263 return;
7264 }
7265
7266 fFsw &= ~X86_FSW_TOP_MASK;
7267 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7268 pFpuCtx->FSW = fFsw;
7269
7270 iemFpuRotateStackPush(pFpuCtx);
7271}
7272
7273
7274/**
7275 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7276 * FOP.
7277 *
7278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7279 * @param pResult The result to store.
7280 * @param iStReg Which FPU register to store it in.
7281 */
7282IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7283{
7284 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7285 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7286 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7287 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7288}
7289
7290
7291/**
7292 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7293 * FOP, and then pops the stack.
7294 *
7295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7296 * @param pResult The result to store.
7297 * @param iStReg Which FPU register to store it in.
7298 */
7299IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7300{
7301 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7302 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7303 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7304 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7305 iemFpuMaybePopOne(pFpuCtx);
7306}
7307
7308
7309/**
7310 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7311 * FPUDP, and FPUDS.
7312 *
7313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7314 * @param pResult The result to store.
7315 * @param iStReg Which FPU register to store it in.
7316 * @param iEffSeg The effective memory operand selector register.
7317 * @param GCPtrEff The effective memory operand offset.
7318 */
7319IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7320 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7321{
7322 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7323 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7324 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7325 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7326 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7327}
7328
7329
7330/**
7331 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7332 * FPUDP, and FPUDS, and then pops the stack.
7333 *
7334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7335 * @param pResult The result to store.
7336 * @param iStReg Which FPU register to store it in.
7337 * @param iEffSeg The effective memory operand selector register.
7338 * @param GCPtrEff The effective memory operand offset.
7339 */
7340IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7341 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7342{
7343 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7344 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7345 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7346 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7347 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7348 iemFpuMaybePopOne(pFpuCtx);
7349}
7350
7351
7352/**
7353 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7354 *
7355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7356 */
7357IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7358{
7359 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7360 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7361 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7362}
7363
7364
7365/**
7366 * Marks the specified stack register as free (for FFREE).
7367 *
7368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7369 * @param iStReg The register to free.
7370 */
7371IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7372{
7373 Assert(iStReg < 8);
7374 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7375 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7376 pFpuCtx->FTW &= ~RT_BIT(iReg);
7377}
7378
7379
7380/**
7381 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7382 *
7383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7384 */
7385IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7386{
7387 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7388 uint16_t uFsw = pFpuCtx->FSW;
7389 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7390 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7391 uFsw &= ~X86_FSW_TOP_MASK;
7392 uFsw |= uTop;
7393 pFpuCtx->FSW = uFsw;
7394}
7395
7396
7397/**
7398 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 */
7402IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7403{
7404 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7405 uint16_t uFsw = pFpuCtx->FSW;
7406 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7407 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7408 uFsw &= ~X86_FSW_TOP_MASK;
7409 uFsw |= uTop;
7410 pFpuCtx->FSW = uFsw;
7411}
7412
7413
7414/**
7415 * Updates the FSW, FOP, FPUIP, and FPUCS.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param u16FSW The FSW from the current instruction.
7419 */
7420IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7421{
7422 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7423 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7424 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7425 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7426}
7427
7428
7429/**
7430 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7431 *
7432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7433 * @param u16FSW The FSW from the current instruction.
7434 */
7435IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7436{
7437 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7438 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7439 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7440 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7441 iemFpuMaybePopOne(pFpuCtx);
7442}
7443
7444
7445/**
7446 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7447 *
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 * @param u16FSW The FSW from the current instruction.
7450 * @param iEffSeg The effective memory operand selector register.
7451 * @param GCPtrEff The effective memory operand offset.
7452 */
7453IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7454{
7455 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7456 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7457 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7458 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7459 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7460}
7461
7462
7463/**
7464 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7465 *
7466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7467 * @param u16FSW The FSW from the current instruction.
7468 */
7469IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7470{
7471 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7472 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7473 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7474 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7475 iemFpuMaybePopOne(pFpuCtx);
7476 iemFpuMaybePopOne(pFpuCtx);
7477}
7478
7479
7480/**
7481 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7482 *
7483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7484 * @param u16FSW The FSW from the current instruction.
7485 * @param iEffSeg The effective memory operand selector register.
7486 * @param GCPtrEff The effective memory operand offset.
7487 */
7488IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7489{
7490 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7491 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7492 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7493 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7494 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7495 iemFpuMaybePopOne(pFpuCtx);
7496}
7497
7498
7499/**
7500 * Worker routine for raising an FPU stack underflow exception.
7501 *
7502 * @param pFpuCtx The FPU context.
7503 * @param iStReg The stack register being accessed.
7504 */
7505IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7506{
7507 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7508 if (pFpuCtx->FCW & X86_FCW_IM)
7509 {
7510 /* Masked underflow. */
7511 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7512 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7513 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7514 if (iStReg != UINT8_MAX)
7515 {
7516 pFpuCtx->FTW |= RT_BIT(iReg);
7517 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7518 }
7519 }
7520 else
7521 {
7522 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7523 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7524 }
7525}
7526
7527
7528/**
7529 * Raises an FPU stack underflow exception.
7530 *
7531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7532 * @param iStReg The destination register that should be loaded
7533 *                      with QNaN if \#IS is masked. Specify
7534 * UINT8_MAX if none (like for fcom).
7535 */
7536DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7537{
7538 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7539 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7540 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7541 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7542}
7543
7544
7545DECL_NO_INLINE(IEM_STATIC, void)
7546iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7547{
7548 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7549 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7550 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7551 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7552 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7553}
7554
7555
7556DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7557{
7558 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7559 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7560 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7561 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7562 iemFpuMaybePopOne(pFpuCtx);
7563}
7564
7565
7566DECL_NO_INLINE(IEM_STATIC, void)
7567iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7568{
7569 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7570 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7571 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7572 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7573 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7574 iemFpuMaybePopOne(pFpuCtx);
7575}
7576
7577
7578DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7579{
7580 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7581 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7582 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7583 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7584 iemFpuMaybePopOne(pFpuCtx);
7585 iemFpuMaybePopOne(pFpuCtx);
7586}
7587
7588
7589DECL_NO_INLINE(IEM_STATIC, void)
7590iemFpuStackPushUnderflow(PVMCPU pVCpu)
7591{
7592 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7593 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7594 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7595
7596 if (pFpuCtx->FCW & X86_FCW_IM)
7597 {
7598        /* Masked underflow - Push QNaN. */
7599 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7600 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7601 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7602 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7603 pFpuCtx->FTW |= RT_BIT(iNewTop);
7604 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7605 iemFpuRotateStackPush(pFpuCtx);
7606 }
7607 else
7608 {
7609 /* Exception pending - don't change TOP or the register stack. */
7610 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7611 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7612 }
7613}
7614
7615
7616DECL_NO_INLINE(IEM_STATIC, void)
7617iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7618{
7619 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7620 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7621 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7622
7623 if (pFpuCtx->FCW & X86_FCW_IM)
7624 {
7625        /* Masked underflow - Push QNaN. */
7626 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7627 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7628 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7629 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7630 pFpuCtx->FTW |= RT_BIT(iNewTop);
7631 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7632 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7633 iemFpuRotateStackPush(pFpuCtx);
7634 }
7635 else
7636 {
7637 /* Exception pending - don't change TOP or the register stack. */
7638 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7639 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7640 }
7641}
7642
7643
7644/**
7645 * Worker routine for raising an FPU stack overflow exception on a push.
7646 *
7647 * @param pFpuCtx The FPU context.
7648 */
7649IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7650{
7651 if (pFpuCtx->FCW & X86_FCW_IM)
7652 {
7653 /* Masked overflow. */
7654 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7655 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7656 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7657 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7658 pFpuCtx->FTW |= RT_BIT(iNewTop);
7659 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7660 iemFpuRotateStackPush(pFpuCtx);
7661 }
7662 else
7663 {
7664 /* Exception pending - don't change TOP or the register stack. */
7665 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7666 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7667 }
7668}
7669
7670
7671/**
7672 * Raises an FPU stack overflow exception on a push.
7673 *
7674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7675 */
7676DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7677{
7678 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7679 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7680 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7681 iemFpuStackPushOverflowOnly(pFpuCtx);
7682}
7683
7684
7685/**
7686 * Raises an FPU stack overflow exception on a push with a memory operand.
7687 *
7688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7689 * @param iEffSeg The effective memory operand selector register.
7690 * @param GCPtrEff The effective memory operand offset.
7691 */
7692DECL_NO_INLINE(IEM_STATIC, void)
7693iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7694{
7695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7696 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7697 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7698 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7699 iemFpuStackPushOverflowOnly(pFpuCtx);
7700}
7701
7702
7703IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7704{
7705 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7706 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7707 if (pFpuCtx->FTW & RT_BIT(iReg))
7708 return VINF_SUCCESS;
7709 return VERR_NOT_FOUND;
7710}
7711
7712
7713IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7714{
7715 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7716 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7717 if (pFpuCtx->FTW & RT_BIT(iReg))
7718 {
7719 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7720 return VINF_SUCCESS;
7721 }
7722 return VERR_NOT_FOUND;
7723}
7724
7725
7726IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7727 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7728{
7729 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7730 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7731 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7732 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7733 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7734 {
7735 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7736 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7737 return VINF_SUCCESS;
7738 }
7739 return VERR_NOT_FOUND;
7740}
7741
7742
7743IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7744{
7745 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7746 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7747 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7748 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7749 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7750 {
7751 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7752 return VINF_SUCCESS;
7753 }
7754 return VERR_NOT_FOUND;
7755}
7756
7757
7758/**
7759 * Updates the FPU exception status after FCW is changed.
7760 *
7761 * @param pFpuCtx The FPU context.
7762 */
7763IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7764{
7765 uint16_t u16Fsw = pFpuCtx->FSW;
7766 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7767 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7768 else
7769 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7770 pFpuCtx->FSW = u16Fsw;
7771}
7772
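/**
 * Worked example (illustrative only, ad hoc helper name, disabled with #if 0):
 * if FSW has IE pending and a new FCW is loaded with IM clear, the
 * now-unmasked pending exception makes the helper above set both ES and B;
 * re-masking it clears them again.
 */
#if 0
static void iemExampleRecalcEs(PX86FXSTATE pFpuCtx)
{
    pFpuCtx->FSW |= X86_FSW_IE;           /* Pending invalid-operation.      */
    pFpuCtx->FCW &= ~X86_FCW_IM;          /* Unmask it (as FLDCW might).     */
    iemFpuRecalcExceptionStatus(pFpuCtx); /* -> FSW.ES and FSW.B become set. */
}
#endif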
7773
7774/**
7775 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7776 *
7777 * @returns The full FTW.
7778 * @param pFpuCtx The FPU context.
7779 */
7780IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7781{
7782 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7783 uint16_t u16Ftw = 0;
7784 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7785 for (unsigned iSt = 0; iSt < 8; iSt++)
7786 {
7787 unsigned const iReg = (iSt + iTop) & 7;
7788 if (!(u8Ftw & RT_BIT(iReg)))
7789 u16Ftw |= 3 << (iReg * 2); /* empty */
7790 else
7791 {
7792 uint16_t uTag;
7793 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7794 if (pr80Reg->s.uExponent == 0x7fff)
7795 uTag = 2; /* Exponent is all 1's => Special. */
7796 else if (pr80Reg->s.uExponent == 0x0000)
7797 {
7798 if (pr80Reg->s.u64Mantissa == 0x0000)
7799 uTag = 1; /* All bits are zero => Zero. */
7800 else
7801 uTag = 2; /* Must be special. */
7802 }
7803 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7804 uTag = 0; /* Valid. */
7805 else
7806 uTag = 2; /* Must be special. */
7807
7808            u16Ftw |= uTag << (iReg * 2);
7809 }
7810 }
7811
7812 return u16Ftw;
7813}
7814
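/**
 * Worked example (illustrative only): with TOP=6 and only physical register 6
 * (i.e. ST(0)) holding a normal value such as 1.0, the abridged FTW is
 * RT_BIT(6) = 0x40 and the full FTW computed above is 0xcfff - every empty
 * register contributes tag 3 (11b) while register 6 contributes tag 0 (valid).
 */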
7815
7816/**
7817 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7818 *
7819 * @returns The compressed FTW.
7820 * @param u16FullFtw The full FTW to convert.
7821 */
7822IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7823{
7824 uint8_t u8Ftw = 0;
7825 for (unsigned i = 0; i < 8; i++)
7826 {
7827 if ((u16FullFtw & 3) != 3 /*empty*/)
7828 u8Ftw |= RT_BIT(i);
7829 u16FullFtw >>= 2;
7830 }
7831
7832 return u8Ftw;
7833}
7834
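/**
 * Continuing the example above (illustrative only): feeding the full FTW
 * 0xcfff back through this helper yields 0x40 again, since only the tag pair
 * for register 6 differs from 11b (empty).  The round trip loses nothing but
 * the distinction between the valid/zero/special tag classes.
 */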
7835/** @} */
7836
7837
7838/** @name Memory access.
7839 *
7840 * @{
7841 */
7842
7843
7844/**
7845 * Updates the IEMCPU::cbWritten counter if applicable.
7846 *
7847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7848 * @param fAccess The access being accounted for.
7849 * @param cbMem The access size.
7850 */
7851DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7852{
7853 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7854 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7855 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7856}
7857
7858
7859/**
7860 * Checks if the given segment can be written to, raising the appropriate
7861 * exception if not.
7862 *
7863 * @returns VBox strict status code.
7864 *
7865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7866 * @param pHid Pointer to the hidden register.
7867 * @param iSegReg The register number.
7868 * @param pu64BaseAddr Where to return the base address to use for the
7869 * segment. (In 64-bit code it may differ from the
7870 * base in the hidden segment.)
7871 */
7872IEM_STATIC VBOXSTRICTRC
7873iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7874{
7875 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7876 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7877 else
7878 {
7879 if (!pHid->Attr.n.u1Present)
7880 {
7881 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7882 AssertRelease(uSel == 0);
7883 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7884 return iemRaiseGeneralProtectionFault0(pVCpu);
7885 }
7886
7887 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7888 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7889 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7890 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7891 *pu64BaseAddr = pHid->u64Base;
7892 }
7893 return VINF_SUCCESS;
7894}
7895
7896
7897/**
7898 * Checks if the given segment can be read from, raising the appropriate
7899 * exception if not.
7900 *
7901 * @returns VBox strict status code.
7902 *
7903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7904 * @param pHid Pointer to the hidden register.
7905 * @param iSegReg The register number.
7906 * @param pu64BaseAddr Where to return the base address to use for the
7907 * segment. (In 64-bit code it may differ from the
7908 * base in the hidden segment.)
7909 */
7910IEM_STATIC VBOXSTRICTRC
7911iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7912{
7913 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7914 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7915 else
7916 {
7917 if (!pHid->Attr.n.u1Present)
7918 {
7919 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7920 AssertRelease(uSel == 0);
7921 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7922 return iemRaiseGeneralProtectionFault0(pVCpu);
7923 }
7924
7925 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7926 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7927 *pu64BaseAddr = pHid->u64Base;
7928 }
7929 return VINF_SUCCESS;
7930}
7931
7932
7933/**
7934 * Applies the segment limit, base and attributes.
7935 *
7936 * This may raise a \#GP or \#SS.
7937 *
7938 * @returns VBox strict status code.
7939 *
7940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7941 * @param fAccess The kind of access which is being performed.
7942 * @param iSegReg The index of the segment register to apply.
7943 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7944 * TSS, ++).
7945 * @param cbMem The access size.
7946 * @param pGCPtrMem Pointer to the guest memory address to apply
7947 * segmentation to. Input and output parameter.
7948 */
7949IEM_STATIC VBOXSTRICTRC
7950iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7951{
7952 if (iSegReg == UINT8_MAX)
7953 return VINF_SUCCESS;
7954
7955 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7956 switch (pVCpu->iem.s.enmCpuMode)
7957 {
7958 case IEMMODE_16BIT:
7959 case IEMMODE_32BIT:
7960 {
7961 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7962 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7963
7964 if ( pSel->Attr.n.u1Present
7965 && !pSel->Attr.n.u1Unusable)
7966 {
7967 Assert(pSel->Attr.n.u1DescType);
7968 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7969 {
7970 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7971 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7972 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7973
7974 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7975 {
7976 /** @todo CPL check. */
7977 }
7978
7979 /*
7980 * There are two kinds of data selectors, normal and expand down.
7981 */
7982 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7983 {
7984 if ( GCPtrFirst32 > pSel->u32Limit
7985 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7986 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7987 }
7988 else
7989 {
7990 /*
7991 * The upper boundary is defined by the B bit, not the G bit!
7992 */
7993 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7994 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7995 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7996 }
7997 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7998 }
7999 else
8000 {
8001
8002 /*
8003 * Code selectors can usually be used to read through; writing is
8004 * only permitted in real and V8086 mode.
8005 */
8006 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8007 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8008 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8009 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8010 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8011
8012 if ( GCPtrFirst32 > pSel->u32Limit
8013 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8014 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8015
8016 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8017 {
8018 /** @todo CPL check. */
8019 }
8020
8021 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8022 }
8023 }
8024 else
8025 return iemRaiseGeneralProtectionFault0(pVCpu);
8026 return VINF_SUCCESS;
8027 }
8028
8029 case IEMMODE_64BIT:
8030 {
8031 RTGCPTR GCPtrMem = *pGCPtrMem;
8032 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8033 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8034
8035 Assert(cbMem >= 1);
8036 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8037 return VINF_SUCCESS;
8038 return iemRaiseGeneralProtectionFault0(pVCpu);
8039 }
8040
8041 default:
8042 AssertFailedReturn(VERR_IEM_IPE_7);
8043 }
8044}
8045
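/*
 * Worked example of the two limit checks above (numbers are illustrative):
 *
 *      Normal (expand up) data segment, u32Limit = 0x0000ffff:
 *          4 byte access at 0x0000fffd: GCPtrLast32 = 0x00010000 > limit -> #GP/#SS.
 *          4 byte access at 0x0000fffc: GCPtrLast32 = 0x0000ffff         -> OK, base added.
 *
 *      Expand down data segment, u32Limit = 0x00000fff, B = 0 (valid offsets 0x1000..0xffff):
 *          2 byte access at 0x00000fff: GCPtrFirst32 < limit + 1         -> #GP/#SS.
 *          2 byte access at 0x0000fffe: both checks pass                 -> OK, base added.
 */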
8046
8047/**
8048 * Translates a virtual address to a physical address and checks if we
8049 * can access the page as specified.
8050 *
8051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8052 * @param GCPtrMem The virtual address.
8053 * @param fAccess The intended access.
8054 * @param pGCPhysMem Where to return the physical address.
8055 */
8056IEM_STATIC VBOXSTRICTRC
8057iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8058{
8059 /** @todo Need a different PGM interface here. We're currently using
8060 * generic / REM interfaces. this won't cut it for R0 & RC. */
8061 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8062 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8063 RTGCPHYS GCPhys;
8064 uint64_t fFlags;
8065 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8066 if (RT_FAILURE(rc))
8067 {
8068 /** @todo Check unassigned memory in unpaged mode. */
8069 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8070 *pGCPhysMem = NIL_RTGCPHYS;
8071 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8072 }
8073
8074 /* If the page is writable and does not have the no-exec bit set, all
8075 access is allowed. Otherwise we'll have to check more carefully... */
8076 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8077 {
8078 /* Write to read only memory? */
8079 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8080 && !(fFlags & X86_PTE_RW)
8081 && ( (pVCpu->iem.s.uCpl == 3
8082 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8083 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8084 {
8085 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8086 *pGCPhysMem = NIL_RTGCPHYS;
8087 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8088 }
8089
8090 /* Kernel memory accessed by userland? */
8091 if ( !(fFlags & X86_PTE_US)
8092 && pVCpu->iem.s.uCpl == 3
8093 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8094 {
8095 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8096 *pGCPhysMem = NIL_RTGCPHYS;
8097 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8098 }
8099
8100 /* Executing non-executable memory? */
8101 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8102 && (fFlags & X86_PTE_PAE_NX)
8103 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8104 {
8105 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8106 *pGCPhysMem = NIL_RTGCPHYS;
8107 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8108 VERR_ACCESS_DENIED);
8109 }
8110 }
8111
8112 /*
8113 * Set the dirty / access flags.
8114 * ASSUMES this is set when the address is translated rather than on commit...
8115 */
8116 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8117 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8118 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8119 {
8120 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8121 AssertRC(rc2);
8122 }
8123
8124 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8125 *pGCPhysMem = GCPhys;
8126 return VINF_SUCCESS;
8127}
8128
8129
8130
8131/**
8132 * Maps a physical page.
8133 *
8134 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8136 * @param GCPhysMem The physical address.
8137 * @param fAccess The intended access.
8138 * @param ppvMem Where to return the mapping address.
8139 * @param pLock The PGM lock.
8140 */
8141IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8142{
8143#ifdef IEM_VERIFICATION_MODE_FULL
8144 /* Force the alternative path so we can ignore writes. */
8145 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8146 {
8147 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8148 {
8149 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8150 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8151 if (RT_FAILURE(rc2))
8152 pVCpu->iem.s.fProblematicMemory = true;
8153 }
8154 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8155 }
8156#endif
8157#ifdef IEM_LOG_MEMORY_WRITES
8158 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8159 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8160#endif
8161#ifdef IEM_VERIFICATION_MODE_MINIMAL
8162 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8163#endif
8164
8165 /** @todo This API may require some improving later. A private deal with PGM
8166 * regarding locking and unlocking needs to be struck. A couple of TLBs
8167 * living in PGM, but with publicly accessible inlined access methods
8168 * could perhaps be an even better solution. */
8169 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8170 GCPhysMem,
8171 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8172 pVCpu->iem.s.fBypassHandlers,
8173 ppvMem,
8174 pLock);
8175 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8176 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8177
8178#ifdef IEM_VERIFICATION_MODE_FULL
8179 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8180 pVCpu->iem.s.fProblematicMemory = true;
8181#endif
8182 return rc;
8183}
8184
8185
8186/**
8187 * Unmaps a page previously mapped by iemMemPageMap.
8188 *
8189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8190 * @param GCPhysMem The physical address.
8191 * @param fAccess The intended access.
8192 * @param pvMem What iemMemPageMap returned.
8193 * @param pLock The PGM lock.
8194 */
8195DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8196{
8197 NOREF(pVCpu);
8198 NOREF(GCPhysMem);
8199 NOREF(fAccess);
8200 NOREF(pvMem);
8201 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8202}
8203
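/*
 * Usage sketch for the translate/map/unmap trio above (illustrative only; the
 * caller shape is an assumption):  the physical address returned by
 * iemMemPageTranslateAndCheckAccess keeps the page offset of GCPtrMem, so it
 * can be handed straight to iemMemPageMap, and every successful map must be
 * paired with an unmap to release the PGM lock:
 *
 *      RTGCPHYS GCPhys;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, IEM_ACCESS_DATA_R, &GCPhys);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                            // #PF already raised with the right error code.
 *
 *      void           *pvMem;
 *      PGMPAGEMAPLOCK  Lock;
 *      int rc = iemMemPageMap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, &pvMem, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint8_t const bByte = *(uint8_t const *)pvMem;                    // use the mapping...
 *          iemMemPageUnmap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, pvMem, &Lock);  // ...then release the lock.
 *      }
 *      // On failure the iemMemMap code below falls back on bounce buffering.
 */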
8204
8205/**
8206 * Looks up a memory mapping entry.
8207 *
8208 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8210 * @param pvMem The memory address.
8211 * @param fAccess The access flags to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
8212 */
8213DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8214{
8215 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8216 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8217 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8218 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8219 return 0;
8220 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8221 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8222 return 1;
8223 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8224 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8225 return 2;
8226 return VERR_NOT_FOUND;
8227}
8228
8229
8230/**
8231 * Finds a free memmap entry when using iNextMapping doesn't work.
8232 *
8233 * @returns Memory mapping index, 1024 on failure.
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 */
8236IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8237{
8238 /*
8239 * The easy case.
8240 */
8241 if (pVCpu->iem.s.cActiveMappings == 0)
8242 {
8243 pVCpu->iem.s.iNextMapping = 1;
8244 return 0;
8245 }
8246
8247 /* There should be enough mappings for all instructions. */
8248 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8249
8250 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8251 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8252 return i;
8253
8254 AssertFailedReturn(1024);
8255}
8256
8257
8258/**
8259 * Commits a bounce buffer that needs writing back and unmaps it.
8260 *
8261 * @returns Strict VBox status code.
8262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8263 * @param iMemMap The index of the buffer to commit.
8264 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8265 * Always false in ring-3, obviously.
8266 */
8267IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8268{
8269 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8270 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8271#ifdef IN_RING3
8272 Assert(!fPostponeFail);
8273 RT_NOREF_PV(fPostponeFail);
8274#endif
8275
8276 /*
8277 * Do the writing.
8278 */
8279#ifndef IEM_VERIFICATION_MODE_MINIMAL
8280 PVM pVM = pVCpu->CTX_SUFF(pVM);
8281 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8282 && !IEM_VERIFICATION_ENABLED(pVCpu))
8283 {
8284 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8285 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8286 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8287 if (!pVCpu->iem.s.fBypassHandlers)
8288 {
8289 /*
8290 * Carefully and efficiently dealing with access handler return
8291 * codes makes this a little bloated.
8292 */
8293 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8295 pbBuf,
8296 cbFirst,
8297 PGMACCESSORIGIN_IEM);
8298 if (rcStrict == VINF_SUCCESS)
8299 {
8300 if (cbSecond)
8301 {
8302 rcStrict = PGMPhysWrite(pVM,
8303 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8304 pbBuf + cbFirst,
8305 cbSecond,
8306 PGMACCESSORIGIN_IEM);
8307 if (rcStrict == VINF_SUCCESS)
8308 { /* nothing */ }
8309 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8310 {
8311 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8314 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8315 }
8316# ifndef IN_RING3
8317 else if (fPostponeFail)
8318 {
8319 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8320 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8321 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8322 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8323 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8324 return iemSetPassUpStatus(pVCpu, rcStrict);
8325 }
8326# endif
8327 else
8328 {
8329 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8330 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8331 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8332 return rcStrict;
8333 }
8334 }
8335 }
8336 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8337 {
8338 if (!cbSecond)
8339 {
8340 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8341 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8342 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8343 }
8344 else
8345 {
8346 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8347 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8348 pbBuf + cbFirst,
8349 cbSecond,
8350 PGMACCESSORIGIN_IEM);
8351 if (rcStrict2 == VINF_SUCCESS)
8352 {
8353 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8355 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8356 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8357 }
8358 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8359 {
8360 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8363 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366# ifndef IN_RING3
8367 else if (fPostponeFail)
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8373 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8374 return iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376# endif
8377 else
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8382 return rcStrict2;
8383 }
8384 }
8385 }
8386# ifndef IN_RING3
8387 else if (fPostponeFail)
8388 {
8389 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8392 if (!cbSecond)
8393 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8394 else
8395 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8396 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8397 return iemSetPassUpStatus(pVCpu, rcStrict);
8398 }
8399# endif
8400 else
8401 {
8402 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8405 return rcStrict;
8406 }
8407 }
8408 else
8409 {
8410 /*
8411 * No access handlers, much simpler.
8412 */
8413 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8414 if (RT_SUCCESS(rc))
8415 {
8416 if (cbSecond)
8417 {
8418 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8419 if (RT_SUCCESS(rc))
8420 { /* likely */ }
8421 else
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8426 return rc;
8427 }
8428 }
8429 }
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 return rc;
8436 }
8437 }
8438 }
8439#endif
8440
8441#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8442 /*
8443 * Record the write(s).
8444 */
8445 if (!pVCpu->iem.s.fNoRem)
8446 {
8447 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8448 if (pEvtRec)
8449 {
8450 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8451 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8452 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8453 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8454 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8455 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8456 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8457 }
8458 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8459 {
8460 pEvtRec = iemVerifyAllocRecord(pVCpu);
8461 if (pEvtRec)
8462 {
8463 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8464 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8465 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8466 memcpy(pEvtRec->u.RamWrite.ab,
8467 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8469 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8470 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8471 }
8472 }
8473 }
8474#endif
8475#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8476 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8477 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8478 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8479 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8480 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8481 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8482
8483 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8484 g_cbIemWrote = cbWrote;
8485 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8486#endif
8487
8488 /*
8489 * Free the mapping entry.
8490 */
8491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8492 Assert(pVCpu->iem.s.cActiveMappings != 0);
8493 pVCpu->iem.s.cActiveMappings--;
8494 return VINF_SUCCESS;
8495}
8496
8497
8498/**
8499 * iemMemMap worker that deals with a request crossing pages.
8500 */
8501IEM_STATIC VBOXSTRICTRC
8502iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8503{
8504 /*
8505 * Do the address translations.
8506 */
8507 RTGCPHYS GCPhysFirst;
8508 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8509 if (rcStrict != VINF_SUCCESS)
8510 return rcStrict;
8511
8512 RTGCPHYS GCPhysSecond;
8513 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8514 fAccess, &GCPhysSecond);
8515 if (rcStrict != VINF_SUCCESS)
8516 return rcStrict;
8517 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8518
8519 PVM pVM = pVCpu->CTX_SUFF(pVM);
8520#ifdef IEM_VERIFICATION_MODE_FULL
8521 /*
8522 * Detect problematic memory when verifying so we can select
8523 * the right execution engine. (TLB: Redo this.)
8524 */
8525 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8526 {
8527 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8528 if (RT_SUCCESS(rc2))
8529 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8530 if (RT_FAILURE(rc2))
8531 pVCpu->iem.s.fProblematicMemory = true;
8532 }
8533#endif
8534
8535
8536 /*
8537 * Read in the current memory content if it's a read, execute or partial
8538 * write access.
8539 */
8540 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8541 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8542 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8543
8544 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8545 {
8546 if (!pVCpu->iem.s.fBypassHandlers)
8547 {
8548 /*
8549 * Must carefully deal with access handler status codes here,
8550 * which makes the code a bit bloated.
8551 */
8552 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8553 if (rcStrict == VINF_SUCCESS)
8554 {
8555 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8556 if (rcStrict == VINF_SUCCESS)
8557 { /*likely */ }
8558 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8559 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8560 else
8561 {
8562 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8563 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8564 return rcStrict;
8565 }
8566 }
8567 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8568 {
8569 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8570 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8571 {
8572 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8573 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8574 }
8575 else
8576 {
8577 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8578 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8579 return rcStrict2;
8580 }
8581 }
8582 else
8583 {
8584 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8585 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8586 return rcStrict;
8587 }
8588 }
8589 else
8590 {
8591 /*
8592 * No informational status codes here, much more straightforward.
8593 */
8594 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8595 if (RT_SUCCESS(rc))
8596 {
8597 Assert(rc == VINF_SUCCESS);
8598 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8599 if (RT_SUCCESS(rc))
8600 Assert(rc == VINF_SUCCESS);
8601 else
8602 {
8603 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8604 return rc;
8605 }
8606 }
8607 else
8608 {
8609 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8610 return rc;
8611 }
8612 }
8613
8614#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8615 if ( !pVCpu->iem.s.fNoRem
8616 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8617 {
8618 /*
8619 * Record the reads.
8620 */
8621 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8622 if (pEvtRec)
8623 {
8624 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8625 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8626 pEvtRec->u.RamRead.cb = cbFirstPage;
8627 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8628 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8629 }
8630 pEvtRec = iemVerifyAllocRecord(pVCpu);
8631 if (pEvtRec)
8632 {
8633 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8634 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8635 pEvtRec->u.RamRead.cb = cbSecondPage;
8636 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8637 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8638 }
8639 }
8640#endif
8641 }
8642#ifdef VBOX_STRICT
8643 else
8644 memset(pbBuf, 0xcc, cbMem);
8645 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8646 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8647#endif
8648
8649 /*
8650 * Commit the bounce buffer entry.
8651 */
8652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8653 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8654 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8655 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8656 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8657 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8658 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8659 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8660 pVCpu->iem.s.cActiveMappings++;
8661
8662 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8663 *ppvMem = pbBuf;
8664 return VINF_SUCCESS;
8665}
8666
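/*
 * Worked example of the page split above (illustrative numbers, 4 KiB pages):
 * an 8 byte access with GCPhysFirst ending in 0xffd gives
 *      cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffd = 3,
 *      cbSecondPage = cbMem - cbFirstPage                          = 8 - 3          = 5,
 * and GCPhysSecond is the translation of GCPtrFirst + cbMem - 1 rounded down to
 * the start of the next page.
 */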
8667
8668/**
8669 * iemMemMap worker that deals with iemMemPageMap failures.
8670 */
8671IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8672 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8673{
8674 /*
8675 * Filter out conditions we can handle and the ones which shouldn't happen.
8676 */
8677 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8678 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8679 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8680 {
8681 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8682 return rcMap;
8683 }
8684 pVCpu->iem.s.cPotentialExits++;
8685
8686 /*
8687 * Read in the current memory content if it's a read, execute or partial
8688 * write access.
8689 */
8690 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8691 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8692 {
8693 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8694 memset(pbBuf, 0xff, cbMem);
8695 else
8696 {
8697 int rc;
8698 if (!pVCpu->iem.s.fBypassHandlers)
8699 {
8700 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8701 if (rcStrict == VINF_SUCCESS)
8702 { /* nothing */ }
8703 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8704 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8705 else
8706 {
8707 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8708 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8709 return rcStrict;
8710 }
8711 }
8712 else
8713 {
8714 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8715 if (RT_SUCCESS(rc))
8716 { /* likely */ }
8717 else
8718 {
8719 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8720 GCPhysFirst, rc));
8721 return rc;
8722 }
8723 }
8724 }
8725
8726#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8727 if ( !pVCpu->iem.s.fNoRem
8728 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8729 {
8730 /*
8731 * Record the read.
8732 */
8733 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8734 if (pEvtRec)
8735 {
8736 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8737 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8738 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8739 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8740 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8741 }
8742 }
8743#endif
8744 }
8745#ifdef VBOX_STRICT
8746 else
8747 memset(pbBuf, 0xcc, cbMem);
8750 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8751 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8752#endif
8753
8754 /*
8755 * Commit the bounce buffer entry.
8756 */
8757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8759 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8761 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8762 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8763 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8764 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8765 pVCpu->iem.s.cActiveMappings++;
8766
8767 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8768 *ppvMem = pbBuf;
8769 return VINF_SUCCESS;
8770}
8771
8772
8773
8774/**
8775 * Maps the specified guest memory for the given kind of access.
8776 *
8777 * This may be using bounce buffering of the memory if it's crossing a page
8778 * boundary or if there is an access handler installed for any of it. Because
8779 * of lock prefix guarantees, we're in for some extra clutter when this
8780 * happens.
8781 *
8782 * This may raise a \#GP, \#SS, \#PF or \#AC.
8783 *
8784 * @returns VBox strict status code.
8785 *
8786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8787 * @param ppvMem Where to return the pointer to the mapped
8788 * memory.
8789 * @param cbMem The number of bytes to map. This is usually 1,
8790 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8791 * string operations it can be up to a page.
8792 * @param iSegReg The index of the segment register to use for
8793 * this access. The base and limits are checked.
8794 * Use UINT8_MAX to indicate that no segmentation
8795 * is required (for IDT, GDT and LDT accesses).
8796 * @param GCPtrMem The address of the guest memory.
8797 * @param fAccess How the memory is being accessed. The
8798 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8799 * how to map the memory, while the
8800 * IEM_ACCESS_WHAT_XXX bit is used when raising
8801 * exceptions.
8802 */
8803IEM_STATIC VBOXSTRICTRC
8804iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8805{
8806 /*
8807 * Check the input and figure out which mapping entry to use.
8808 */
8809 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8810 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8811 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8812
8813 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8814 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8815 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8816 {
8817 iMemMap = iemMemMapFindFree(pVCpu);
8818 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8819 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8820 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8821 pVCpu->iem.s.aMemMappings[2].fAccess),
8822 VERR_IEM_IPE_9);
8823 }
8824
8825 /*
8826 * Map the memory, checking that we can actually access it. If something
8827 * slightly complicated happens, fall back on bounce buffering.
8828 */
8829 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8830 if (rcStrict != VINF_SUCCESS)
8831 return rcStrict;
8832
8833 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8834 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8835
8836 RTGCPHYS GCPhysFirst;
8837 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8838 if (rcStrict != VINF_SUCCESS)
8839 return rcStrict;
8840
8841 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8842 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8843 if (fAccess & IEM_ACCESS_TYPE_READ)
8844 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8845
8846 void *pvMem;
8847 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8848 if (rcStrict != VINF_SUCCESS)
8849 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8850
8851 /*
8852 * Fill in the mapping table entry.
8853 */
8854 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8855 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8856 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8857 pVCpu->iem.s.cActiveMappings++;
8858
8859 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8860 *ppvMem = pvMem;
8861 return VINF_SUCCESS;
8862}
8863
8864
8865/**
8866 * Commits the guest memory if bounce buffered and unmaps it.
8867 *
8868 * @returns Strict VBox status code.
8869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8870 * @param pvMem The mapping.
8871 * @param fAccess The kind of access.
8872 */
8873IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8874{
8875 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8876 AssertReturn(iMemMap >= 0, iMemMap);
8877
8878 /* If it's bounce buffered, we may need to write back the buffer. */
8879 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8880 {
8881 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8882 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8883 }
8884 /* Otherwise unlock it. */
8885 else
8886 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8887
8888 /* Free the entry. */
8889 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8890 Assert(pVCpu->iem.s.cActiveMappings != 0);
8891 pVCpu->iem.s.cActiveMappings--;
8892 return VINF_SUCCESS;
8893}
8894
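/*
 * Usage sketch (illustrative only; iemExampleStoreU16 is a made up name, but
 * the shape mirrors the iemMemFetchDataXxx helpers further down): map, access,
 * then commit & unmap, letting iemMemMap deal with segmentation, paging,
 * bounce buffering and exception raising.
 *
 *      IEM_STATIC VBOXSTRICTRC
 *      iemExampleStoreU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
 *      {
 *          uint16_t *pu16Dst;
 *          VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *          if (rc == VINF_SUCCESS)
 *          {
 *              *pu16Dst = u16Value;                    // write thru the mapping (or bounce buffer).
 *              rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *          }
 *          return rc;
 *      }
 */
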
8895#ifdef IEM_WITH_SETJMP
8896
8897/**
8898 * Maps the specified guest memory for the given kind of access, longjmp on
8899 * error.
8900 *
8901 * This may be using bounce buffering of the memory if it's crossing a page
8902 * boundary or if there is an access handler installed for any of it. Because
8903 * of lock prefix guarantees, we're in for some extra clutter when this
8904 * happens.
8905 *
8906 * This may raise a \#GP, \#SS, \#PF or \#AC.
8907 *
8908 * @returns Pointer to the mapped memory.
8909 *
8910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8911 * @param cbMem The number of bytes to map. This is usually 1,
8912 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8913 * string operations it can be up to a page.
8914 * @param iSegReg The index of the segment register to use for
8915 * this access. The base and limits are checked.
8916 * Use UINT8_MAX to indicate that no segmentation
8917 * is required (for IDT, GDT and LDT accesses).
8918 * @param GCPtrMem The address of the guest memory.
8919 * @param fAccess How the memory is being accessed. The
8920 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8921 * how to map the memory, while the
8922 * IEM_ACCESS_WHAT_XXX bit is used when raising
8923 * exceptions.
8924 */
8925IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8926{
8927 /*
8928 * Check the input and figure out which mapping entry to use.
8929 */
8930 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8931 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8932 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8933
8934 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8935 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8936 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8937 {
8938 iMemMap = iemMemMapFindFree(pVCpu);
8939 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8940 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8941 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8942 pVCpu->iem.s.aMemMappings[2].fAccess),
8943 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8944 }
8945
8946 /*
8947 * Map the memory, checking that we can actually access it. If something
8948 * slightly complicated happens, fall back on bounce buffering.
8949 */
8950 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8951 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8952 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8953
8954 /* Crossing a page boundary? */
8955 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8956 { /* No (likely). */ }
8957 else
8958 {
8959 void *pvMem;
8960 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8961 if (rcStrict == VINF_SUCCESS)
8962 return pvMem;
8963 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8964 }
8965
8966 RTGCPHYS GCPhysFirst;
8967 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8968 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8969 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8970
8971 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8972 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8973 if (fAccess & IEM_ACCESS_TYPE_READ)
8974 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8975
8976 void *pvMem;
8977 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8978 if (rcStrict == VINF_SUCCESS)
8979 { /* likely */ }
8980 else
8981 {
8982 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8983 if (rcStrict == VINF_SUCCESS)
8984 return pvMem;
8985 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8986 }
8987
8988 /*
8989 * Fill in the mapping table entry.
8990 */
8991 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8992 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8993 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8994 pVCpu->iem.s.cActiveMappings++;
8995
8996 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8997 return pvMem;
8998}
8999
9000
9001/**
9002 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9003 *
9004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9005 * @param pvMem The mapping.
9006 * @param fAccess The kind of access.
9007 */
9008IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9009{
9010 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9011 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9012
9013 /* If it's bounce buffered, we may need to write back the buffer. */
9014 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9015 {
9016 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9017 {
9018 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9019 if (rcStrict == VINF_SUCCESS)
9020 return;
9021 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9022 }
9023 }
9024 /* Otherwise unlock it. */
9025 else
9026 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9027
9028 /* Free the entry. */
9029 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9030 Assert(pVCpu->iem.s.cActiveMappings != 0);
9031 pVCpu->iem.s.cActiveMappings--;
9032}
9033
9034#endif
9035
9036#ifndef IN_RING3
9037/**
9038 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9039 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9040 *
9041 * Allows the instruction to be completed and retired, while the IEM user will
9042 * return to ring-3 immediately afterwards and do the postponed writes there.
9043 *
9044 * @returns VBox status code (no strict statuses). Caller must check
9045 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9047 * @param pvMem The mapping.
9048 * @param fAccess The kind of access.
9049 */
9050IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9051{
9052 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9053 AssertReturn(iMemMap >= 0, iMemMap);
9054
9055 /* If it's bounce buffered, we may need to write back the buffer. */
9056 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9057 {
9058 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9059 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9060 }
9061 /* Otherwise unlock it. */
9062 else
9063 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9064
9065 /* Free the entry. */
9066 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9067 Assert(pVCpu->iem.s.cActiveMappings != 0);
9068 pVCpu->iem.s.cActiveMappings--;
9069 return VINF_SUCCESS;
9070}
9071#endif
9072
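/*
 * Usage sketch (illustrative only; the caller shape is an assumption derived
 * from the documentation above): ring-0/raw-mode string instruction code
 * commits with the postpone variant and then checks VMCPU_FF_IEM so pending
 * writes are flushed in ring-3 before the instruction is repeated:
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvDst, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          break;                                      // retire this iteration, go to ring-3 first.
 */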
9073
9074/**
9075 * Rolls back mappings, releasing page locks and such.
9076 *
9077 * The caller shall only call this after checking cActiveMappings.
9078 *
9080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9081 */
9082IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9083{
9084 Assert(pVCpu->iem.s.cActiveMappings > 0);
9085
9086 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9087 while (iMemMap-- > 0)
9088 {
9089 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9090 if (fAccess != IEM_ACCESS_INVALID)
9091 {
9092 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9093 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9094 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9095 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9096 Assert(pVCpu->iem.s.cActiveMappings > 0);
9097 pVCpu->iem.s.cActiveMappings--;
9098 }
9099 }
9100}
9101
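/*
 * Usage sketch (illustrative only; the exact caller is not shown here): the
 * execution loops invoke this after a failed instruction, but only when
 * mappings were actually left behind, as required by the note above:
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */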
9102
9103/**
9104 * Fetches a data byte.
9105 *
9106 * @returns Strict VBox status code.
9107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9108 * @param pu8Dst Where to return the byte.
9109 * @param iSegReg The index of the segment register to use for
9110 * this access. The base and limits are checked.
9111 * @param GCPtrMem The address of the guest memory.
9112 */
9113IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9114{
9115 /* The lazy approach for now... */
9116 uint8_t const *pu8Src;
9117 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9118 if (rc == VINF_SUCCESS)
9119 {
9120 *pu8Dst = *pu8Src;
9121 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9122 }
9123 return rc;
9124}
9125
9126
9127#ifdef IEM_WITH_SETJMP
9128/**
9129 * Fetches a data byte, longjmp on error.
9130 *
9131 * @returns The byte.
9132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9133 * @param iSegReg The index of the segment register to use for
9134 * this access. The base and limits are checked.
9135 * @param GCPtrMem The address of the guest memory.
9136 */
9137DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9138{
9139 /* The lazy approach for now... */
9140 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9141 uint8_t const bRet = *pu8Src;
9142 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9143 return bRet;
9144}
9145#endif /* IEM_WITH_SETJMP */
9146
9147
9148/**
9149 * Fetches a data word.
9150 *
9151 * @returns Strict VBox status code.
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param pu16Dst Where to return the word.
9154 * @param iSegReg The index of the segment register to use for
9155 * this access. The base and limits are checked.
9156 * @param GCPtrMem The address of the guest memory.
9157 */
9158IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9159{
9160 /* The lazy approach for now... */
9161 uint16_t const *pu16Src;
9162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9163 if (rc == VINF_SUCCESS)
9164 {
9165 *pu16Dst = *pu16Src;
9166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9167 }
9168 return rc;
9169}
9170
9171
9172#ifdef IEM_WITH_SETJMP
9173/**
9174 * Fetches a data word, longjmp on error.
9175 *
9176 * @returns The word
9177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9178 * @param iSegReg The index of the segment register to use for
9179 * this access. The base and limits are checked.
9180 * @param GCPtrMem The address of the guest memory.
9181 */
9182DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9183{
9184 /* The lazy approach for now... */
9185 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9186 uint16_t const u16Ret = *pu16Src;
9187 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9188 return u16Ret;
9189}
9190#endif
9191
9192
9193/**
9194 * Fetches a data dword.
9195 *
9196 * @returns Strict VBox status code.
9197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9198 * @param pu32Dst Where to return the dword.
9199 * @param iSegReg The index of the segment register to use for
9200 * this access. The base and limits are checked.
9201 * @param GCPtrMem The address of the guest memory.
9202 */
9203IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9204{
9205 /* The lazy approach for now... */
9206 uint32_t const *pu32Src;
9207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9208 if (rc == VINF_SUCCESS)
9209 {
9210 *pu32Dst = *pu32Src;
9211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9212 }
9213 return rc;
9214}
9215
9216
9217#ifdef IEM_WITH_SETJMP
9218
9219IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9220{
9221 Assert(cbMem >= 1);
9222 Assert(iSegReg < X86_SREG_COUNT);
9223
9224 /*
9225 * 64-bit mode is simpler.
9226 */
9227 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9228 {
9229 if (iSegReg >= X86_SREG_FS)
9230 {
9231 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9232 GCPtrMem += pSel->u64Base;
9233 }
9234
9235 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9236 return GCPtrMem;
9237 }
9238 /*
9239 * 16-bit and 32-bit segmentation.
9240 */
9241 else
9242 {
9243 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9244 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9245 == X86DESCATTR_P /* data, expand up */
9246 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9247 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9248 {
9249 /* expand up */
9250 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9251 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9252 && GCPtrLast32 > (uint32_t)GCPtrMem))
9253 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9254 }
9255 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9256 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9257 {
9258 /* expand down */
9259 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9260 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9261 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9262 && GCPtrLast32 > (uint32_t)GCPtrMem))
9263 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9264 }
9265 else
9266 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9267 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9268 }
9269 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9270}
9271
9272
9273IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9274{
9275 Assert(cbMem >= 1);
9276 Assert(iSegReg < X86_SREG_COUNT);
9277
9278 /*
9279 * 64-bit mode is simpler.
9280 */
9281 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9282 {
9283 if (iSegReg >= X86_SREG_FS)
9284 {
9285 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9286 GCPtrMem += pSel->u64Base;
9287 }
9288
9289 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9290 return GCPtrMem;
9291 }
9292 /*
9293 * 16-bit and 32-bit segmentation.
9294 */
9295 else
9296 {
9297 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9298 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9299 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9300 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9301 {
9302 /* expand up */
9303 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9304 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9305 && GCPtrLast32 > (uint32_t)GCPtrMem))
9306 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9307 }
9308 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9309 {
9310 /* expand down */
9311 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9312 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9313 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9314 && GCPtrLast32 > (uint32_t)GCPtrMem))
9315 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9316 }
9317 else
9318 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9319 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9320 }
9321 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9322}
9323
9324
9325/**
9326 * Fetches a data dword, longjmp on error, fallback/safe version.
9327 *
9328 * @returns The dword
9329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9330 * @param iSegReg The index of the segment register to use for
9331 * this access. The base and limits are checked.
9332 * @param GCPtrMem The address of the guest memory.
9333 */
9334IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9335{
9336 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9337 uint32_t const u32Ret = *pu32Src;
9338 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9339 return u32Ret;
9340}
9341
9342
9343/**
9344 * Fetches a data dword, longjmp on error.
9345 *
9346 * @returns The dword
9347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9348 * @param iSegReg The index of the segment register to use for
9349 * this access. The base and limits are checked.
9350 * @param GCPtrMem The address of the guest memory.
9351 */
9352DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9353{
9354# ifdef IEM_WITH_DATA_TLB
9355 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9356 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9357 {
9358 /// @todo more later.
9359 }
9360
9361 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9362# else
9363 /* The lazy approach. */
9364 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9365 uint32_t const u32Ret = *pu32Src;
9366 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9367 return u32Ret;
9368# endif
9369}
9370#endif
9371
9372
9373#ifdef SOME_UNUSED_FUNCTION
9374/**
9375 * Fetches a data dword and sign extends it to a qword.
9376 *
9377 * @returns Strict VBox status code.
9378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9379 * @param pu64Dst Where to return the sign extended value.
9380 * @param iSegReg The index of the segment register to use for
9381 * this access. The base and limits are checked.
9382 * @param GCPtrMem The address of the guest memory.
9383 */
9384IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9385{
9386 /* The lazy approach for now... */
9387 int32_t const *pi32Src;
9388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9389 if (rc == VINF_SUCCESS)
9390 {
9391 *pu64Dst = *pi32Src;
9392 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9393 }
9394#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9395 else
9396 *pu64Dst = 0;
9397#endif
9398 return rc;
9399}
9400#endif
9401
9402
9403/**
9404 * Fetches a data qword.
9405 *
9406 * @returns Strict VBox status code.
9407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9408 * @param pu64Dst Where to return the qword.
9409 * @param iSegReg The index of the segment register to use for
9410 * this access. The base and limits are checked.
9411 * @param GCPtrMem The address of the guest memory.
9412 */
9413IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9414{
9415 /* The lazy approach for now... */
9416 uint64_t const *pu64Src;
9417 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9418 if (rc == VINF_SUCCESS)
9419 {
9420 *pu64Dst = *pu64Src;
9421 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9422 }
9423 return rc;
9424}
9425
9426
9427#ifdef IEM_WITH_SETJMP
9428/**
9429 * Fetches a data qword, longjmp on error.
9430 *
9431 * @returns The qword.
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 */
9437DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9438{
9439 /* The lazy approach for now... */
9440 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9441 uint64_t const u64Ret = *pu64Src;
9442 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9443 return u64Ret;
9444}
9445#endif
9446
9447
9448/**
9449 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param pu64Dst Where to return the qword.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9462 if (RT_UNLIKELY(GCPtrMem & 15))
9463 return iemRaiseGeneralProtectionFault0(pVCpu);
9464
9465 uint64_t const *pu64Src;
9466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9467 if (rc == VINF_SUCCESS)
9468 {
9469 *pu64Dst = *pu64Src;
9470 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9471 }
9472 return rc;
9473}
9474
9475
9476#ifdef IEM_WITH_SETJMP
9477/**
9478 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9479 *
9480 * @returns The qword.
9481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9482 * @param iSegReg The index of the segment register to use for
9483 * this access. The base and limits are checked.
9484 * @param GCPtrMem The address of the guest memory.
9485 */
9486DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9487{
9488 /* The lazy approach for now... */
9489 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9490 if (RT_LIKELY(!(GCPtrMem & 15)))
9491 {
9492 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9493 uint64_t const u64Ret = *pu64Src;
9494 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9495 return u64Ret;
9496 }
9497
9498 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9499 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9500}
9501#endif
9502
9503
9504/**
9505 * Fetches a data tword.
9506 *
9507 * @returns Strict VBox status code.
9508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9509 * @param pr80Dst Where to return the tword.
9510 * @param iSegReg The index of the segment register to use for
9511 * this access. The base and limits are checked.
9512 * @param GCPtrMem The address of the guest memory.
9513 */
9514IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9515{
9516 /* The lazy approach for now... */
9517 PCRTFLOAT80U pr80Src;
9518 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9519 if (rc == VINF_SUCCESS)
9520 {
9521 *pr80Dst = *pr80Src;
9522 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9523 }
9524 return rc;
9525}
9526
9527
9528#ifdef IEM_WITH_SETJMP
9529/**
9530 * Fetches a data tword, longjmp on error.
9531 *
9532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9533 * @param pr80Dst Where to return the tword.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 */
9538DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /* The lazy approach for now... */
9541 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9542 *pr80Dst = *pr80Src;
9543 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9544}
9545#endif
9546
9547
9548/**
9549 * Fetches a data dqword (double qword), generally SSE related.
9550 *
9551 * @returns Strict VBox status code.
9552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9553 * @param pu128Dst Where to return the dqword.
9554 * @param iSegReg The index of the segment register to use for
9555 * this access. The base and limits are checked.
9556 * @param GCPtrMem The address of the guest memory.
9557 */
9558IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9559{
9560 /* The lazy approach for now... */
9561 PCRTUINT128U pu128Src;
9562 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9563 if (rc == VINF_SUCCESS)
9564 {
9565 pu128Dst->au64[0] = pu128Src->au64[0];
9566 pu128Dst->au64[1] = pu128Src->au64[1];
9567 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9568 }
9569 return rc;
9570}
9571
9572
9573#ifdef IEM_WITH_SETJMP
9574/**
9575 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9576 *
9577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9578 * @param pu128Dst Where to return the dqword.
9579 * @param iSegReg The index of the segment register to use for
9580 * this access. The base and limits are checked.
9581 * @param GCPtrMem The address of the guest memory.
9582 */
9583IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9584{
9585 /* The lazy approach for now... */
9586 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9587 pu128Dst->au64[0] = pu128Src->au64[0];
9588 pu128Dst->au64[1] = pu128Src->au64[1];
9589 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9590}
9591#endif
9592
9593
9594/**
9595 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9596 * related.
9597 *
9598 * Raises \#GP(0) if not aligned.
9599 *
9600 * @returns Strict VBox status code.
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pu128Dst Where to return the dqword.
9603 * @param iSegReg The index of the segment register to use for
9604 * this access. The base and limits are checked.
9605 * @param GCPtrMem The address of the guest memory.
9606 */
9607IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9608{
9609 /* The lazy approach for now... */
9610 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
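    /* Note: the 16-byte alignment requirement below is waived when MXCSR.MM is
     * set, which we understand to be the AMD misaligned-SSE mode bit. */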
9611 if ( (GCPtrMem & 15)
9612 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9613 return iemRaiseGeneralProtectionFault0(pVCpu);
9614
9615 PCRTUINT128U pu128Src;
9616 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 if (rc == VINF_SUCCESS)
9618 {
9619 pu128Dst->au64[0] = pu128Src->au64[0];
9620 pu128Dst->au64[1] = pu128Src->au64[1];
9621 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9622 }
9623 return rc;
9624}
9625
9626
9627#ifdef IEM_WITH_SETJMP
9628/**
9629 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9630 * related, longjmp on error.
9631 *
9632 * Raises \#GP(0) if not aligned.
9633 *
9634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9635 * @param pu128Dst Where to return the dqword.
9636 * @param iSegReg The index of the segment register to use for
9637 * this access. The base and limits are checked.
9638 * @param GCPtrMem The address of the guest memory.
9639 */
9640DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9641{
9642 /* The lazy approach for now... */
9643 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9644 if ( (GCPtrMem & 15) == 0
9645 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9646 {
9647 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9648 pu128Dst->au64[0] = pu128Src->au64[0];
9649 pu128Dst->au64[1] = pu128Src->au64[1];
9650 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9651 return;
9652 }
9653
9654 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9655 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9656}
9657#endif
9658
9659
9660/**
9661 * Fetches a data oword (octo word), generally AVX related.
9662 *
9663 * @returns Strict VBox status code.
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pu256Dst Where to return the oword.
9666 * @param iSegReg The index of the segment register to use for
9667 * this access. The base and limits are checked.
9668 * @param GCPtrMem The address of the guest memory.
9669 */
9670IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9671{
9672 /* The lazy approach for now... */
9673 PCRTUINT256U pu256Src;
9674 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9675 if (rc == VINF_SUCCESS)
9676 {
9677 pu256Dst->au64[0] = pu256Src->au64[0];
9678 pu256Dst->au64[1] = pu256Src->au64[1];
9679 pu256Dst->au64[2] = pu256Src->au64[2];
9680 pu256Dst->au64[3] = pu256Src->au64[3];
9681 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9682 }
9683 return rc;
9684}
9685
9686
9687#ifdef IEM_WITH_SETJMP
9688/**
9689 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9690 *
9691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9692 * @param pu256Dst Where to return the oword.
9693 * @param iSegReg The index of the segment register to use for
9694 * this access. The base and limits are checked.
9695 * @param GCPtrMem The address of the guest memory.
9696 */
9697IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9698{
9699 /* The lazy approach for now... */
9700 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9701 pu256Dst->au64[0] = pu256Src->au64[0];
9702 pu256Dst->au64[1] = pu256Src->au64[1];
9703 pu256Dst->au64[2] = pu256Src->au64[2];
9704 pu256Dst->au64[3] = pu256Src->au64[3];
9705 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9706}
9707#endif
9708
9709
9710/**
9711 * Fetches a data oword (octo word) at an aligned address, generally AVX
9712 * related.
9713 *
9714 * Raises \#GP(0) if not aligned.
9715 *
9716 * @returns Strict VBox status code.
9717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9718 * @param pu256Dst Where to return the oword.
9719 * @param iSegReg The index of the segment register to use for
9720 * this access. The base and limits are checked.
9721 * @param GCPtrMem The address of the guest memory.
9722 */
9723IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9724{
9725 /* The lazy approach for now... */
9726 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9727 if (GCPtrMem & 31)
9728 return iemRaiseGeneralProtectionFault0(pVCpu);
9729
9730 PCRTUINT256U pu256Src;
9731 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9732 if (rc == VINF_SUCCESS)
9733 {
9734 pu256Dst->au64[0] = pu256Src->au64[0];
9735 pu256Dst->au64[1] = pu256Src->au64[1];
9736 pu256Dst->au64[2] = pu256Src->au64[2];
9737 pu256Dst->au64[3] = pu256Src->au64[3];
9738 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9739 }
9740 return rc;
9741}
9742
9743
9744#ifdef IEM_WITH_SETJMP
9745/**
9746 * Fetches a data oword (octo word) at an aligned address, generally AVX
9747 * related, longjmp on error.
9748 *
9749 * Raises \#GP(0) if not aligned.
9750 *
9751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9752 * @param pu256Dst Where to return the oword.
9753 * @param iSegReg The index of the segment register to use for
9754 * this access. The base and limits are checked.
9755 * @param GCPtrMem The address of the guest memory.
9756 */
9757DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9758{
9759 /* The lazy approach for now... */
9760 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9761 if ((GCPtrMem & 31) == 0)
9762 {
9763 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9764 pu256Dst->au64[0] = pu256Src->au64[0];
9765 pu256Dst->au64[1] = pu256Src->au64[1];
9766 pu256Dst->au64[2] = pu256Src->au64[2];
9767 pu256Dst->au64[3] = pu256Src->au64[3];
9768 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9769 return;
9770 }
9771
9772 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9773 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9774}
9775#endif
9776
9777
9778
9779/**
9780 * Fetches a descriptor register (lgdt, lidt).
9781 *
9782 * @returns Strict VBox status code.
9783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9784 * @param pcbLimit Where to return the limit.
9785 * @param pGCPtrBase Where to return the base.
9786 * @param iSegReg The index of the segment register to use for
9787 * this access. The base and limits are checked.
9788 * @param GCPtrMem The address of the guest memory.
9789 * @param enmOpSize The effective operand size.
9790 */
9791IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9792 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9793{
9794 /*
9795 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9796 * little special:
9797 * - The two reads are done separately.
9798 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9799 * - We suspect the 386 to actually commit the limit before the base in
9800 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9801 * don't try to emulate this eccentric behavior, because it's not well
9802 * enough understood and rather hard to trigger.
9803 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9804 */
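    /* Illustrative operand layout for the reads below:
     *    +0: 16-bit limit
     *    +2: base (masked to 24 bits with a 16-bit operand size, 32 bits with
     *        a 32-bit operand size, full 64 bits in long mode). */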
9805 VBOXSTRICTRC rcStrict;
9806 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9807 {
9808 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9809 if (rcStrict == VINF_SUCCESS)
9810 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9811 }
9812 else
9813 {
9814 uint32_t uTmp = 0; /* (Initialized to silence a potential Visual C++ used-uninitialized warning.) */
9815 if (enmOpSize == IEMMODE_32BIT)
9816 {
9817 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9818 {
9819 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9820 if (rcStrict == VINF_SUCCESS)
9821 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9822 }
9823 else
9824 {
9825 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9826 if (rcStrict == VINF_SUCCESS)
9827 {
9828 *pcbLimit = (uint16_t)uTmp;
9829 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9830 }
9831 }
9832 if (rcStrict == VINF_SUCCESS)
9833 *pGCPtrBase = uTmp;
9834 }
9835 else
9836 {
9837 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9838 if (rcStrict == VINF_SUCCESS)
9839 {
9840 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9841 if (rcStrict == VINF_SUCCESS)
9842 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9843 }
9844 }
9845 }
9846 return rcStrict;
9847}
9848
9849
9850
9851/**
9852 * Stores a data byte.
9853 *
9854 * @returns Strict VBox status code.
9855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9856 * @param iSegReg The index of the segment register to use for
9857 * this access. The base and limits are checked.
9858 * @param GCPtrMem The address of the guest memory.
9859 * @param u8Value The value to store.
9860 */
9861IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9862{
9863 /* The lazy approach for now... */
9864 uint8_t *pu8Dst;
9865 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9866 if (rc == VINF_SUCCESS)
9867 {
9868 *pu8Dst = u8Value;
9869 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9870 }
9871 return rc;
9872}
9873
9874
9875#ifdef IEM_WITH_SETJMP
9876/**
9877 * Stores a data byte, longjmp on error.
9878 *
9879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9880 * @param iSegReg The index of the segment register to use for
9881 * this access. The base and limits are checked.
9882 * @param GCPtrMem The address of the guest memory.
9883 * @param u8Value The value to store.
9884 */
9885IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9886{
9887 /* The lazy approach for now... */
9888 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9889 *pu8Dst = u8Value;
9890 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9891}
9892#endif
9893
9894
9895/**
9896 * Stores a data word.
9897 *
9898 * @returns Strict VBox status code.
9899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9900 * @param iSegReg The index of the segment register to use for
9901 * this access. The base and limits are checked.
9902 * @param GCPtrMem The address of the guest memory.
9903 * @param u16Value The value to store.
9904 */
9905IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9906{
9907 /* The lazy approach for now... */
9908 uint16_t *pu16Dst;
9909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9910 if (rc == VINF_SUCCESS)
9911 {
9912 *pu16Dst = u16Value;
9913 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9914 }
9915 return rc;
9916}
9917
9918
9919#ifdef IEM_WITH_SETJMP
9920/**
9921 * Stores a data word, longjmp on error.
9922 *
9923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9924 * @param iSegReg The index of the segment register to use for
9925 * this access. The base and limits are checked.
9926 * @param GCPtrMem The address of the guest memory.
9927 * @param u16Value The value to store.
9928 */
9929IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9930{
9931 /* The lazy approach for now... */
9932 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9933 *pu16Dst = u16Value;
9934 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9935}
9936#endif
9937
9938
9939/**
9940 * Stores a data dword.
9941 *
9942 * @returns Strict VBox status code.
9943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9944 * @param iSegReg The index of the segment register to use for
9945 * this access. The base and limits are checked.
9946 * @param GCPtrMem The address of the guest memory.
9947 * @param u32Value The value to store.
9948 */
9949IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9950{
9951 /* The lazy approach for now... */
9952 uint32_t *pu32Dst;
9953 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9954 if (rc == VINF_SUCCESS)
9955 {
9956 *pu32Dst = u32Value;
9957 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9958 }
9959 return rc;
9960}
9961
9962
9963#ifdef IEM_WITH_SETJMP
9964/**
9965 * Stores a data dword, longjmp on error.
9966 *
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param iSegReg The index of the segment register to use for
9970 * this access. The base and limits are checked.
9971 * @param GCPtrMem The address of the guest memory.
9972 * @param u32Value The value to store.
9973 */
9974IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9975{
9976 /* The lazy approach for now... */
9977 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9978 *pu32Dst = u32Value;
9979 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9980}
9981#endif
9982
9983
9984/**
9985 * Stores a data qword.
9986 *
9987 * @returns Strict VBox status code.
9988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9989 * @param iSegReg The index of the segment register to use for
9990 * this access. The base and limits are checked.
9991 * @param GCPtrMem The address of the guest memory.
9992 * @param u64Value The value to store.
9993 */
9994IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9995{
9996 /* The lazy approach for now... */
9997 uint64_t *pu64Dst;
9998 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9999 if (rc == VINF_SUCCESS)
10000 {
10001 *pu64Dst = u64Value;
10002 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10003 }
10004 return rc;
10005}
10006
10007
10008#ifdef IEM_WITH_SETJMP
10009/**
10010 * Stores a data qword, longjmp on error.
10011 *
10012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10013 * @param iSegReg The index of the segment register to use for
10014 * this access. The base and limits are checked.
10015 * @param GCPtrMem The address of the guest memory.
10016 * @param u64Value The value to store.
10017 */
10018IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10019{
10020 /* The lazy approach for now... */
10021 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10022 *pu64Dst = u64Value;
10023 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10024}
10025#endif
10026
10027
10028/**
10029 * Stores a data dqword.
10030 *
10031 * @returns Strict VBox status code.
10032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10033 * @param iSegReg The index of the segment register to use for
10034 * this access. The base and limits are checked.
10035 * @param GCPtrMem The address of the guest memory.
10036 * @param u128Value The value to store.
10037 */
10038IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10039{
10040 /* The lazy approach for now... */
10041 PRTUINT128U pu128Dst;
10042 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10043 if (rc == VINF_SUCCESS)
10044 {
10045 pu128Dst->au64[0] = u128Value.au64[0];
10046 pu128Dst->au64[1] = u128Value.au64[1];
10047 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10048 }
10049 return rc;
10050}
10051
10052
10053#ifdef IEM_WITH_SETJMP
10054/**
10055 * Stores a data dqword, longjmp on error.
10056 *
10057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10058 * @param iSegReg The index of the segment register to use for
10059 * this access. The base and limits are checked.
10060 * @param GCPtrMem The address of the guest memory.
10061 * @param u128Value The value to store.
10062 */
10063IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10064{
10065 /* The lazy approach for now... */
10066 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10067 pu128Dst->au64[0] = u128Value.au64[0];
10068 pu128Dst->au64[1] = u128Value.au64[1];
10069 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10070}
10071#endif
10072
10073
10074/**
10075 * Stores a data dqword, SSE aligned.
10076 *
10077 * @returns Strict VBox status code.
10078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10079 * @param iSegReg The index of the segment register to use for
10080 * this access. The base and limits are checked.
10081 * @param GCPtrMem The address of the guest memory.
10082 * @param u128Value The value to store.
10083 */
10084IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10085{
10086 /* The lazy approach for now... */
10087 if ( (GCPtrMem & 15)
10088 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10089 return iemRaiseGeneralProtectionFault0(pVCpu);
10090
10091 PRTUINT128U pu128Dst;
10092 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10093 if (rc == VINF_SUCCESS)
10094 {
10095 pu128Dst->au64[0] = u128Value.au64[0];
10096 pu128Dst->au64[1] = u128Value.au64[1];
10097 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10098 }
10099 return rc;
10100}
10101
10102
10103#ifdef IEM_WITH_SETJMP
10104/**
10105 * Stores a data dqword, SSE aligned, longjmp on error.
10106 *
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param u128Value The value to store.
10113 */
10114DECL_NO_INLINE(IEM_STATIC, void)
10115iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10116{
10117 /* The lazy approach for now... */
10118 if ( (GCPtrMem & 15) == 0
10119 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10120 {
10121 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10122 pu128Dst->au64[0] = u128Value.au64[0];
10123 pu128Dst->au64[1] = u128Value.au64[1];
10124 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10125 return;
10126 }
10127
10128 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10129 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10130}
10131#endif
10132
10133
10134/**
10135 * Stores a data oword (octo word), generally AVX related.
10136 *
10137 * @returns Strict VBox status code.
10138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10139 * @param iSegReg The index of the segment register to use for
10140 * this access. The base and limits are checked.
10141 * @param GCPtrMem The address of the guest memory.
10142 * @param pu256Value Pointer to the value to store.
10143 */
10144IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10145{
10146 /* The lazy approach for now... */
10147 PRTUINT256U pu256Dst;
10148 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10149 if (rc == VINF_SUCCESS)
10150 {
10151 pu256Dst->au64[0] = pu256Value->au64[0];
10152 pu256Dst->au64[1] = pu256Value->au64[1];
10153 pu256Dst->au64[2] = pu256Value->au64[2];
10154 pu256Dst->au64[3] = pu256Value->au64[3];
10155 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10156 }
10157 return rc;
10158}
10159
10160
10161#ifdef IEM_WITH_SETJMP
10162/**
10163 * Stores a data oword (octo word), longjmp on error.
10164 *
10165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10166 * @param iSegReg The index of the segment register to use for
10167 * this access. The base and limits are checked.
10168 * @param GCPtrMem The address of the guest memory.
10169 * @param pu256Value Pointer to the value to store.
10170 */
10171IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10172{
10173 /* The lazy approach for now... */
10174 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10175 pu256Dst->au64[0] = pu256Value->au64[0];
10176 pu256Dst->au64[1] = pu256Value->au64[1];
10177 pu256Dst->au64[2] = pu256Value->au64[2];
10178 pu256Dst->au64[3] = pu256Value->au64[3];
10179 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10180}
10181#endif
10182
10183
10184/**
10185 * Stores a data oword (octo word), AVX aligned.
10186 *
10187 * @returns Strict VBox status code.
10188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10189 * @param iSegReg The index of the segment register to use for
10190 * this access. The base and limits are checked.
10191 * @param GCPtrMem The address of the guest memory.
10192 * @param pu256Value Pointer to the value to store.
10193 */
10194IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10195{
10196 /* The lazy approach for now... */
10197 if (GCPtrMem & 31)
10198 return iemRaiseGeneralProtectionFault0(pVCpu);
10199
10200 PRTUINT256U pu256Dst;
10201 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10202 if (rc == VINF_SUCCESS)
10203 {
10204 pu256Dst->au64[0] = pu256Value->au64[0];
10205 pu256Dst->au64[1] = pu256Value->au64[1];
10206 pu256Dst->au64[2] = pu256Value->au64[2];
10207 pu256Dst->au64[3] = pu256Value->au64[3];
10208 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10209 }
10210 return rc;
10211}
10212
10213
10214#ifdef IEM_WITH_SETJMP
10215/**
10216 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10217 *
10219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10220 * @param iSegReg The index of the segment register to use for
10221 * this access. The base and limits are checked.
10222 * @param GCPtrMem The address of the guest memory.
10223 * @param pu256Value Pointer to the value to store.
10224 */
10225DECL_NO_INLINE(IEM_STATIC, void)
10226iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10227{
10228 /* The lazy approach for now... */
10229 if ((GCPtrMem & 31) == 0)
10230 {
10231 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10232 pu256Dst->au64[0] = pu256Value->au64[0];
10233 pu256Dst->au64[1] = pu256Value->au64[1];
10234 pu256Dst->au64[2] = pu256Value->au64[2];
10235 pu256Dst->au64[3] = pu256Value->au64[3];
10236 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10237 return;
10238 }
10239
10240 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10241 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10242}
10243#endif
10244
10245
10246/**
10247 * Stores a descriptor register (sgdt, sidt).
10248 *
10249 * @returns Strict VBox status code.
10250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10251 * @param cbLimit The limit.
10252 * @param GCPtrBase The base address.
10253 * @param iSegReg The index of the segment register to use for
10254 * this access. The base and limits are checked.
10255 * @param GCPtrMem The address of the guest memory.
10256 */
10257IEM_STATIC VBOXSTRICTRC
10258iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10259{
10260 VBOXSTRICTRC rcStrict;
10261 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10262 {
10263 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10264 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10265 }
10266
10267 /*
10268 * The SIDT and SGDT instructions actually store the data using two
10269 * independent writes. The instructions do not respond to operand size prefixes.
10270 */
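    /* Layout written below: the 16-bit limit at GCPtrMem, then the base at
     * GCPtrMem+2 (dword in 16/32-bit mode, qword in 64-bit mode).  For 286
     * and older targets the undefined top byte of the base is stored as 0xFF,
     * which is what those CPUs are generally reported to do. */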
10271 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10272 if (rcStrict == VINF_SUCCESS)
10273 {
10274 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10275 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10276 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10277 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10278 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10279 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10280 else
10281 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10282 }
10283 return rcStrict;
10284}
10285
10286
10287/**
10288 * Pushes a word onto the stack.
10289 *
10290 * @returns Strict VBox status code.
10291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10292 * @param u16Value The value to push.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10295{
10296 /* Decrement the stack pointer. */
10297 uint64_t uNewRsp;
10298 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10299 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10300
10301 /* Write the word the lazy way. */
10302 uint16_t *pu16Dst;
10303 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10304 if (rc == VINF_SUCCESS)
10305 {
10306 *pu16Dst = u16Value;
10307 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10308 }
10309
10310 /* Commit the new RSP value unless an access handler made trouble. */
10311 if (rc == VINF_SUCCESS)
10312 pCtx->rsp = uNewRsp;
10313
10314 return rc;
10315}
10316
10317
10318/**
10319 * Pushes a dword onto the stack.
10320 *
10321 * @returns Strict VBox status code.
10322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10323 * @param u32Value The value to push.
10324 */
10325IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10326{
10327 /* Decrement the stack pointer. */
10328 uint64_t uNewRsp;
10329 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10330 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10331
10332 /* Write the dword the lazy way. */
10333 uint32_t *pu32Dst;
10334 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10335 if (rc == VINF_SUCCESS)
10336 {
10337 *pu32Dst = u32Value;
10338 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10339 }
10340
10341 /* Commit the new RSP value unless an access handler made trouble. */
10342 if (rc == VINF_SUCCESS)
10343 pCtx->rsp = uNewRsp;
10344
10345 return rc;
10346}
10347
10348
10349/**
10350 * Pushes a dword segment register value onto the stack.
10351 *
10352 * @returns Strict VBox status code.
10353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10354 * @param u32Value The value to push.
10355 */
10356IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10357{
10358 /* Decrement the stack pointer. */
10359 uint64_t uNewRsp;
10360 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10361 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10362
10363 VBOXSTRICTRC rc;
10364 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10365 {
10366 /* The recompiler writes a full dword. */
10367 uint32_t *pu32Dst;
10368 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10369 if (rc == VINF_SUCCESS)
10370 {
10371 *pu32Dst = u32Value;
10372 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10373 }
10374 }
10375 else
10376 {
10377 /* The Intel docs talk about zero extending the selector register
10378 value. My actual Intel CPU here might be zero extending the value
10379 but it still only writes the lower word... */
10380 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10381 * happens when crossing an electric page boundary: is the high word checked
10382 * for write accessibility or not? Probably it is. What about segment limits?
10383 * It appears this behavior is also shared with trap error codes.
10384 *
10385 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10386 * ancient hardware when it actually did change. */
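    /* Note: we map a full dword read-write so that all four bytes are checked
     * for accessibility, but only the low word is modified, mirroring the
     * hardware behaviour described above. */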
10387 uint16_t *pu16Dst;
10388 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10389 if (rc == VINF_SUCCESS)
10390 {
10391 *pu16Dst = (uint16_t)u32Value;
10392 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10393 }
10394 }
10395
10396 /* Commit the new RSP value unless an access handler made trouble. */
10397 if (rc == VINF_SUCCESS)
10398 pCtx->rsp = uNewRsp;
10399
10400 return rc;
10401}
10402
10403
10404/**
10405 * Pushes a qword onto the stack.
10406 *
10407 * @returns Strict VBox status code.
10408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10409 * @param u64Value The value to push.
10410 */
10411IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10412{
10413 /* Decrement the stack pointer. */
10414 uint64_t uNewRsp;
10415 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10416 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10417
10418 /* Write the qword the lazy way. */
10419 uint64_t *pu64Dst;
10420 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10421 if (rc == VINF_SUCCESS)
10422 {
10423 *pu64Dst = u64Value;
10424 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10425 }
10426
10427 /* Commit the new RSP value unless an access handler made trouble. */
10428 if (rc == VINF_SUCCESS)
10429 pCtx->rsp = uNewRsp;
10430
10431 return rc;
10432}
10433
10434
10435/**
10436 * Pops a word from the stack.
10437 *
10438 * @returns Strict VBox status code.
10439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10440 * @param pu16Value Where to store the popped value.
10441 */
10442IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10443{
10444 /* Increment the stack pointer. */
10445 uint64_t uNewRsp;
10446 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10447 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10448
10449 /* Fetch the word the lazy way. */
10450 uint16_t const *pu16Src;
10451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10452 if (rc == VINF_SUCCESS)
10453 {
10454 *pu16Value = *pu16Src;
10455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10456
10457 /* Commit the new RSP value. */
10458 if (rc == VINF_SUCCESS)
10459 pCtx->rsp = uNewRsp;
10460 }
10461
10462 return rc;
10463}
10464
10465
10466/**
10467 * Pops a dword from the stack.
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10471 * @param pu32Value Where to store the popped value.
10472 */
10473IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10474{
10475 /* Increment the stack pointer. */
10476 uint64_t uNewRsp;
10477 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10478 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10479
10480 /* Fetch the dword the lazy way. */
10481 uint32_t const *pu32Src;
10482 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10483 if (rc == VINF_SUCCESS)
10484 {
10485 *pu32Value = *pu32Src;
10486 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10487
10488 /* Commit the new RSP value. */
10489 if (rc == VINF_SUCCESS)
10490 pCtx->rsp = uNewRsp;
10491 }
10492
10493 return rc;
10494}
10495
10496
10497/**
10498 * Pops a qword from the stack.
10499 *
10500 * @returns Strict VBox status code.
10501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10502 * @param pu64Value Where to store the popped value.
10503 */
10504IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10505{
10506 /* Increment the stack pointer. */
10507 uint64_t uNewRsp;
10508 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10509 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10510
10511 /* Fetch the qword the lazy way. */
10512 uint64_t const *pu64Src;
10513 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10514 if (rc == VINF_SUCCESS)
10515 {
10516 *pu64Value = *pu64Src;
10517 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10518
10519 /* Commit the new RSP value. */
10520 if (rc == VINF_SUCCESS)
10521 pCtx->rsp = uNewRsp;
10522 }
10523
10524 return rc;
10525}
10526
10527
10528/**
10529 * Pushes a word onto the stack, using a temporary stack pointer.
10530 *
10531 * @returns Strict VBox status code.
10532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10533 * @param u16Value The value to push.
10534 * @param pTmpRsp Pointer to the temporary stack pointer.
10535 */
10536IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10537{
10538 /* Decrement the stack pointer. */
10539 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10540 RTUINT64U NewRsp = *pTmpRsp;
10541 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10542
10543 /* Write the word the lazy way. */
10544 uint16_t *pu16Dst;
10545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10546 if (rc == VINF_SUCCESS)
10547 {
10548 *pu16Dst = u16Value;
10549 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10550 }
10551
10552 /* Commit the new RSP value unless an access handler made trouble. */
10553 if (rc == VINF_SUCCESS)
10554 *pTmpRsp = NewRsp;
10555
10556 return rc;
10557}
10558
10559
10560/**
10561 * Pushes a dword onto the stack, using a temporary stack pointer.
10562 *
10563 * @returns Strict VBox status code.
10564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10565 * @param u32Value The value to push.
10566 * @param pTmpRsp Pointer to the temporary stack pointer.
10567 */
10568IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10569{
10570 /* Decrement the stack pointer. */
10571 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10572 RTUINT64U NewRsp = *pTmpRsp;
10573 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10574
10575 /* Write the dword the lazy way. */
10576 uint32_t *pu32Dst;
10577 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10578 if (rc == VINF_SUCCESS)
10579 {
10580 *pu32Dst = u32Value;
10581 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10582 }
10583
10584 /* Commit the new RSP value unless an access handler made trouble. */
10585 if (rc == VINF_SUCCESS)
10586 *pTmpRsp = NewRsp;
10587
10588 return rc;
10589}
10590
10591
10592/**
10593 * Pushes a qword onto the stack, using a temporary stack pointer.
10594 *
10595 * @returns Strict VBox status code.
10596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10597 * @param u64Value The value to push.
10598 * @param pTmpRsp Pointer to the temporary stack pointer.
10599 */
10600IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10601{
10602 /* Decrement the stack pointer. */
10603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10604 RTUINT64U NewRsp = *pTmpRsp;
10605 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10606
10607 /* Write the qword the lazy way. */
10608 uint64_t *pu64Dst;
10609 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10610 if (rc == VINF_SUCCESS)
10611 {
10612 *pu64Dst = u64Value;
10613 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10614 }
10615
10616 /* Commit the new RSP value unless an access handler made trouble. */
10617 if (rc == VINF_SUCCESS)
10618 *pTmpRsp = NewRsp;
10619
10620 return rc;
10621}
10622
10623
10624/**
10625 * Pops a word from the stack, using a temporary stack pointer.
10626 *
10627 * @returns Strict VBox status code.
10628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10629 * @param pu16Value Where to store the popped value.
10630 * @param pTmpRsp Pointer to the temporary stack pointer.
10631 */
10632IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10633{
10634 /* Increment the stack pointer. */
10635 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10636 RTUINT64U NewRsp = *pTmpRsp;
10637 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10638
10639 /* Fetch the word the lazy way. */
10640 uint16_t const *pu16Src;
10641 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10642 if (rc == VINF_SUCCESS)
10643 {
10644 *pu16Value = *pu16Src;
10645 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10646
10647 /* Commit the new RSP value. */
10648 if (rc == VINF_SUCCESS)
10649 *pTmpRsp = NewRsp;
10650 }
10651
10652 return rc;
10653}
10654
10655
10656/**
10657 * Pops a dword from the stack, using a temporary stack pointer.
10658 *
10659 * @returns Strict VBox status code.
10660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10661 * @param pu32Value Where to store the popped value.
10662 * @param pTmpRsp Pointer to the temporary stack pointer.
10663 */
10664IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10665{
10666 /* Increment the stack pointer. */
10667 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10668 RTUINT64U NewRsp = *pTmpRsp;
10669 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10670
10671 /* Fetch the dword the lazy way. */
10672 uint32_t const *pu32Src;
10673 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10674 if (rc == VINF_SUCCESS)
10675 {
10676 *pu32Value = *pu32Src;
10677 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10678
10679 /* Commit the new RSP value. */
10680 if (rc == VINF_SUCCESS)
10681 *pTmpRsp = NewRsp;
10682 }
10683
10684 return rc;
10685}
10686
10687
10688/**
10689 * Pops a qword from the stack, using a temporary stack pointer.
10690 *
10691 * @returns Strict VBox status code.
10692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10693 * @param pu64Value Where to store the popped value.
10694 * @param pTmpRsp Pointer to the temporary stack pointer.
10695 */
10696IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10697{
10698 /* Increment the stack pointer. */
10699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10700 RTUINT64U NewRsp = *pTmpRsp;
10701 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10702
10703 /* Fetch the qword the lazy way. */
10704 uint64_t const *pu64Src;
10705 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10706 if (rcStrict == VINF_SUCCESS)
10707 {
10708 *pu64Value = *pu64Src;
10709 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10710
10711 /* Commit the new RSP value. */
10712 if (rcStrict == VINF_SUCCESS)
10713 *pTmpRsp = NewRsp;
10714 }
10715
10716 return rcStrict;
10717}
10718
10719
10720/**
10721 * Begin a special stack push (used by interrupts, exceptions and such).
10722 *
10723 * This will raise \#SS or \#PF if appropriate.
10724 *
10725 * @returns Strict VBox status code.
10726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10727 * @param cbMem The number of bytes to push onto the stack.
10728 * @param ppvMem Where to return the pointer to the stack memory.
10729 * As with the other memory functions this could be
10730 * direct access or bounce buffered access, so
10731 * don't commit the register until the commit call
10732 * succeeds.
10733 * @param puNewRsp Where to return the new RSP value. This must be
10734 * passed unchanged to
10735 * iemMemStackPushCommitSpecial().
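 *
 * Rough usage sketch (illustrative only, not lifted from a specific caller):
 * @code
 *      void    *pvStackMem;
 *      uint64_t uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvStackMem, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... fill the 8 mapped bytes ...
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackMem, uNewRsp);
 * @endcode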
10736 */
10737IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10738{
10739 Assert(cbMem < UINT8_MAX);
10740 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10741 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10742 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10743}
10744
10745
10746/**
10747 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10748 *
10749 * This will update the rSP.
10750 *
10751 * @returns Strict VBox status code.
10752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10753 * @param pvMem The pointer returned by
10754 * iemMemStackPushBeginSpecial().
10755 * @param uNewRsp The new RSP value returned by
10756 * iemMemStackPushBeginSpecial().
10757 */
10758IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10759{
10760 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10761 if (rcStrict == VINF_SUCCESS)
10762 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10763 return rcStrict;
10764}
10765
10766
10767/**
10768 * Begin a special stack pop (used by iret, retf and such).
10769 *
10770 * This will raise \#SS or \#PF if appropriate.
10771 *
10772 * @returns Strict VBox status code.
10773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10774 * @param cbMem The number of bytes to pop from the stack.
10775 * @param ppvMem Where to return the pointer to the stack memory.
10776 * @param puNewRsp Where to return the new RSP value. This must be
10777 * assigned to CPUMCTX::rsp manually some time
10778 * after iemMemStackPopDoneSpecial() has been
10779 * called.
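 *
 * Rough usage sketch (illustrative only, not lifted from a specific caller):
 * @code
 *      void const *pvStackMem;
 *      uint64_t    uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvStackMem, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... read the 8 mapped bytes ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackMem);
 *      if (rcStrict == VINF_SUCCESS)
 *          IEM_GET_CTX(pVCpu)->rsp = uNewRsp; // the manual RSP commit mentioned above
 * @endcode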
10780 */
10781IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10782{
10783 Assert(cbMem < UINT8_MAX);
10784 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10785 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10786 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10787}
10788
10789
10790/**
10791 * Continue a special stack pop (used by iret and retf).
10792 *
10793 * This will raise \#SS or \#PF if appropriate.
10794 *
10795 * @returns Strict VBox status code.
10796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10797 * @param cbMem The number of bytes to pop from the stack.
10798 * @param ppvMem Where to return the pointer to the stack memory.
10799 * @param puNewRsp Where to return the new RSP value. This must be
10800 * assigned to CPUMCTX::rsp manually some time
10801 * after iemMemStackPopDoneSpecial() has been
10802 * called.
10803 */
10804IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10805{
10806 Assert(cbMem < UINT8_MAX);
10807 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10808 RTUINT64U NewRsp;
10809 NewRsp.u = *puNewRsp;
10810 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10811 *puNewRsp = NewRsp.u;
10812 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10813}
10814
10815
10816/**
10817 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10818 * iemMemStackPopContinueSpecial).
10819 *
10820 * The caller will manually commit the rSP.
10821 *
10822 * @returns Strict VBox status code.
10823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10824 * @param pvMem The pointer returned by
10825 * iemMemStackPopBeginSpecial() or
10826 * iemMemStackPopContinueSpecial().
10827 */
10828IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10829{
10830 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10831}
10832
10833
10834/**
10835 * Fetches a system table byte.
10836 *
10837 * @returns Strict VBox status code.
10838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10839 * @param pbDst Where to return the byte.
10840 * @param iSegReg The index of the segment register to use for
10841 * this access. The base and limits are checked.
10842 * @param GCPtrMem The address of the guest memory.
10843 */
10844IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10845{
10846 /* The lazy approach for now... */
10847 uint8_t const *pbSrc;
10848 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10849 if (rc == VINF_SUCCESS)
10850 {
10851 *pbDst = *pbSrc;
10852 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10853 }
10854 return rc;
10855}
10856
10857
10858/**
10859 * Fetches a system table word.
10860 *
10861 * @returns Strict VBox status code.
10862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10863 * @param pu16Dst Where to return the word.
10864 * @param iSegReg The index of the segment register to use for
10865 * this access. The base and limits are checked.
10866 * @param GCPtrMem The address of the guest memory.
10867 */
10868IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10869{
10870 /* The lazy approach for now... */
10871 uint16_t const *pu16Src;
10872 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10873 if (rc == VINF_SUCCESS)
10874 {
10875 *pu16Dst = *pu16Src;
10876 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10877 }
10878 return rc;
10879}
10880
10881
10882/**
10883 * Fetches a system table dword.
10884 *
10885 * @returns Strict VBox status code.
10886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10887 * @param pu32Dst Where to return the dword.
10888 * @param iSegReg The index of the segment register to use for
10889 * this access. The base and limits are checked.
10890 * @param GCPtrMem The address of the guest memory.
10891 */
10892IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10893{
10894 /* The lazy approach for now... */
10895 uint32_t const *pu32Src;
10896 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10897 if (rc == VINF_SUCCESS)
10898 {
10899 *pu32Dst = *pu32Src;
10900 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10901 }
10902 return rc;
10903}
10904
10905
10906/**
10907 * Fetches a system table qword.
10908 *
10909 * @returns Strict VBox status code.
10910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10911 * @param pu64Dst Where to return the qword.
10912 * @param iSegReg The index of the segment register to use for
10913 * this access. The base and limits are checked.
10914 * @param GCPtrMem The address of the guest memory.
10915 */
10916IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10917{
10918 /* The lazy approach for now... */
10919 uint64_t const *pu64Src;
10920 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10921 if (rc == VINF_SUCCESS)
10922 {
10923 *pu64Dst = *pu64Src;
10924 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10925 }
10926 return rc;
10927}
10928
10929
10930/**
10931 * Fetches a descriptor table entry with caller specified error code.
10932 *
10933 * @returns Strict VBox status code.
10934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10935 * @param pDesc Where to return the descriptor table entry.
10936 * @param uSel The selector which table entry to fetch.
10937 * @param uXcpt The exception to raise on table lookup error.
10938 * @param uErrorCode The error code associated with the exception.
10939 */
10940IEM_STATIC VBOXSTRICTRC
10941iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10942{
10943 AssertPtr(pDesc);
10944 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10945
10946 /** @todo did the 286 require all 8 bytes to be accessible? */
10947 /*
10948 * Get the selector table base and check bounds.
10949 */
10950 RTGCPTR GCPtrBase;
10951 if (uSel & X86_SEL_LDT)
10952 {
10953 if ( !pCtx->ldtr.Attr.n.u1Present
10954 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10955 {
10956 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10957 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10958 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10959 uErrorCode, 0);
10960 }
10961
10962 Assert(pCtx->ldtr.Attr.n.u1Present);
10963 GCPtrBase = pCtx->ldtr.u64Base;
10964 }
10965 else
10966 {
10967 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10968 {
10969 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10970 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10971 uErrorCode, 0);
10972 }
10973 GCPtrBase = pCtx->gdtr.pGdt;
10974 }
10975
10976 /*
10977 * Read the legacy descriptor and maybe the long mode extensions if
10978 * required.
10979 */
10980 VBOXSTRICTRC rcStrict;
10981 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10982 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10983 else
10984 {
10985 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10986 if (rcStrict == VINF_SUCCESS)
10987 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10988 if (rcStrict == VINF_SUCCESS)
10989 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10990 if (rcStrict == VINF_SUCCESS)
10991 pDesc->Legacy.au16[3] = 0;
10992 else
10993 return rcStrict;
10994 }
10995
10996 if (rcStrict == VINF_SUCCESS)
10997 {
10998 if ( !IEM_IS_LONG_MODE(pVCpu)
10999 || pDesc->Legacy.Gen.u1DescType)
11000 pDesc->Long.au64[1] = 0;
11001 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11002 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11003 else
11004 {
11005 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11006 /** @todo is this the right exception? */
11007 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11008 }
11009 }
11010 return rcStrict;
11011}
11012
11013
11014/**
11015 * Fetches a descriptor table entry.
11016 *
11017 * @returns Strict VBox status code.
11018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11019 * @param pDesc Where to return the descriptor table entry.
11020 * @param uSel The selector which table entry to fetch.
11021 * @param uXcpt The exception to raise on table lookup error.
11022 */
11023IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11024{
11025 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11026}
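
/* A rough illustration of how the selector descriptor helpers above and
 * iemMemMarkSelDescAccessed further down are typically combined by a caller.
 * This is a hypothetical fragment for documentation purposes only (uNewCs is
 * a placeholder), not an actual instruction implementation:
 *
 * @code
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewCs, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (Desc.Legacy.Gen.u1DescType)     // code/data, not a system descriptor
 *      {
 *          // ... validate type, DPL and present bit, raising exceptions as needed ...
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *      }
 * @endcode
 */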
11027
11028
11029/**
11030 * Fakes a long mode stack segment descriptor for SS = 0.
11031 *
11032 * @param pDescSs Where to return the fake stack descriptor.
11033 * @param uDpl The DPL we want.
11034 */
11035IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11036{
11037 pDescSs->Long.au64[0] = 0;
11038 pDescSs->Long.au64[1] = 0;
11039 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11040 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11041 pDescSs->Long.Gen.u2Dpl = uDpl;
11042 pDescSs->Long.Gen.u1Present = 1;
11043 pDescSs->Long.Gen.u1Long = 1;
11044}
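
/* Note: the helper above covers the long mode cases where SS is a NULL
 * selector and there is consequently no descriptor to fetch; it synthesizes
 * a present, read/write (accessed) data descriptor with the requested DPL so
 * that callers can handle the NULL-SS case the same way as a real stack
 * selector. */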
11045
11046
11047/**
11048 * Marks the selector descriptor as accessed (only non-system descriptors).
11049 *
11050 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11051 * will therefore skip the limit checks.
11052 *
11053 * @returns Strict VBox status code.
11054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11055 * @param uSel The selector.
11056 */
11057IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11058{
11059 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11060
11061 /*
11062 * Get the selector table base and calculate the entry address.
11063 */
11064 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11065 ? pCtx->ldtr.u64Base
11066 : pCtx->gdtr.pGdt;
11067 GCPtr += uSel & X86_SEL_MASK;
11068
11069 /*
11070 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11071 * ugly stuff to avoid this. This will make sure it's an atomic access as
11072 * well as more or less remove any question about 8-bit or 32-bit accesses.
11073 */
11074 VBOXSTRICTRC rcStrict;
11075 uint32_t volatile *pu32;
11076 if ((GCPtr & 3) == 0)
11077 {
11078 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11079 GCPtr += 2 + 2;
11080 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11081 if (rcStrict != VINF_SUCCESS)
11082 return rcStrict;
11083 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11084 }
11085 else
11086 {
11087 /* The misaligned GDT/LDT case, map the whole thing. */
11088 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11089 if (rcStrict != VINF_SUCCESS)
11090 return rcStrict;
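        /* Bit 40 of the descriptor is the accessed bit.  The cases below pick
           the nearest 32-bit aligned host address at or after pu32 and lower
           the bit index by 8 for each byte skipped. */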
11091 switch ((uintptr_t)pu32 & 3)
11092 {
11093 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11094 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11095 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11096 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11097 }
11098 }
11099
11100 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11101}
11102
11103/** @} */
11104
11105
11106/*
11107 * Include the C/C++ implementation of the instructions.
11108 */
11109#include "IEMAllCImpl.cpp.h"
11110
11111
11112
11113/** @name "Microcode" macros.
11114 *
11115 * The idea is that we should be able to use the same code both to interpret
11116 * instructions and to drive a future recompiler. Thus this obfuscation.
11117 *
11118 * @{
11119 */
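
/* To give an idea of what a decoder function body built from these macros
 * looks like, here is a rough, purely illustrative sketch of a 32-bit
 * register-to-register operation (pfnAImpl, iGRegDst and iGRegSrc are
 * placeholders, not actual IEM symbols):
 *
 * @code
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,   u32Src,  1);
 *      IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *      IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *      IEM_MC_CALL_VOID_AIMPL_2(pfnAImpl, pu32Dst, u32Src);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 *
 * The IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF step follows the @todo note attached
 * to IEM_MC_REF_GREG_U32 below about clearing the high half on commit.
 */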
11120#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11121#define IEM_MC_END() }
11122#define IEM_MC_PAUSE() do {} while (0)
11123#define IEM_MC_CONTINUE() do {} while (0)
11124
11125/** Internal macro. */
11126#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11127 do \
11128 { \
11129 VBOXSTRICTRC rcStrict2 = a_Expr; \
11130 if (rcStrict2 != VINF_SUCCESS) \
11131 return rcStrict2; \
11132 } while (0)
11133
11134
11135#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11136#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11137#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11138#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11139#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11140#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11141#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11142#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11143#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11144 do { \
11145 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11146 return iemRaiseDeviceNotAvailable(pVCpu); \
11147 } while (0)
11148#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11149 do { \
11150 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11151 return iemRaiseDeviceNotAvailable(pVCpu); \
11152 } while (0)
11153#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11154 do { \
11155 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11156 return iemRaiseMathFault(pVCpu); \
11157 } while (0)
11158#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11159 do { \
11160 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11161 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11162 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11163 return iemRaiseUndefinedOpcode(pVCpu); \
11164 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11165 return iemRaiseDeviceNotAvailable(pVCpu); \
11166 } while (0)
11167#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11168 do { \
11169 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11170 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11171 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11172 return iemRaiseUndefinedOpcode(pVCpu); \
11173 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11174 return iemRaiseDeviceNotAvailable(pVCpu); \
11175 } while (0)
11176#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11177 do { \
11178 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11179 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11180 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11181 return iemRaiseUndefinedOpcode(pVCpu); \
11182 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11183 return iemRaiseDeviceNotAvailable(pVCpu); \
11184 } while (0)
11185#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11186 do { \
11187 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11188 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11189 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11190 return iemRaiseUndefinedOpcode(pVCpu); \
11191 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11192 return iemRaiseDeviceNotAvailable(pVCpu); \
11193 } while (0)
11194#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11195 do { \
11196 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11197 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11198 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11199 return iemRaiseUndefinedOpcode(pVCpu); \
11200 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11201 return iemRaiseDeviceNotAvailable(pVCpu); \
11202 } while (0)
11203#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11204 do { \
11205 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11206 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11207 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11208 return iemRaiseUndefinedOpcode(pVCpu); \
11209 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11210 return iemRaiseDeviceNotAvailable(pVCpu); \
11211 } while (0)
11212#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11213 do { \
11214 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11215 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11216 return iemRaiseUndefinedOpcode(pVCpu); \
11217 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11218 return iemRaiseDeviceNotAvailable(pVCpu); \
11219 } while (0)
11220#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11221 do { \
11222 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11223 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11224 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11225 return iemRaiseUndefinedOpcode(pVCpu); \
11226 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11227 return iemRaiseDeviceNotAvailable(pVCpu); \
11228 } while (0)
11229#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11230 do { \
11231 if (pVCpu->iem.s.uCpl != 0) \
11232 return iemRaiseGeneralProtectionFault0(pVCpu); \
11233 } while (0)
11234#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11235 do { \
11236 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11237 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11238 } while (0)
11239
11240
11241#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11242#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11243#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11244#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11245#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11246#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11247#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11248 uint32_t a_Name; \
11249 uint32_t *a_pName = &a_Name
11250#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11251 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11252
11253#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11254#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11255
11256#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11257#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11258#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11259#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11260#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11261#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11262#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11266#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11267#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11268#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11269#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11270#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11271#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11272#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11273#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11274#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11275#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11276#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11277#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11278#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11279#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11280#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11281#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11282#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11283#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11284#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11285/** @note Not for IOPL or IF testing or modification. */
11286#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11287#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11288#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11289#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11290
11291#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11292#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11293#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11294#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11295#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11296#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11297#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11298#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11299#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11300#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11301#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11302 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11303
11304#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11305#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11306/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11307 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11308#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11309#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11310/** @note Not for IOPL or IF testing or modification. */
11311#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11312
11313#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11314#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11315#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11316 do { \
11317 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11318 *pu32Reg += (a_u32Value); \
11319 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11320 } while (0)
11321#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11322
11323#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11324#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11325#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11326 do { \
11327 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11328 *pu32Reg -= (a_u32Value); \
11329 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11330 } while (0)
11331#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11332#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11333
11334#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11335#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11336#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11337#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11338#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11339#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11340#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11341
11342#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11343#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11344#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11345#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11346
11347#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11348#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11349#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11350
11351#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11352#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11353#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11354
11355#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11356#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11357#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11358
11359#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11360#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11361#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11362
11363#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11364
11365#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11366
11367#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11368#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11369#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11370 do { \
11371 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11372 *pu32Reg &= (a_u32Value); \
11373 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11374 } while (0)
11375#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11376
11377#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11378#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11379#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11380 do { \
11381 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11382 *pu32Reg |= (a_u32Value); \
11383 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11384 } while (0)
11385#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11386
11387
11388/** @note Not for IOPL or IF modification. */
11389#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11390/** @note Not for IOPL or IF modification. */
11391#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11392/** @note Not for IOPL or IF modification. */
11393#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11394
11395#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11396
11397/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid) if necessary. */
11398#define IEM_MC_FPU_TO_MMX_MODE() do { \
11399 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11400 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11401 } while (0)
11402
11403/** Switches the FPU state from MMX mode (all tags empty again). */
11404#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11405 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11406 } while (0)
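
/* Note: x87.FTW follows the FXSAVE layout and thus holds the abridged tag
 * byte: 0xff marks all eight registers as valid (architectural tag word
 * 0x0000) and 0 marks them all empty (architectural 0xffff), hence the
 * values used by the two macros above. */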
11407
11408#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11409 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11410#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11411 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11412#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11413 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11414 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11415 } while (0)
11416#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11417 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11418 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11419 } while (0)
11420#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11421 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11422#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11423 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11424#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11425 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11426
11427#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11428 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11429 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11430 } while (0)
11431#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11432 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11433#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11434 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11435#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11436 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11437#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11438 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11439 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11440 } while (0)
11441#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11442 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11443#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11444 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11445 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11446 } while (0)
11447#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11448 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11449#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11450 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11451 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11452 } while (0)
11453#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11454 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11455#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11456 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11457#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11458 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11459#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11460 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11461#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11462 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11463 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11464 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11465 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11466 } while (0)
11467
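/* Note: the 256-bit YMM registers are stored split in the XSAVE area: the low
 * 128 bits live in the legacy SSE region (x87.aXMM) while the high 128 bits
 * live in the YMM_Hi128 component (u.YmmHi.aYmmHi).  That is why the YREG
 * macros below access x87.aXMM for the low half and u.YmmHi.aYmmHi for the
 * high half. */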
11468#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11469 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11470 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11471 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11472 } while (0)
11473#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11474 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11475 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11476 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11477 } while (0)
11478#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11479 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11480 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11481 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11482 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11483 } while (0)
11484#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11485 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11486 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11487 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11488 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11489 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11490 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11491 } while (0)
11492
11493#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11494#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11495 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11496 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11498 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11499 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11500 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11501 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11502 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11503 } while (0)
11504#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11505 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11506 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11507 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11508 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11509 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11510 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11511 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11512 } while (0)
11513#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11514 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11515 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11516 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11517 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11518 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11519 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11520 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11521 } while (0)
11522#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11523 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11524 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11525 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11526 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11527 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11528 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11529 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11530 } while (0)
11531
11532#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11533 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11534#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11535 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11536#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11537 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11538#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11539 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11540 uintptr_t const iYRegTmp = (a_iYReg); \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11543 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11544 } while (0)
11545
11546#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11547 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11548 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11549 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11551 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11554 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11555 } while (0)
11556#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11557 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11558 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11559 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11560 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11561 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11562 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11563 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11564 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11565 } while (0)
11566#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11567 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11568 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11569 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11570 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11571 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11572 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11573 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11574 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11575 } while (0)
11576
11577#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11578 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11579 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11580 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11581 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11582 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11587 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11588 } while (0)
11589#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11590 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11591 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11592 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11593 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11598 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11599 } while (0)
11600#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11601 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11602 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11603 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11604 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11605 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11606 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11607 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11608 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11609 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11610 } while (0)
11611#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11612 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11613 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11614 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11615 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11616 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11617 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11618 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11619 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11620 } while (0)
11621
11622#ifndef IEM_WITH_SETJMP
11623# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11625# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11626 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11627# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11629#else
11630# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11631 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11632# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11633 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11634# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11635 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11636#endif
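
/* Note: the memory access macros in the rest of this section come in two
 * flavours: without IEM_WITH_SETJMP the iemMem* workers return a strict
 * status code which IEM_MC_RETURN_ON_FAILURE propagates to the caller, while
 * with IEM_WITH_SETJMP the *Jmp workers return the value directly and are
 * assumed to report failures via longjmp instead. */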
11637
11638#ifndef IEM_WITH_SETJMP
11639# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11641# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11643# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11645#else
11646# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11647 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11648# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11649 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11650# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11651 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11652#endif
11653
11654#ifndef IEM_WITH_SETJMP
11655# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11657# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11659# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11661#else
11662# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11663 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11664# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11665 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11666# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11667 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11668#endif
11669
11670#ifdef SOME_UNUSED_FUNCTION
11671# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11673#endif
11674
11675#ifndef IEM_WITH_SETJMP
11676# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11680# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11684#else
11685# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11686 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11687# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11688 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11689# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11690 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11691# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11692 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11693#endif
11694
11695#ifndef IEM_WITH_SETJMP
11696# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11700# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11702#else
11703# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11706 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11707# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11708 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11709#endif
11710
11711#ifndef IEM_WITH_SETJMP
11712# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11716#else
11717# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11718 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11719# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11720 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11721#endif
11722
11723#ifndef IEM_WITH_SETJMP
11724# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11728#else
11729# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11730 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11731# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11732 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11733#endif
11734
11735
11736
11737#ifndef IEM_WITH_SETJMP
11738# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11739 do { \
11740 uint8_t u8Tmp; \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11742 (a_u16Dst) = u8Tmp; \
11743 } while (0)
11744# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11745 do { \
11746 uint8_t u8Tmp; \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11748 (a_u32Dst) = u8Tmp; \
11749 } while (0)
11750# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 do { \
11752 uint8_t u8Tmp; \
11753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11754 (a_u64Dst) = u8Tmp; \
11755 } while (0)
11756# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11757 do { \
11758 uint16_t u16Tmp; \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11760 (a_u32Dst) = u16Tmp; \
11761 } while (0)
11762# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11763 do { \
11764 uint16_t u16Tmp; \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11766 (a_u64Dst) = u16Tmp; \
11767 } while (0)
11768# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint32_t u32Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u64Dst) = u32Tmp; \
11773 } while (0)
11774#else /* IEM_WITH_SETJMP */
11775# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11776 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11777# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11778 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11779# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11780 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11781# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11782 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11783# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11785# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11786 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11787#endif /* IEM_WITH_SETJMP */
11788
11789#ifndef IEM_WITH_SETJMP
11790# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11791 do { \
11792 uint8_t u8Tmp; \
11793 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11794 (a_u16Dst) = (int8_t)u8Tmp; \
11795 } while (0)
11796# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11797 do { \
11798 uint8_t u8Tmp; \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11800 (a_u32Dst) = (int8_t)u8Tmp; \
11801 } while (0)
11802# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint8_t u8Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u64Dst) = (int8_t)u8Tmp; \
11807 } while (0)
11808# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11809 do { \
11810 uint16_t u16Tmp; \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11812 (a_u32Dst) = (int16_t)u16Tmp; \
11813 } while (0)
11814# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11815 do { \
11816 uint16_t u16Tmp; \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11818 (a_u64Dst) = (int16_t)u16Tmp; \
11819 } while (0)
11820# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint32_t u32Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u64Dst) = (int32_t)u32Tmp; \
11825 } while (0)
11826#else /* IEM_WITH_SETJMP */
11827# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11828 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11829# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11830 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11831# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11832 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11833# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11834 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11835# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11837# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11838 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11839#endif /* IEM_WITH_SETJMP */
11840
11841#ifndef IEM_WITH_SETJMP
11842# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11844# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11846# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11848# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11850#else
11851# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11852 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11853# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11854 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11855# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11856 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11857# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11858 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11859#endif
11860
11861#ifndef IEM_WITH_SETJMP
11862# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11863 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11864# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11865 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11866# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11868# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11869 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11870#else
11871# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11872 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11873# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11874 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11875# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11876 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11877# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11878 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11879#endif
11880
11881#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11882#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11883#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11884#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11885#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11886#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11887#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11888 do { \
11889 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11890 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11891 } while (0)
11892
11893#ifndef IEM_WITH_SETJMP
11894# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11895 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11896# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11898#else
11899# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11900 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11901# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11902 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11903#endif
11904
11905#ifndef IEM_WITH_SETJMP
11906# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11908# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11910#else
11911# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11912 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11913# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11914 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11915#endif
11916
11917
11918#define IEM_MC_PUSH_U16(a_u16Value) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11920#define IEM_MC_PUSH_U32(a_u32Value) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11922#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11924#define IEM_MC_PUSH_U64(a_u64Value) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11926
11927#define IEM_MC_POP_U16(a_pu16Value) \
11928 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11929#define IEM_MC_POP_U32(a_pu32Value) \
11930 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11931#define IEM_MC_POP_U64(a_pu64Value) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11933
11934/** Maps guest memory for direct or bounce buffered access.
11935 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11936 * @remarks May return.
11937 */
11938#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11939 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11940
11941/** Maps guest memory for direct or bounce buffered access.
11942 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11943 * @remarks May return.
11944 */
11945#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11947
11948/** Commits the memory and unmaps the guest memory.
11949 * @remarks May return.
11950 */
11951#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11952 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11953
11954/** Commits the memory and unmaps the guest memory unless the FPU status word
11955 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11956 * that would cause FLD not to store.
11957 *
11958 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11959 * store, while \#P will not.
11960 *
11961 * @remarks May in theory return - for now.
11962 */
11963#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11964 do { \
11965 if ( !(a_u16FSW & X86_FSW_ES) \
11966 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11967 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11968 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11969 } while (0)
11970
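/*
 * Worked example for IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE above, using
 * illustrative values only: with a_u16FSW = 0x0081 (ES plus a pending #I) and
 * FCW = 0x037e (IM clear, i.e. #I unmasked), the test
 * (FSW & (UE|OE|IE)) & ~(FCW & MASK_ALL) yields 0x0001 & ~0x003e = 0x0001,
 * so the commit is skipped and FLD does not store.  With the power-up FCW of
 * 0x037f (all exceptions masked) the same FSW gives 0x0001 & ~0x003f = 0 and
 * the store is committed as usual.
 */
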
11971/** Calculates the effective address from R/M. */
11972#ifndef IEM_WITH_SETJMP
11973# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11974 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11975#else
11976# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11977 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11978#endif
11979
11980#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11981#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11982#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11983#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11984#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11985#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11986#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11987
11988/**
11989 * Defers the rest of the instruction emulation to a C implementation routine
11990 * and returns, only taking the standard parameters.
11991 *
11992 * @param a_pfnCImpl The pointer to the C routine.
11993 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11994 */
11995#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11996
11997/**
11998 * Defers the rest of instruction emulation to a C implementation routine and
11999 * returns, taking one argument in addition to the standard ones.
12000 *
12001 * @param a_pfnCImpl The pointer to the C routine.
12002 * @param a0 The argument.
12003 */
12004#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12005
12006/**
12007 * Defers the rest of the instruction emulation to a C implementation routine
12008 * and returns, taking two arguments in addition to the standard ones.
12009 *
12010 * @param a_pfnCImpl The pointer to the C routine.
12011 * @param a0 The first extra argument.
12012 * @param a1 The second extra argument.
12013 */
12014#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12015
12016/**
12017 * Defers the rest of the instruction emulation to a C implementation routine
12018 * and returns, taking three arguments in addition to the standard ones.
12019 *
12020 * @param a_pfnCImpl The pointer to the C routine.
12021 * @param a0 The first extra argument.
12022 * @param a1 The second extra argument.
12023 * @param a2 The third extra argument.
12024 */
12025#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12026
12027/**
12028 * Defers the rest of the instruction emulation to a C implementation routine
12029 * and returns, taking four arguments in addition to the standard ones.
12030 *
12031 * @param a_pfnCImpl The pointer to the C routine.
12032 * @param a0 The first extra argument.
12033 * @param a1 The second extra argument.
12034 * @param a2 The third extra argument.
12035 * @param a3 The fourth extra argument.
12036 */
12037#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12038
12039/**
12040 * Defers the rest of the instruction emulation to a C implementation routine
12041 * and returns, taking five arguments in addition to the standard ones.
12042 *
12043 * @param a_pfnCImpl The pointer to the C routine.
12044 * @param a0 The first extra argument.
12045 * @param a1 The second extra argument.
12046 * @param a2 The third extra argument.
12047 * @param a3 The fourth extra argument.
12048 * @param a4 The fifth extra argument.
12049 */
12050#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12051
12052/**
12053 * Defers the entire instruction emulation to a C implementation routine and
12054 * returns, only taking the standard parameters.
12055 *
12056 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12057 *
12058 * @param a_pfnCImpl The pointer to the C routine.
12059 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12060 */
12061#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12062
12063/**
12064 * Defers the entire instruction emulation to a C implementation routine and
12065 * returns, taking one argument in addition to the standard ones.
12066 *
12067 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12068 *
12069 * @param a_pfnCImpl The pointer to the C routine.
12070 * @param a0 The argument.
12071 */
12072#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12073
12074/**
12075 * Defers the entire instruction emulation to a C implementation routine and
12076 * returns, taking two arguments in addition to the standard ones.
12077 *
12078 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12079 *
12080 * @param a_pfnCImpl The pointer to the C routine.
12081 * @param a0 The first extra argument.
12082 * @param a1 The second extra argument.
12083 */
12084#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12085
12086/**
12087 * Defers the entire instruction emulation to a C implementation routine and
12088 * returns, taking three arguments in addition to the standard ones.
12089 *
12090 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12091 *
12092 * @param a_pfnCImpl The pointer to the C routine.
12093 * @param a0 The first extra argument.
12094 * @param a1 The second extra argument.
12095 * @param a2 The third extra argument.
12096 */
12097#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12098
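/*
 * Illustrative sketch (not part of the build): a minimal opcode handler that
 * defers the whole instruction to a C implementation.  FNIEMOP_DEF and
 * iemCImpl_hlt are defined elsewhere in IEM and are merely assumed here:
 *
 *   FNIEMOP_DEF(iemOp_hlt)
 *   {
 *       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *       return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *   }
 */
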
12099/**
12100 * Calls a FPU assembly implementation taking one visible argument.
12101 *
12102 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12103 * @param a0 The first extra argument.
12104 */
12105#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12106 do { \
12107 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12108 } while (0)
12109
12110/**
12111 * Calls a FPU assembly implementation taking two visible arguments.
12112 *
12113 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12114 * @param a0 The first extra argument.
12115 * @param a1 The second extra argument.
12116 */
12117#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12118 do { \
12119 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12120 } while (0)
12121
12122/**
12123 * Calls a FPU assembly implementation taking three visible arguments.
12124 *
12125 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12126 * @param a0 The first extra argument.
12127 * @param a1 The second extra argument.
12128 * @param a2 The third extra argument.
12129 */
12130#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12131 do { \
12132 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12133 } while (0)
12134
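/*
 * Illustrative sketch (not part of the build) of how the FPU AIMPL call
 * macros above typically combine with the result and underflow helpers
 * further down.  IEM_MC_BEGIN/IEM_MC_LOCAL/IEM_MC_ARG*/IEM_MC_ADVANCE_RIP/
 * IEM_MC_END live elsewhere in this file, iemAImpl_fadd_r80_by_r80 is an
 * assumed worker name, and iStReg is the second operand's register number
 * supplied by the decoder:
 *
 *   IEM_MC_BEGIN(3, 1);
 *   IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *   IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *   IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *   IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *   IEM_MC_PREPARE_FPU_USAGE();
 *   IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *       IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *       IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *   IEM_MC_ELSE()
 *       IEM_MC_FPU_STACK_UNDERFLOW(0);
 *   IEM_MC_ENDIF();
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 */
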
12135#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12136 do { \
12137 (a_FpuData).FSW = (a_FSW); \
12138 (a_FpuData).r80Result = *(a_pr80Value); \
12139 } while (0)
12140
12141/** Pushes FPU result onto the stack. */
12142#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12143 iemFpuPushResult(pVCpu, &a_FpuData)
12144/** Pushes FPU result onto the stack and sets the FPUDP. */
12145#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12146 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12147
12148/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12149#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12150 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12151
12152/** Stores FPU result in a stack register. */
12153#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12154 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12155/** Stores FPU result in a stack register and pops the stack. */
12156#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12157 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12158/** Stores FPU result in a stack register and sets the FPUDP. */
12159#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12160 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12161/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12162 * stack. */
12163#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12164 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12165
12166/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12167#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12168 iemFpuUpdateOpcodeAndIp(pVCpu)
12169/** Free a stack register (for FFREE and FFREEP). */
12170#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12171 iemFpuStackFree(pVCpu, a_iStReg)
12172/** Increment the FPU stack pointer. */
12173#define IEM_MC_FPU_STACK_INC_TOP() \
12174 iemFpuStackIncTop(pVCpu)
12175/** Decrement the FPU stack pointer. */
12176#define IEM_MC_FPU_STACK_DEC_TOP() \
12177 iemFpuStackDecTop(pVCpu)
12178
12179/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12180#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12181 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12182/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12183#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12184 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12185/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12186#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12187 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12188/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12189#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12190 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12191/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12192 * stack. */
12193#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12194 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12195/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12196#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12197 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12198
12199/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12200#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12201 iemFpuStackUnderflow(pVCpu, a_iStDst)
12202/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12203 * stack. */
12204#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12205 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12206/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12207 * FPUDS. */
12208#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12209 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12210/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12211 * FPUDS. Pops stack. */
12212#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12213 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12214/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12215 * stack twice. */
12216#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12217 iemFpuStackUnderflowThenPopPop(pVCpu)
12218/** Raises a FPU stack underflow exception for an instruction pushing a result
12219 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12220#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12221 iemFpuStackPushUnderflow(pVCpu)
12222/** Raises a FPU stack underflow exception for an instruction pushing a result
12223 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12224#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12225 iemFpuStackPushUnderflowTwo(pVCpu)
12226
12227/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12228 * FPUIP, FPUCS and FOP. */
12229#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12230 iemFpuStackPushOverflow(pVCpu)
12231/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12232 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12233#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12234 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12235/** Prepares for using the FPU state.
12236 * Ensures that we can use the host FPU in the current context (RC+R0).
12237 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12238#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12239/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12240#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12241/** Actualizes the guest FPU state so it can be accessed and modified. */
12242#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12243
12244/** Prepares for using the SSE state.
12245 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12246 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12247#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12248/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12249#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12250/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12251#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12252
12253/** Prepares for using the AVX state.
12254 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12255 * Ensures the guest AVX state in the CPUMCTX is up to date.
12256 * @note This will include the AVX512 state too when support for it is added
12257 * due to the zero extending feature of VEX instructions. */
12258#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12259/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12260#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12261/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12262#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12263
12264/**
12265 * Calls a MMX assembly implementation taking two visible arguments.
12266 *
12267 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12268 * @param a0 The first extra argument.
12269 * @param a1 The second extra argument.
12270 */
12271#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12272 do { \
12273 IEM_MC_PREPARE_FPU_USAGE(); \
12274 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12275 } while (0)
12276
12277/**
12278 * Calls a MMX assembly implementation taking three visible arguments.
12279 *
12280 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12281 * @param a0 The first extra argument.
12282 * @param a1 The second extra argument.
12283 * @param a2 The third extra argument.
12284 */
12285#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12286 do { \
12287 IEM_MC_PREPARE_FPU_USAGE(); \
12288 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12289 } while (0)
12290
12291
12292/**
12293 * Calls a SSE assembly implementation taking two visible arguments.
12294 *
12295 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12296 * @param a0 The first extra argument.
12297 * @param a1 The second extra argument.
12298 */
12299#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12300 do { \
12301 IEM_MC_PREPARE_SSE_USAGE(); \
12302 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12303 } while (0)
12304
12305/**
12306 * Calls a SSE assembly implementation taking three visible arguments.
12307 *
12308 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12309 * @param a0 The first extra argument.
12310 * @param a1 The second extra argument.
12311 * @param a2 The third extra argument.
12312 */
12313#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12314 do { \
12315 IEM_MC_PREPARE_SSE_USAGE(); \
12316 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12317 } while (0)
12318
12319
12320/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12321 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12322#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12323 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12324
12325/**
12326 * Calls a AVX assembly implementation taking two visible arguments.
12327 *
12328 * There is one implicit zero'th argument, a pointer to the extended state.
12329 *
12330 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12331 * @param a1 The first extra argument.
12332 * @param a2 The second extra argument.
12333 */
12334#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12335 do { \
12336 IEM_MC_PREPARE_AVX_USAGE(); \
12337 a_pfnAImpl(pXState, (a1), (a2)); \
12338 } while (0)
12339
12340/**
12341 * Calls a AVX assembly implementation taking three visible arguments.
12342 *
12343 * There is one implicit zero'th argument, a pointer to the extended state.
12344 *
12345 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12346 * @param a1 The first extra argument.
12347 * @param a2 The second extra argument.
12348 * @param a3 The third extra argument.
12349 */
12350#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12351 do { \
12352 IEM_MC_PREPARE_AVX_USAGE(); \
12353 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12354 } while (0)
12355
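/*
 * Illustrative sketch (not part of the build) of how the implicit zero'th
 * argument declared by IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() pairs up with
 * IEM_MC_CALL_AVX_AIMPL_2.  IEM_MC_BEGIN/IEM_MC_ARG/IEM_MC_ADVANCE_RIP/
 * IEM_MC_END live elsewhere in this file; pfnWorker and the register-index
 * arguments are assumptions made for the example:
 *
 *   IEM_MC_BEGIN(3, 0);
 *   IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();            // argument 0: pXState
 *   IEM_MC_ARG(uint8_t, iYRegDst, 1);
 *   IEM_MC_ARG(uint8_t, iYRegSrc, 2);
 *   IEM_MC_CALL_AVX_AIMPL_2(pfnWorker, iYRegDst, iYRegSrc);
 *   IEM_MC_ADVANCE_RIP();
 *   IEM_MC_END();
 */
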
12356/** @note Not for IOPL or IF testing. */
12357#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12358/** @note Not for IOPL or IF testing. */
12359#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12360/** @note Not for IOPL or IF testing. */
12361#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12362/** @note Not for IOPL or IF testing. */
12363#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12364/** @note Not for IOPL or IF testing. */
12365#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12366 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12367 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12370 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12371 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12374 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12375 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12376 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12377/** @note Not for IOPL or IF testing. */
12378#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12379 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12380 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12381 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12382#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12383#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12384#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12385/** @note Not for IOPL or IF testing. */
12386#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12387 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12388 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12389/** @note Not for IOPL or IF testing. */
12390#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12391 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12392 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12393/** @note Not for IOPL or IF testing. */
12394#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12395 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12396 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12397/** @note Not for IOPL or IF testing. */
12398#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12399 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12400 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12401/** @note Not for IOPL or IF testing. */
12402#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12403 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12404 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12405/** @note Not for IOPL or IF testing. */
12406#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12407 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12408 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12409#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12410#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12411
12412#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12413 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12414#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12415 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12416#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12417 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12418#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12419 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12420#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12421 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12422#define IEM_MC_IF_FCW_IM() \
12423 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12424
12425#define IEM_MC_ELSE() } else {
12426#define IEM_MC_ENDIF() } do {} while (0)
12427
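/*
 * Illustrative sketch (not part of the build) of the IEM_MC_IF_* / IEM_MC_ELSE
 * / IEM_MC_ENDIF nesting, roughly what a conditional jump decoder emits.
 * IEM_MC_BEGIN/IEM_MC_END, IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are
 * defined elsewhere in this file; i8Imm is the decoded displacement:
 *
 *   IEM_MC_BEGIN(0, 0);
 *   IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *       IEM_MC_REL_JMP_S8(i8Imm);
 *   IEM_MC_ELSE()
 *       IEM_MC_ADVANCE_RIP();
 *   IEM_MC_ENDIF();
 *   IEM_MC_END();
 */
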
12428/** @} */
12429
12430
12431/** @name Opcode Debug Helpers.
12432 * @{
12433 */
12434#ifdef VBOX_WITH_STATISTICS
12435# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12436#else
12437# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12438#endif
12439
12440#ifdef DEBUG
12441# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12442 do { \
12443 IEMOP_INC_STATS(a_Stats); \
12444 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12445 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12446 } while (0)
12447
12448# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12449 do { \
12450 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12451 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12452 (void)RT_CONCAT(OP_,a_Upper); \
12453 (void)(a_fDisHints); \
12454 (void)(a_fIemHints); \
12455 } while (0)
12456
12457# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12458 do { \
12459 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12460 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12461 (void)RT_CONCAT(OP_,a_Upper); \
12462 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12463 (void)(a_fDisHints); \
12464 (void)(a_fIemHints); \
12465 } while (0)
12466
12467# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12468 do { \
12469 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12470 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12471 (void)RT_CONCAT(OP_,a_Upper); \
12472 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12473 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12474 (void)(a_fDisHints); \
12475 (void)(a_fIemHints); \
12476 } while (0)
12477
12478# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12479 do { \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12481 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12482 (void)RT_CONCAT(OP_,a_Upper); \
12483 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12484 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12485 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12486 (void)(a_fDisHints); \
12487 (void)(a_fIemHints); \
12488 } while (0)
12489
12490# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12491 do { \
12492 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12493 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12494 (void)RT_CONCAT(OP_,a_Upper); \
12495 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12496 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12497 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12498 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12499 (void)(a_fDisHints); \
12500 (void)(a_fIemHints); \
12501 } while (0)
12502
12503#else
12504# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12505
12506# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12507 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12508# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12509 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12510# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12511 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12512# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12513 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12514# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12515 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12516
12517#endif
12518
12519#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12520 IEMOP_MNEMONIC0EX(a_Lower, \
12521 #a_Lower, \
12522 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12523#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12524 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12525 #a_Lower " " #a_Op1, \
12526 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12527#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12528 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12529 #a_Lower " " #a_Op1 "," #a_Op2, \
12530 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12531#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12532 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12533 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12534 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12535#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12536 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12537 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12538 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12539
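/*
 * Expansion sketch (illustrative, not compiled): a typical invocation like
 *
 *   IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *
 * token-pastes the statistics member name 'add_Eb_Gb' and stringizes the
 * mnemonic "add Eb,Gb" before handing both to IEMOP_MNEMONIC2EX, which bumps
 * the per-instruction counter and, in DEBUG builds, Log4()s the decode.
 * The form, operand and hint values shown are examples only.
 */
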
12540/** @} */
12541
12542
12543/** @name Opcode Helpers.
12544 * @{
12545 */
12546
12547#ifdef IN_RING3
12548# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12549 do { \
12550 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12551 else \
12552 { \
12553 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12554 return IEMOP_RAISE_INVALID_OPCODE(); \
12555 } \
12556 } while (0)
12557#else
12558# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12559 do { \
12560 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12561 else return IEMOP_RAISE_INVALID_OPCODE(); \
12562 } while (0)
12563#endif
12564
12565/** The instruction requires a 186 or later. */
12566#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12567# define IEMOP_HLP_MIN_186() do { } while (0)
12568#else
12569# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12570#endif
12571
12572/** The instruction requires a 286 or later. */
12573#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12574# define IEMOP_HLP_MIN_286() do { } while (0)
12575#else
12576# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12577#endif
12578
12579/** The instruction requires a 386 or later. */
12580#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12581# define IEMOP_HLP_MIN_386() do { } while (0)
12582#else
12583# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12584#endif
12585
12586/** The instruction requires a 386 or later if the given expression is true. */
12587#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12588# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12589#else
12590# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12591#endif
12592
12593/** The instruction requires a 486 or later. */
12594#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12595# define IEMOP_HLP_MIN_486() do { } while (0)
12596#else
12597# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12598#endif
12599
12600/** The instruction requires a Pentium (586) or later. */
12601#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12602# define IEMOP_HLP_MIN_586() do { } while (0)
12603#else
12604# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12605#endif
12606
12607/** The instruction requires a PentiumPro (686) or later. */
12608#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12609# define IEMOP_HLP_MIN_686() do { } while (0)
12610#else
12611# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12612#endif
12613
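/*
 * Usage sketch (hypothetical handler, not part of the build): a decoder
 * function for a 186+ instruction simply opens with the matching helper,
 *
 *   FNIEMOP_DEF(iemOp_example)           // name made up for illustration
 *   {
 *       IEMOP_HLP_MIN_186();
 *       ...
 *   }
 *
 * and the check compiles away entirely when IEM_CFG_TARGET_CPU already
 * guarantees at least a 186.
 */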
12614
12615/** The instruction raises an \#UD in real and V8086 mode. */
12616#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12617 do \
12618 { \
12619 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12620 else return IEMOP_RAISE_INVALID_OPCODE(); \
12621 } while (0)
12622
12623/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12624 * 64-bit mode. */
12625#define IEMOP_HLP_NO_64BIT() \
12626 do \
12627 { \
12628 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12629 return IEMOP_RAISE_INVALID_OPCODE(); \
12630 } while (0)
12631
12632/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12633 * 64-bit mode. */
12634#define IEMOP_HLP_ONLY_64BIT() \
12635 do \
12636 { \
12637 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12638 return IEMOP_RAISE_INVALID_OPCODE(); \
12639 } while (0)
12640
12641/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12642#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12643 do \
12644 { \
12645 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12646 iemRecalEffOpSize64Default(pVCpu); \
12647 } while (0)
12648
12649/** The instruction has 64-bit operand size if 64-bit mode. */
12650#define IEMOP_HLP_64BIT_OP_SIZE() \
12651 do \
12652 { \
12653 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12654 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12655 } while (0)
12656
12657/** Only a REX prefix immediately preceding the first opcode byte takes
12658 * effect. This macro helps ensure this and logs bad guest code. */
12659#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12660 do \
12661 { \
12662 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12663 { \
12664 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12665 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12666 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12667 pVCpu->iem.s.uRexB = 0; \
12668 pVCpu->iem.s.uRexIndex = 0; \
12669 pVCpu->iem.s.uRexReg = 0; \
12670 iemRecalEffOpSize(pVCpu); \
12671 } \
12672 } while (0)
12673
12674/**
12675 * Done decoding.
12676 */
12677#define IEMOP_HLP_DONE_DECODING() \
12678 do \
12679 { \
12680 /*nothing for now, maybe later... */ \
12681 } while (0)
12682
12683/**
12684 * Done decoding, raise \#UD exception if lock prefix present.
12685 */
12686#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12687 do \
12688 { \
12689 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12690 { /* likely */ } \
12691 else \
12692 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12693 } while (0)
12694
12695
12696/**
12697 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12698 * repnz or size prefixes are present, or if in real or v8086 mode.
12699 */
12700#define IEMOP_HLP_DONE_VEX_DECODING() \
12701 do \
12702 { \
12703 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12704 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12705 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12706 { /* likely */ } \
12707 else \
12708 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12709 } while (0)
12710
12711/**
12712 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12713 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12714 */
12715#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12716 do \
12717 { \
12718 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12719 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12720 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12721 && pVCpu->iem.s.uVexLength == 0)) \
12722 { /* likely */ } \
12723 else \
12724 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12725 } while (0)
12726
12727
12728/**
12729 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12730 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12731 * register 0, or if in real or v8086 mode.
12732 */
12733#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12734 do \
12735 { \
12736 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12737 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12738 && !pVCpu->iem.s.uVex3rdReg \
12739 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12740 { /* likely */ } \
12741 else \
12742 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12743 } while (0)
12744
12745/**
12746 * Done decoding VEX, no V, L=0.
12747 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12748 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12749 */
12750#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12751 do \
12752 { \
12753 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12754 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12755 && pVCpu->iem.s.uVexLength == 0 \
12756 && pVCpu->iem.s.uVex3rdReg == 0 \
12757 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12758 { /* likely */ } \
12759 else \
12760 return IEMOP_RAISE_INVALID_OPCODE(); \
12761 } while (0)
12762
12763#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12764 do \
12765 { \
12766 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12767 { /* likely */ } \
12768 else \
12769 { \
12770 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12771 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12772 } \
12773 } while (0)
12774#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12775 do \
12776 { \
12777 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12778 { /* likely */ } \
12779 else \
12780 { \
12781 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12782 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12783 } \
12784 } while (0)
12785
12786/**
12787 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12788 * are present.
12789 */
12790#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12791 do \
12792 { \
12793 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12794 { /* likely */ } \
12795 else \
12796 return IEMOP_RAISE_INVALID_OPCODE(); \
12797 } while (0)
12798
12799
12800#ifdef VBOX_WITH_NESTED_HWVIRT
12801/** Checks and handles SVM nested-guest control & instruction intercepts. */
12802# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12803 do \
12804 { \
12805 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12806 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12807 } while (0)
12808
12809/** Checks and handles SVM nested-guest CRx read intercepts. */
12810# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12811 do \
12812 { \
12813 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12814 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12815 } while (0)
12816
12817#else /* !VBOX_WITH_NESTED_HWVIRT */
12818# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12819# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12820#endif /* !VBOX_WITH_NESTED_HWVIRT */
12821
12822
12823/**
12824 * Calculates the effective address of a ModR/M memory operand.
12825 *
12826 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12827 *
12828 * @return Strict VBox status code.
12829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12830 * @param bRm The ModRM byte.
12831 * @param cbImm The size of any immediate following the
12832 * effective address opcode bytes. Important for
12833 * RIP relative addressing.
12834 * @param pGCPtrEff Where to return the effective address.
12835 */
12836IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12837{
12838 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12839 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12840# define SET_SS_DEF() \
12841 do \
12842 { \
12843 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12844 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12845 } while (0)
12846
12847 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12848 {
12849/** @todo Check the effective address size crap! */
12850 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12851 {
12852 uint16_t u16EffAddr;
12853
12854 /* Handle the disp16 form with no registers first. */
12855 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12856 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12857 else
12858 {
12859                /* Get the displacement. */
12860 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12861 {
12862 case 0: u16EffAddr = 0; break;
12863 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12864 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12865 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12866 }
12867
12868 /* Add the base and index registers to the disp. */
12869 switch (bRm & X86_MODRM_RM_MASK)
12870 {
12871 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12872 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12873 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12874 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12875 case 4: u16EffAddr += pCtx->si; break;
12876 case 5: u16EffAddr += pCtx->di; break;
12877 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12878 case 7: u16EffAddr += pCtx->bx; break;
12879 }
12880 }
12881
12882 *pGCPtrEff = u16EffAddr;
12883 }
12884 else
12885 {
12886 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12887 uint32_t u32EffAddr;
12888
12889 /* Handle the disp32 form with no registers first. */
12890 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12891 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12892 else
12893 {
12894 /* Get the register (or SIB) value. */
12895 switch ((bRm & X86_MODRM_RM_MASK))
12896 {
12897 case 0: u32EffAddr = pCtx->eax; break;
12898 case 1: u32EffAddr = pCtx->ecx; break;
12899 case 2: u32EffAddr = pCtx->edx; break;
12900 case 3: u32EffAddr = pCtx->ebx; break;
12901 case 4: /* SIB */
12902 {
12903 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12904
12905 /* Get the index and scale it. */
12906 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12907 {
12908 case 0: u32EffAddr = pCtx->eax; break;
12909 case 1: u32EffAddr = pCtx->ecx; break;
12910 case 2: u32EffAddr = pCtx->edx; break;
12911 case 3: u32EffAddr = pCtx->ebx; break;
12912 case 4: u32EffAddr = 0; /*none */ break;
12913 case 5: u32EffAddr = pCtx->ebp; break;
12914 case 6: u32EffAddr = pCtx->esi; break;
12915 case 7: u32EffAddr = pCtx->edi; break;
12916 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12917 }
12918 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12919
12920 /* add base */
12921 switch (bSib & X86_SIB_BASE_MASK)
12922 {
12923 case 0: u32EffAddr += pCtx->eax; break;
12924 case 1: u32EffAddr += pCtx->ecx; break;
12925 case 2: u32EffAddr += pCtx->edx; break;
12926 case 3: u32EffAddr += pCtx->ebx; break;
12927 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12928 case 5:
12929 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12930 {
12931 u32EffAddr += pCtx->ebp;
12932 SET_SS_DEF();
12933 }
12934 else
12935 {
12936 uint32_t u32Disp;
12937 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12938 u32EffAddr += u32Disp;
12939 }
12940 break;
12941 case 6: u32EffAddr += pCtx->esi; break;
12942 case 7: u32EffAddr += pCtx->edi; break;
12943 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12944 }
12945 break;
12946 }
12947 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12948 case 6: u32EffAddr = pCtx->esi; break;
12949 case 7: u32EffAddr = pCtx->edi; break;
12950 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12951 }
12952
12953 /* Get and add the displacement. */
12954 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12955 {
12956 case 0:
12957 break;
12958 case 1:
12959 {
12960 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12961 u32EffAddr += i8Disp;
12962 break;
12963 }
12964 case 2:
12965 {
12966 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12967 u32EffAddr += u32Disp;
12968 break;
12969 }
12970 default:
12971 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12972 }
12973
12974 }
12975 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12976 *pGCPtrEff = u32EffAddr;
12977 else
12978 {
12979 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12980 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12981 }
12982 }
12983 }
12984 else
12985 {
12986 uint64_t u64EffAddr;
12987
12988 /* Handle the rip+disp32 form with no registers first. */
12989 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12990 {
12991 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12992 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12993 }
12994 else
12995 {
12996 /* Get the register (or SIB) value. */
12997 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12998 {
12999 case 0: u64EffAddr = pCtx->rax; break;
13000 case 1: u64EffAddr = pCtx->rcx; break;
13001 case 2: u64EffAddr = pCtx->rdx; break;
13002 case 3: u64EffAddr = pCtx->rbx; break;
13003 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13004 case 6: u64EffAddr = pCtx->rsi; break;
13005 case 7: u64EffAddr = pCtx->rdi; break;
13006 case 8: u64EffAddr = pCtx->r8; break;
13007 case 9: u64EffAddr = pCtx->r9; break;
13008 case 10: u64EffAddr = pCtx->r10; break;
13009 case 11: u64EffAddr = pCtx->r11; break;
13010 case 13: u64EffAddr = pCtx->r13; break;
13011 case 14: u64EffAddr = pCtx->r14; break;
13012 case 15: u64EffAddr = pCtx->r15; break;
13013 /* SIB */
13014 case 4:
13015 case 12:
13016 {
13017 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13018
13019 /* Get the index and scale it. */
13020 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13021 {
13022 case 0: u64EffAddr = pCtx->rax; break;
13023 case 1: u64EffAddr = pCtx->rcx; break;
13024 case 2: u64EffAddr = pCtx->rdx; break;
13025 case 3: u64EffAddr = pCtx->rbx; break;
13026 case 4: u64EffAddr = 0; /*none */ break;
13027 case 5: u64EffAddr = pCtx->rbp; break;
13028 case 6: u64EffAddr = pCtx->rsi; break;
13029 case 7: u64EffAddr = pCtx->rdi; break;
13030 case 8: u64EffAddr = pCtx->r8; break;
13031 case 9: u64EffAddr = pCtx->r9; break;
13032 case 10: u64EffAddr = pCtx->r10; break;
13033 case 11: u64EffAddr = pCtx->r11; break;
13034 case 12: u64EffAddr = pCtx->r12; break;
13035 case 13: u64EffAddr = pCtx->r13; break;
13036 case 14: u64EffAddr = pCtx->r14; break;
13037 case 15: u64EffAddr = pCtx->r15; break;
13038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13039 }
13040 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13041
13042 /* add base */
13043 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13044 {
13045 case 0: u64EffAddr += pCtx->rax; break;
13046 case 1: u64EffAddr += pCtx->rcx; break;
13047 case 2: u64EffAddr += pCtx->rdx; break;
13048 case 3: u64EffAddr += pCtx->rbx; break;
13049 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13050 case 6: u64EffAddr += pCtx->rsi; break;
13051 case 7: u64EffAddr += pCtx->rdi; break;
13052 case 8: u64EffAddr += pCtx->r8; break;
13053 case 9: u64EffAddr += pCtx->r9; break;
13054 case 10: u64EffAddr += pCtx->r10; break;
13055 case 11: u64EffAddr += pCtx->r11; break;
13056 case 12: u64EffAddr += pCtx->r12; break;
13057 case 14: u64EffAddr += pCtx->r14; break;
13058 case 15: u64EffAddr += pCtx->r15; break;
13059 /* complicated encodings */
13060 case 5:
13061 case 13:
13062 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13063 {
13064 if (!pVCpu->iem.s.uRexB)
13065 {
13066 u64EffAddr += pCtx->rbp;
13067 SET_SS_DEF();
13068 }
13069 else
13070 u64EffAddr += pCtx->r13;
13071 }
13072 else
13073 {
13074 uint32_t u32Disp;
13075 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13076 u64EffAddr += (int32_t)u32Disp;
13077 }
13078 break;
13079 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13080 }
13081 break;
13082 }
13083 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13084 }
13085
13086 /* Get and add the displacement. */
13087 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13088 {
13089 case 0:
13090 break;
13091 case 1:
13092 {
13093 int8_t i8Disp;
13094 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13095 u64EffAddr += i8Disp;
13096 break;
13097 }
13098 case 2:
13099 {
13100 uint32_t u32Disp;
13101 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13102 u64EffAddr += (int32_t)u32Disp;
13103 break;
13104 }
13105 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13106 }
13107
13108 }
13109
13110 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13111 *pGCPtrEff = u64EffAddr;
13112 else
13113 {
13114 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13115 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13116 }
13117 }
13118
13119 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13120 return VINF_SUCCESS;
13121}
13122
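/*
 * Worked example for iemOpHlpCalcRmEffAddr (illustrative register values):
 * in 32-bit addressing mode, bRm=0x44 gives mod=1, r/m=4, so a SIB byte and a
 * disp8 follow.  With SIB=0x88 (scale field 2 => index*4, index=ecx, base=eax)
 * and a disp8 of -0x10, the code above computes ecx*4 + eax - 0x10; e.g.
 * ecx=0x10 and eax=0x1000 yield *pGCPtrEff = 0x1030.
 */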
13123
13124/**
13125 * Calculates the effective address of a ModR/M memory operand.
13126 *
13127 * Like iemOpHlpCalcRmEffAddr, but applies an extra displacement to RSP/ESP (@a offRsp).
13128 *
13129 * @return Strict VBox status code.
13130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13131 * @param bRm The ModRM byte.
13132 * @param cbImm The size of any immediate following the
13133 * effective address opcode bytes. Important for
13134 * RIP relative addressing.
13135 * @param pGCPtrEff Where to return the effective address.
13136 * @param offRsp RSP displacement.
13137 */
13138IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13139{
13140    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13141 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13142# define SET_SS_DEF() \
13143 do \
13144 { \
13145 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13146 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13147 } while (0)
13148
13149 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13150 {
13151/** @todo Check the effective address size crap! */
13152 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13153 {
13154 uint16_t u16EffAddr;
13155
13156 /* Handle the disp16 form with no registers first. */
13157 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13158 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13159 else
13160 {
13161                /* Get the displacement. */
13162 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13163 {
13164 case 0: u16EffAddr = 0; break;
13165 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13166 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13167 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13168 }
13169
13170 /* Add the base and index registers to the disp. */
13171 switch (bRm & X86_MODRM_RM_MASK)
13172 {
13173 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13174 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13175 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13176 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13177 case 4: u16EffAddr += pCtx->si; break;
13178 case 5: u16EffAddr += pCtx->di; break;
13179 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13180 case 7: u16EffAddr += pCtx->bx; break;
13181 }
13182 }
13183
13184 *pGCPtrEff = u16EffAddr;
13185 }
13186 else
13187 {
13188 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13189 uint32_t u32EffAddr;
13190
13191 /* Handle the disp32 form with no registers first. */
13192 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13193 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13194 else
13195 {
13196 /* Get the register (or SIB) value. */
13197 switch ((bRm & X86_MODRM_RM_MASK))
13198 {
13199 case 0: u32EffAddr = pCtx->eax; break;
13200 case 1: u32EffAddr = pCtx->ecx; break;
13201 case 2: u32EffAddr = pCtx->edx; break;
13202 case 3: u32EffAddr = pCtx->ebx; break;
13203 case 4: /* SIB */
13204 {
13205 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13206
13207 /* Get the index and scale it. */
13208 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13209 {
13210 case 0: u32EffAddr = pCtx->eax; break;
13211 case 1: u32EffAddr = pCtx->ecx; break;
13212 case 2: u32EffAddr = pCtx->edx; break;
13213 case 3: u32EffAddr = pCtx->ebx; break;
13214 case 4: u32EffAddr = 0; /*none */ break;
13215 case 5: u32EffAddr = pCtx->ebp; break;
13216 case 6: u32EffAddr = pCtx->esi; break;
13217 case 7: u32EffAddr = pCtx->edi; break;
13218 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13219 }
13220 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13221
13222 /* add base */
13223 switch (bSib & X86_SIB_BASE_MASK)
13224 {
13225 case 0: u32EffAddr += pCtx->eax; break;
13226 case 1: u32EffAddr += pCtx->ecx; break;
13227 case 2: u32EffAddr += pCtx->edx; break;
13228 case 3: u32EffAddr += pCtx->ebx; break;
13229 case 4:
13230 u32EffAddr += pCtx->esp + offRsp;
13231 SET_SS_DEF();
13232 break;
13233 case 5:
13234 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13235 {
13236 u32EffAddr += pCtx->ebp;
13237 SET_SS_DEF();
13238 }
13239 else
13240 {
13241 uint32_t u32Disp;
13242 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13243 u32EffAddr += u32Disp;
13244 }
13245 break;
13246 case 6: u32EffAddr += pCtx->esi; break;
13247 case 7: u32EffAddr += pCtx->edi; break;
13248 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13249 }
13250 break;
13251 }
13252 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13253 case 6: u32EffAddr = pCtx->esi; break;
13254 case 7: u32EffAddr = pCtx->edi; break;
13255 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13256 }
13257
13258 /* Get and add the displacement. */
13259 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13260 {
13261 case 0:
13262 break;
13263 case 1:
13264 {
13265 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13266 u32EffAddr += i8Disp;
13267 break;
13268 }
13269 case 2:
13270 {
13271 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13272 u32EffAddr += u32Disp;
13273 break;
13274 }
13275 default:
13276 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13277 }
13278
13279 }
13280 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13281 *pGCPtrEff = u32EffAddr;
13282 else
13283 {
13284 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13285 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13286 }
13287 }
13288 }
13289 else
13290 {
13291 uint64_t u64EffAddr;
13292
13293 /* Handle the rip+disp32 form with no registers first. */
13294 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13295 {
13296 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13297 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13298 }
13299 else
13300 {
13301 /* Get the register (or SIB) value. */
13302 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13303 {
13304 case 0: u64EffAddr = pCtx->rax; break;
13305 case 1: u64EffAddr = pCtx->rcx; break;
13306 case 2: u64EffAddr = pCtx->rdx; break;
13307 case 3: u64EffAddr = pCtx->rbx; break;
13308 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13309 case 6: u64EffAddr = pCtx->rsi; break;
13310 case 7: u64EffAddr = pCtx->rdi; break;
13311 case 8: u64EffAddr = pCtx->r8; break;
13312 case 9: u64EffAddr = pCtx->r9; break;
13313 case 10: u64EffAddr = pCtx->r10; break;
13314 case 11: u64EffAddr = pCtx->r11; break;
13315 case 13: u64EffAddr = pCtx->r13; break;
13316 case 14: u64EffAddr = pCtx->r14; break;
13317 case 15: u64EffAddr = pCtx->r15; break;
13318 /* SIB */
13319 case 4:
13320 case 12:
13321 {
13322 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13323
13324 /* Get the index and scale it. */
13325 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13326 {
13327 case 0: u64EffAddr = pCtx->rax; break;
13328 case 1: u64EffAddr = pCtx->rcx; break;
13329 case 2: u64EffAddr = pCtx->rdx; break;
13330 case 3: u64EffAddr = pCtx->rbx; break;
13331 case 4: u64EffAddr = 0; /*none */ break;
13332 case 5: u64EffAddr = pCtx->rbp; break;
13333 case 6: u64EffAddr = pCtx->rsi; break;
13334 case 7: u64EffAddr = pCtx->rdi; break;
13335 case 8: u64EffAddr = pCtx->r8; break;
13336 case 9: u64EffAddr = pCtx->r9; break;
13337 case 10: u64EffAddr = pCtx->r10; break;
13338 case 11: u64EffAddr = pCtx->r11; break;
13339 case 12: u64EffAddr = pCtx->r12; break;
13340 case 13: u64EffAddr = pCtx->r13; break;
13341 case 14: u64EffAddr = pCtx->r14; break;
13342 case 15: u64EffAddr = pCtx->r15; break;
13343 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13344 }
13345 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13346
13347 /* add base */
13348 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13349 {
13350 case 0: u64EffAddr += pCtx->rax; break;
13351 case 1: u64EffAddr += pCtx->rcx; break;
13352 case 2: u64EffAddr += pCtx->rdx; break;
13353 case 3: u64EffAddr += pCtx->rbx; break;
13354 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13355 case 6: u64EffAddr += pCtx->rsi; break;
13356 case 7: u64EffAddr += pCtx->rdi; break;
13357 case 8: u64EffAddr += pCtx->r8; break;
13358 case 9: u64EffAddr += pCtx->r9; break;
13359 case 10: u64EffAddr += pCtx->r10; break;
13360 case 11: u64EffAddr += pCtx->r11; break;
13361 case 12: u64EffAddr += pCtx->r12; break;
13362 case 14: u64EffAddr += pCtx->r14; break;
13363 case 15: u64EffAddr += pCtx->r15; break;
13364 /* complicated encodings */
13365 case 5:
13366 case 13:
13367 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13368 {
13369 if (!pVCpu->iem.s.uRexB)
13370 {
13371 u64EffAddr += pCtx->rbp;
13372 SET_SS_DEF();
13373 }
13374 else
13375 u64EffAddr += pCtx->r13;
13376 }
13377 else
13378 {
13379 uint32_t u32Disp;
13380 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13381 u64EffAddr += (int32_t)u32Disp;
13382 }
13383 break;
13384 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13385 }
13386 break;
13387 }
13388 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13389 }
13390
13391 /* Get and add the displacement. */
13392 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13393 {
13394 case 0:
13395 break;
13396 case 1:
13397 {
13398 int8_t i8Disp;
13399 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13400 u64EffAddr += i8Disp;
13401 break;
13402 }
13403 case 2:
13404 {
13405 uint32_t u32Disp;
13406 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13407 u64EffAddr += (int32_t)u32Disp;
13408 break;
13409 }
13410 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13411 }
13412
13413 }
13414
13415 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13416 *pGCPtrEff = u64EffAddr;
13417 else
13418 {
13419 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13420 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13421 }
13422 }
13423
13424 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13425 return VINF_SUCCESS;
13426}
13427
13428
13429#ifdef IEM_WITH_SETJMP
13430/**
13431 * Calculates the effective address of a ModR/M memory operand.
13432 *
13433 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13434 *
13435 * May longjmp on internal error.
13436 *
13437 * @return The effective address.
13438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13439 * @param bRm The ModRM byte.
13440 * @param cbImm The size of any immediate following the
13441 * effective address opcode bytes. Important for
13442 * RIP relative addressing.
13443 */
13444IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13445{
13446 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13447 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13448# define SET_SS_DEF() \
13449 do \
13450 { \
13451 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13452 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13453 } while (0)
13454
13455 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13456 {
13457/** @todo Check the effective address size crap! */
13458 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13459 {
13460 uint16_t u16EffAddr;
13461
13462 /* Handle the disp16 form with no registers first. */
13463 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13464 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13465 else
13466 {
13467            /* Get the displacement. */
13468 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13469 {
13470 case 0: u16EffAddr = 0; break;
13471 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13472 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13473 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13474 }
13475
13476 /* Add the base and index registers to the disp. */
13477 switch (bRm & X86_MODRM_RM_MASK)
13478 {
13479 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13480 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13481 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13482 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13483 case 4: u16EffAddr += pCtx->si; break;
13484 case 5: u16EffAddr += pCtx->di; break;
13485 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13486 case 7: u16EffAddr += pCtx->bx; break;
13487 }
13488 }
13489
13490 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13491 return u16EffAddr;
13492 }
13493
13494 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13495 uint32_t u32EffAddr;
13496
13497 /* Handle the disp32 form with no registers first. */
13498 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13499 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13500 else
13501 {
13502 /* Get the register (or SIB) value. */
13503 switch ((bRm & X86_MODRM_RM_MASK))
13504 {
13505 case 0: u32EffAddr = pCtx->eax; break;
13506 case 1: u32EffAddr = pCtx->ecx; break;
13507 case 2: u32EffAddr = pCtx->edx; break;
13508 case 3: u32EffAddr = pCtx->ebx; break;
13509 case 4: /* SIB */
13510 {
13511 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13512
13513 /* Get the index and scale it. */
13514 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13515 {
13516 case 0: u32EffAddr = pCtx->eax; break;
13517 case 1: u32EffAddr = pCtx->ecx; break;
13518 case 2: u32EffAddr = pCtx->edx; break;
13519 case 3: u32EffAddr = pCtx->ebx; break;
13520 case 4: u32EffAddr = 0; /*none */ break;
13521 case 5: u32EffAddr = pCtx->ebp; break;
13522 case 6: u32EffAddr = pCtx->esi; break;
13523 case 7: u32EffAddr = pCtx->edi; break;
13524 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13525 }
13526 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13527
13528 /* add base */
13529 switch (bSib & X86_SIB_BASE_MASK)
13530 {
13531 case 0: u32EffAddr += pCtx->eax; break;
13532 case 1: u32EffAddr += pCtx->ecx; break;
13533 case 2: u32EffAddr += pCtx->edx; break;
13534 case 3: u32EffAddr += pCtx->ebx; break;
13535 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13536 case 5:
13537 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13538 {
13539 u32EffAddr += pCtx->ebp;
13540 SET_SS_DEF();
13541 }
13542 else
13543 {
13544 uint32_t u32Disp;
13545 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13546 u32EffAddr += u32Disp;
13547 }
13548 break;
13549 case 6: u32EffAddr += pCtx->esi; break;
13550 case 7: u32EffAddr += pCtx->edi; break;
13551 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13552 }
13553 break;
13554 }
13555 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13556 case 6: u32EffAddr = pCtx->esi; break;
13557 case 7: u32EffAddr = pCtx->edi; break;
13558 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13559 }
13560
13561 /* Get and add the displacement. */
13562 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13563 {
13564 case 0:
13565 break;
13566 case 1:
13567 {
13568 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13569 u32EffAddr += i8Disp;
13570 break;
13571 }
13572 case 2:
13573 {
13574 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13575 u32EffAddr += u32Disp;
13576 break;
13577 }
13578 default:
13579 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13580 }
13581 }
13582
13583 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13584 {
13585 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13586 return u32EffAddr;
13587 }
13588 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13589 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13590 return u32EffAddr & UINT16_MAX;
13591 }
13592
13593 uint64_t u64EffAddr;
13594
13595 /* Handle the rip+disp32 form with no registers first. */
13596 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13597 {
13598 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13599 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13600 }
13601 else
13602 {
13603 /* Get the register (or SIB) value. */
13604 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13605 {
13606 case 0: u64EffAddr = pCtx->rax; break;
13607 case 1: u64EffAddr = pCtx->rcx; break;
13608 case 2: u64EffAddr = pCtx->rdx; break;
13609 case 3: u64EffAddr = pCtx->rbx; break;
13610 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13611 case 6: u64EffAddr = pCtx->rsi; break;
13612 case 7: u64EffAddr = pCtx->rdi; break;
13613 case 8: u64EffAddr = pCtx->r8; break;
13614 case 9: u64EffAddr = pCtx->r9; break;
13615 case 10: u64EffAddr = pCtx->r10; break;
13616 case 11: u64EffAddr = pCtx->r11; break;
13617 case 13: u64EffAddr = pCtx->r13; break;
13618 case 14: u64EffAddr = pCtx->r14; break;
13619 case 15: u64EffAddr = pCtx->r15; break;
13620 /* SIB */
13621 case 4:
13622 case 12:
13623 {
13624 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13625
13626 /* Get the index and scale it. */
13627 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13628 {
13629 case 0: u64EffAddr = pCtx->rax; break;
13630 case 1: u64EffAddr = pCtx->rcx; break;
13631 case 2: u64EffAddr = pCtx->rdx; break;
13632 case 3: u64EffAddr = pCtx->rbx; break;
13633 case 4: u64EffAddr = 0; /*none */ break;
13634 case 5: u64EffAddr = pCtx->rbp; break;
13635 case 6: u64EffAddr = pCtx->rsi; break;
13636 case 7: u64EffAddr = pCtx->rdi; break;
13637 case 8: u64EffAddr = pCtx->r8; break;
13638 case 9: u64EffAddr = pCtx->r9; break;
13639 case 10: u64EffAddr = pCtx->r10; break;
13640 case 11: u64EffAddr = pCtx->r11; break;
13641 case 12: u64EffAddr = pCtx->r12; break;
13642 case 13: u64EffAddr = pCtx->r13; break;
13643 case 14: u64EffAddr = pCtx->r14; break;
13644 case 15: u64EffAddr = pCtx->r15; break;
13645 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13646 }
13647 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13648
13649 /* add base */
13650 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13651 {
13652 case 0: u64EffAddr += pCtx->rax; break;
13653 case 1: u64EffAddr += pCtx->rcx; break;
13654 case 2: u64EffAddr += pCtx->rdx; break;
13655 case 3: u64EffAddr += pCtx->rbx; break;
13656 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13657 case 6: u64EffAddr += pCtx->rsi; break;
13658 case 7: u64EffAddr += pCtx->rdi; break;
13659 case 8: u64EffAddr += pCtx->r8; break;
13660 case 9: u64EffAddr += pCtx->r9; break;
13661 case 10: u64EffAddr += pCtx->r10; break;
13662 case 11: u64EffAddr += pCtx->r11; break;
13663 case 12: u64EffAddr += pCtx->r12; break;
13664 case 14: u64EffAddr += pCtx->r14; break;
13665 case 15: u64EffAddr += pCtx->r15; break;
13666 /* complicated encodings */
13667 case 5:
13668 case 13:
13669 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13670 {
13671 if (!pVCpu->iem.s.uRexB)
13672 {
13673 u64EffAddr += pCtx->rbp;
13674 SET_SS_DEF();
13675 }
13676 else
13677 u64EffAddr += pCtx->r13;
13678 }
13679 else
13680 {
13681 uint32_t u32Disp;
13682 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13683 u64EffAddr += (int32_t)u32Disp;
13684 }
13685 break;
13686 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13687 }
13688 break;
13689 }
13690 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13691 }
13692
13693 /* Get and add the displacement. */
13694 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13695 {
13696 case 0:
13697 break;
13698 case 1:
13699 {
13700 int8_t i8Disp;
13701 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13702 u64EffAddr += i8Disp;
13703 break;
13704 }
13705 case 2:
13706 {
13707 uint32_t u32Disp;
13708 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13709 u64EffAddr += (int32_t)u32Disp;
13710 break;
13711 }
13712 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13713 }
13714
13715 }
13716
13717 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13718 {
13719 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13720 return u64EffAddr;
13721 }
13722 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13723 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13724 return u64EffAddr & UINT32_MAX;
13725}
13726#endif /* IEM_WITH_SETJMP */
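
/*
 * A minimal standalone sketch of the address math the iemOpHlpCalcRmEffAddr* helpers above
 * implement (hypothetical helper names, kept disabled): the SIB form sums base, scaled index
 * and a sign-extended displacement, while the RIP-relative form adds the displacement to the
 * address of the *next* instruction, which is why cbImm must cover any trailing immediate.
 */
#if 0
static uint64_t iemSketchSibEffAddr(uint64_t uBase, uint64_t uIndex, uint8_t cScaleShift, int32_t i32Disp)
{
    /* The scale is encoded as a shift count: 0..3 => x1, x2, x4, x8. */
    return uBase + (uIndex << (cScaleShift & 3)) + (uint64_t)(int64_t)i32Disp;
}

static uint64_t iemSketchRipRelEffAddr(uint64_t uRip, uint8_t cbInstr, int32_t i32Disp)
{
    /* uRip is the address of the current instruction; cbInstr its full length, immediates included. */
    return uRip + cbInstr + (uint64_t)(int64_t)i32Disp;
}
#endif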
13727
13728
13729/** @} */
13730
13731
13732
13733/*
13734 * Include the instructions
13735 */
13736#include "IEMAllInstructions.cpp.h"
13737
13738
13739
13740
13741#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13742
13743/**
13744 * Sets up execution verification mode.
13745 */
13746IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13747{
13748
13749 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13750
13751 /*
13752 * Always note down the address of the current instruction.
13753 */
13754 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13755 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13756
13757 /*
13758 * Enable verification and/or logging.
13759 */
13760 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */;
13761 if ( fNewNoRem
13762 && ( 0
13763#if 0 /* auto enable on first paged protected mode interrupt */
13764 || ( pOrgCtx->eflags.Bits.u1IF
13765 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13766 && TRPMHasTrap(pVCpu)
13767 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13768#endif
13769#if 0
13770        || (   pOrgCtx->cs.Sel == 0x10
13771            && (   pOrgCtx->rip == 0x90119e3e
13772                || pOrgCtx->rip == 0x901d9810))
13773#endif
13774#if 0 /* Auto enable DSL - FPU stuff. */
13775        || (   pOrgCtx->cs.Sel == 0x10
13776 && (// pOrgCtx->rip == 0xc02ec07f
13777 //|| pOrgCtx->rip == 0xc02ec082
13778 //|| pOrgCtx->rip == 0xc02ec0c9
13779 0
13780 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13781#endif
13782#if 0 /* Auto enable DSL - fstp st0 stuff. */
13783        || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13784#endif
13785#if 0
13786 || pOrgCtx->rip == 0x9022bb3a
13787#endif
13788#if 0
13789 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13790#endif
13791#if 0
13792 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13793 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13794#endif
13795#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13796 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13797 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13798 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13799#endif
13800#if 0 /* NT4SP1 - xadd early boot. */
13801 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13802#endif
13803#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13804 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13805#endif
13806#if 0 /* NT4SP1 - cmpxchg (AMD). */
13807 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13808#endif
13809#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13810 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13811#endif
13812#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13813 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13814
13815#endif
13816#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13817 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13818
13819#endif
13820#if 0 /* NT4SP1 - frstor [ecx] */
13821 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13822#endif
13823#if 0 /* xxxxxx - All long mode code. */
13824 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13825#endif
13826#if 0 /* rep movsq linux 3.7 64-bit boot. */
13827 || (pOrgCtx->rip == 0x0000000000100241)
13828#endif
13829#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13830 || (pOrgCtx->rip == 0x000000000215e240)
13831#endif
13832#if 0 /* DOS's size-overridden iret to v8086. */
13833 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13834#endif
13835 )
13836 )
13837 {
13838 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13839 RTLogFlags(NULL, "enabled");
13840 fNewNoRem = false;
13841 }
13842 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13843 {
13844 pVCpu->iem.s.fNoRem = fNewNoRem;
13845 if (!fNewNoRem)
13846 {
13847 LogAlways(("Enabling verification mode!\n"));
13848 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13849 }
13850 else
13851 LogAlways(("Disabling verification mode!\n"));
13852 }
13853
13854 /*
13855 * Switch state.
13856 */
13857 if (IEM_VERIFICATION_ENABLED(pVCpu))
13858 {
13859 static CPUMCTX s_DebugCtx; /* Ugly! */
13860
13861 s_DebugCtx = *pOrgCtx;
13862 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13863 }
13864
13865 /*
13866 * See if there is an interrupt pending in TRPM and inject it if we can.
13867 */
13868 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13869 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13870#if defined(VBOX_WITH_NESTED_HWVIRT)
13871 bool fIntrEnabled = pOrgCtx->hwvirt.svm.fGif;
13872 if (fIntrEnabled)
13873 {
13874        if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13875            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13876 else
13877 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13878 }
13879#else
13880 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13881#endif
13882 if ( fIntrEnabled
13883 && TRPMHasTrap(pVCpu)
13884 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13885 {
13886 uint8_t u8TrapNo;
13887 TRPMEVENT enmType;
13888 RTGCUINT uErrCode;
13889 RTGCPTR uCr2;
13890 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13891 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13892 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13893 TRPMResetTrap(pVCpu);
13894 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13895 }
13896
13897 /*
13898 * Reset the counters.
13899 */
13900 pVCpu->iem.s.cIOReads = 0;
13901 pVCpu->iem.s.cIOWrites = 0;
13902 pVCpu->iem.s.fIgnoreRaxRdx = false;
13903 pVCpu->iem.s.fOverlappingMovs = false;
13904 pVCpu->iem.s.fProblematicMemory = false;
13905 pVCpu->iem.s.fUndefinedEFlags = 0;
13906
13907 if (IEM_VERIFICATION_ENABLED(pVCpu))
13908 {
13909 /*
13910 * Free all verification records.
13911 */
13912 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13913 pVCpu->iem.s.pIemEvtRecHead = NULL;
13914 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13915 do
13916 {
13917 while (pEvtRec)
13918 {
13919 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13920 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13921 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13922 pEvtRec = pNext;
13923 }
13924 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13925 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13926 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13927 } while (pEvtRec);
13928 }
13929}
13930
13931
13932/**
13933 * Allocate an event record.
13934 * @returns Pointer to a record.
13935 */
13936IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13937{
13938 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13939 return NULL;
13940
13941 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13942 if (pEvtRec)
13943 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13944 else
13945 {
13946 if (!pVCpu->iem.s.ppIemEvtRecNext)
13947 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13948
13949 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13950 if (!pEvtRec)
13951 return NULL;
13952 }
13953 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13954 pEvtRec->pNext = NULL;
13955 return pEvtRec;
13956}
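
/*
 * The allocator above recycles records through a simple LIFO free list and only falls back to
 * MMR3HeapAlloc when that list is empty.  A bare sketch of the pop-from-free-list step
 * (hypothetical names, kept disabled):
 */
#if 0
typedef struct IEMSKETCHREC { struct IEMSKETCHREC *pNext; } IEMSKETCHREC;

static IEMSKETCHREC *iemSketchPopFreeRec(IEMSKETCHREC **ppFreeHead)
{
    IEMSKETCHREC *pRec = *ppFreeHead;   /* Take the head of the free list... */
    if (pRec)
        *ppFreeHead = pRec->pNext;      /* ...and unlink it; NULL means the caller must allocate fresh memory. */
    return pRec;
}
#endif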
13957
13958
13959/**
13960 * IOMMMIORead notification.
13961 */
13962VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13963{
13964 PVMCPU pVCpu = VMMGetCpu(pVM);
13965 if (!pVCpu)
13966 return;
13967 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13968 if (!pEvtRec)
13969 return;
13970 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13971 pEvtRec->u.RamRead.GCPhys = GCPhys;
13972 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13973 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13974 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13975}
13976
13977
13978/**
13979 * IOMMMIOWrite notification.
13980 */
13981VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13982{
13983 PVMCPU pVCpu = VMMGetCpu(pVM);
13984 if (!pVCpu)
13985 return;
13986 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13987 if (!pEvtRec)
13988 return;
13989 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13990 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13991 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13992 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13993 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13994 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13995 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13996 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13997 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13998}
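
/*
 * Each notification above fills in a fresh record and links it in at the position designated
 * by the ppOtherEvtRecNext pointer-to-pointer; IEMNotifyMMIOWrite additionally captures the
 * written value byte by byte in little-endian order via RT_BYTE1..RT_BYTE4.  A bare sketch of
 * the pointer-to-pointer linking idiom (hypothetical node type, kept disabled):
 */
#if 0
typedef struct IEMSKETCHNODE { struct IEMSKETCHNODE *pNext; } IEMSKETCHNODE;

static void iemSketchLinkAt(IEMSKETCHNODE **ppNext, IEMSKETCHNODE *pNew)
{
    pNew->pNext = *ppNext;              /* Whatever was linked here now follows the new node. */
    *ppNext     = pNew;                 /* The new node takes over the designated position.   */
}
#endif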
13999
14000
14001/**
14002 * IOMIOPortRead notification.
14003 */
14004VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14005{
14006 PVMCPU pVCpu = VMMGetCpu(pVM);
14007 if (!pVCpu)
14008 return;
14009 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14010 if (!pEvtRec)
14011 return;
14012 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14013 pEvtRec->u.IOPortRead.Port = Port;
14014 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14015 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14016 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14017}
14018
14019/**
14020 * IOMIOPortWrite notification.
14021 */
14022VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14023{
14024 PVMCPU pVCpu = VMMGetCpu(pVM);
14025 if (!pVCpu)
14026 return;
14027 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14028 if (!pEvtRec)
14029 return;
14030 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14031 pEvtRec->u.IOPortWrite.Port = Port;
14032 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14033 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14034 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14035 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14036}
14037
14038
14039VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14040{
14041 PVMCPU pVCpu = VMMGetCpu(pVM);
14042 if (!pVCpu)
14043 return;
14044 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14045 if (!pEvtRec)
14046 return;
14047 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14048 pEvtRec->u.IOPortStrRead.Port = Port;
14049 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14050 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14051 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14052 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14053}
14054
14055
14056VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14057{
14058 PVMCPU pVCpu = VMMGetCpu(pVM);
14059 if (!pVCpu)
14060 return;
14061 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14062 if (!pEvtRec)
14063 return;
14064 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14065 pEvtRec->u.IOPortStrWrite.Port = Port;
14066 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14067 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14068 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14069 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14070}
14071
14072
14073/**
14074 * Fakes and records an I/O port read.
14075 *
14076 * @returns VINF_SUCCESS.
14077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14078 * @param Port The I/O port.
14079 * @param pu32Value Where to store the fake value.
14080 * @param cbValue The size of the access.
14081 */
14082IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14083{
14084 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14085 if (pEvtRec)
14086 {
14087 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14088 pEvtRec->u.IOPortRead.Port = Port;
14089 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14090 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14091 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14092 }
14093 pVCpu->iem.s.cIOReads++;
14094 *pu32Value = 0xcccccccc;
14095 return VINF_SUCCESS;
14096}
14097
14098
14099/**
14100 * Fakes and records an I/O port write.
14101 *
14102 * @returns VINF_SUCCESS.
14103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14104 * @param Port The I/O port.
14105 * @param u32Value The value being written.
14106 * @param cbValue The size of the access.
14107 */
14108IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14109{
14110 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14111 if (pEvtRec)
14112 {
14113 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14114 pEvtRec->u.IOPortWrite.Port = Port;
14115 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14116 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14117 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14118 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14119 }
14120 pVCpu->iem.s.cIOWrites++;
14121 return VINF_SUCCESS;
14122}
14123
14124
14125/**
14126 * Used to add extra details about a stub case.
14127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14128 */
14129IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14130{
14131 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14132 PVM pVM = pVCpu->CTX_SUFF(pVM);
14133
14134 char szRegs[4096];
14135 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14136 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14137 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14138 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14139 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14140 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14141 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14142 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14143 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14144 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14145 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14146 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14147 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14148 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14149 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14150 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14151 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14152 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14153 " efer=%016VR{efer}\n"
14154 " pat=%016VR{pat}\n"
14155 " sf_mask=%016VR{sf_mask}\n"
14156 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14157 " lstar=%016VR{lstar}\n"
14158 " star=%016VR{star} cstar=%016VR{cstar}\n"
14159 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14160 );
14161
14162 char szInstr1[256];
14163 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14164 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14165 szInstr1, sizeof(szInstr1), NULL);
14166 char szInstr2[256];
14167 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14168 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14169 szInstr2, sizeof(szInstr2), NULL);
14170
14171 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14172}
14173
14174
14175/**
14176 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14177 * dump to the assertion info.
14178 *
14179 * @param pEvtRec The record to dump.
14180 */
14181IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14182{
14183 switch (pEvtRec->enmEvent)
14184 {
14185 case IEMVERIFYEVENT_IOPORT_READ:
14186 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14187                            pEvtRec->u.IOPortRead.Port,
14188                            pEvtRec->u.IOPortRead.cbValue);
14189 break;
14190 case IEMVERIFYEVENT_IOPORT_WRITE:
14191 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14192 pEvtRec->u.IOPortWrite.Port,
14193 pEvtRec->u.IOPortWrite.cbValue,
14194 pEvtRec->u.IOPortWrite.u32Value);
14195 break;
14196 case IEMVERIFYEVENT_IOPORT_STR_READ:
14197 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14198                            pEvtRec->u.IOPortStrRead.Port,
14199                            pEvtRec->u.IOPortStrRead.cbValue,
14200                            pEvtRec->u.IOPortStrRead.cTransfers);
14201 break;
14202 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14203 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14204 pEvtRec->u.IOPortStrWrite.Port,
14205 pEvtRec->u.IOPortStrWrite.cbValue,
14206 pEvtRec->u.IOPortStrWrite.cTransfers);
14207 break;
14208 case IEMVERIFYEVENT_RAM_READ:
14209 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14210 pEvtRec->u.RamRead.GCPhys,
14211 pEvtRec->u.RamRead.cb);
14212 break;
14213 case IEMVERIFYEVENT_RAM_WRITE:
14214 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14215 pEvtRec->u.RamWrite.GCPhys,
14216 pEvtRec->u.RamWrite.cb,
14217 (int)pEvtRec->u.RamWrite.cb,
14218 pEvtRec->u.RamWrite.ab);
14219 break;
14220 default:
14221 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14222 break;
14223 }
14224}
14225
14226
14227/**
14228 * Raises an assertion on the specified records, showing the given message with
14229 * a record dump attached.
14230 *
14231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14232 * @param pEvtRec1 The first record.
14233 * @param pEvtRec2 The second record.
14234 * @param pszMsg The message explaining why we're asserting.
14235 */
14236IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14237{
14238 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14239 iemVerifyAssertAddRecordDump(pEvtRec1);
14240 iemVerifyAssertAddRecordDump(pEvtRec2);
14241 iemVerifyAssertMsg2(pVCpu);
14242 RTAssertPanic();
14243}
14244
14245
14246/**
14247 * Raises an assertion on the specified record, showing the given message with
14248 * a record dump attached.
14249 *
14250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14252 * @param pEvtRec The record.
14252 * @param pszMsg The message explaining why we're asserting.
14253 */
14254IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14255{
14256 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14257 iemVerifyAssertAddRecordDump(pEvtRec);
14258 iemVerifyAssertMsg2(pVCpu);
14259 RTAssertPanic();
14260}
14261
14262
14263/**
14264 * Verifies a write record.
14265 *
14266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14267 * @param pEvtRec The write record.
14268 * @param fRem Set if REM did the other execution. If clear
14269 * it was HM.
14270 */
14271IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14272{
14273 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14274 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14275 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14276 if ( RT_FAILURE(rc)
14277 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14278 {
14279 /* fend off ins */
14280 if ( !pVCpu->iem.s.cIOReads
14281 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14282 || ( pEvtRec->u.RamWrite.cb != 1
14283 && pEvtRec->u.RamWrite.cb != 2
14284 && pEvtRec->u.RamWrite.cb != 4) )
14285 {
14286 /* fend off ROMs and MMIO */
14287 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14288 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14289 {
14290 /* fend off fxsave */
14291 if (pEvtRec->u.RamWrite.cb != 512)
14292 {
14293 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14294 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14295 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14296 RTAssertMsg2Add("%s: %.*Rhxs\n"
14297 "iem: %.*Rhxs\n",
14298 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14299 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14300 iemVerifyAssertAddRecordDump(pEvtRec);
14301 iemVerifyAssertMsg2(pVCpu);
14302 RTAssertPanic();
14303 }
14304 }
14305 }
14306 }
14307
14308}
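
/*
 * The ROM/MMIO fend-off above uses the unsigned range-check idiom: for unsigned uAddr,
 * (uAddr - uLow) > cbRange is true exactly when uAddr lies outside [uLow, uLow + cbRange],
 * because values below uLow wrap around to very large numbers.  A minimal sketch of that
 * test (hypothetical helper, kept disabled):
 */
#if 0
static bool iemSketchIsOutsideRange(uint64_t uAddr, uint64_t uLow, uint64_t cbRange)
{
    /* One compare instead of (uAddr < uLow || uAddr > uLow + cbRange). */
    return uAddr - uLow > cbRange;
}
#endif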
14309
14310/**
14311 * Performs the post-execution verification checks.
14312 */
14313IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14314{
14315 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14316 return rcStrictIem;
14317
14318 /*
14319 * Switch back the state.
14320 */
14321 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14322 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14323 Assert(pOrgCtx != pDebugCtx);
14324 IEM_GET_CTX(pVCpu) = pOrgCtx;
14325
14326 /*
14327 * Execute the instruction in REM.
14328 */
14329 bool fRem = false;
14330 PVM pVM = pVCpu->CTX_SUFF(pVM);
14331
14332 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14333#ifdef IEM_VERIFICATION_MODE_FULL_HM
14334 if ( HMIsEnabled(pVM)
14335 && pVCpu->iem.s.cIOReads == 0
14336 && pVCpu->iem.s.cIOWrites == 0
14337 && !pVCpu->iem.s.fProblematicMemory)
14338 {
14339 uint64_t uStartRip = pOrgCtx->rip;
14340 unsigned iLoops = 0;
14341 do
14342 {
14343 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14344 iLoops++;
14345 } while ( rc == VINF_SUCCESS
14346 || ( rc == VINF_EM_DBG_STEPPED
14347 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14348 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14349 || ( pOrgCtx->rip != pDebugCtx->rip
14350 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14351 && iLoops < 8) );
14352 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14353 rc = VINF_SUCCESS;
14354 }
14355#endif
14356 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14357 || rc == VINF_IOM_R3_IOPORT_READ
14358 || rc == VINF_IOM_R3_IOPORT_WRITE
14359 || rc == VINF_IOM_R3_MMIO_READ
14360 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14361 || rc == VINF_IOM_R3_MMIO_WRITE
14362 || rc == VINF_CPUM_R3_MSR_READ
14363 || rc == VINF_CPUM_R3_MSR_WRITE
14364 || rc == VINF_EM_RESCHEDULE
14365 )
14366 {
14367 EMRemLock(pVM);
14368 rc = REMR3EmulateInstruction(pVM, pVCpu);
14369 AssertRC(rc);
14370 EMRemUnlock(pVM);
14371 fRem = true;
14372 }
14373
14374# if 1 /* Skip unimplemented instructions for now. */
14375 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14376 {
14377 IEM_GET_CTX(pVCpu) = pOrgCtx;
14378 if (rc == VINF_EM_DBG_STEPPED)
14379 return VINF_SUCCESS;
14380 return rc;
14381 }
14382# endif
14383
14384 /*
14385 * Compare the register states.
14386 */
14387 unsigned cDiffs = 0;
14388 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14389 {
14390 //Log(("REM and IEM ends up with different registers!\n"));
14391 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14392
14393# define CHECK_FIELD(a_Field) \
14394 do \
14395 { \
14396 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14397 { \
14398 switch (sizeof(pOrgCtx->a_Field)) \
14399 { \
14400 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14401 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14402 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14403 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14404 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14405 } \
14406 cDiffs++; \
14407 } \
14408 } while (0)
14409# define CHECK_XSTATE_FIELD(a_Field) \
14410 do \
14411 { \
14412 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14413 { \
14414 switch (sizeof(pOrgXState->a_Field)) \
14415 { \
14416 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14417 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14418 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14419 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14420 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14421 } \
14422 cDiffs++; \
14423 } \
14424 } while (0)
14425
14426# define CHECK_BIT_FIELD(a_Field) \
14427 do \
14428 { \
14429 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14430 { \
14431 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14432 cDiffs++; \
14433 } \
14434 } while (0)
14435
14436# define CHECK_SEL(a_Sel) \
14437 do \
14438 { \
14439 CHECK_FIELD(a_Sel.Sel); \
14440 CHECK_FIELD(a_Sel.Attr.u); \
14441 CHECK_FIELD(a_Sel.u64Base); \
14442 CHECK_FIELD(a_Sel.u32Limit); \
14443 CHECK_FIELD(a_Sel.fFlags); \
14444 } while (0)
14445
14446 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14447 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14448
14449#if 1 /* The recompiler doesn't update these the intel way. */
14450 if (fRem)
14451 {
14452 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14453 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14454 pOrgXState->x87.CS = pDebugXState->x87.CS;
14455 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14456 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14457 pOrgXState->x87.DS = pDebugXState->x87.DS;
14458 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14459 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14460 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14461 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14462 }
14463#endif
14464 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14465 {
14466 RTAssertMsg2Weak(" the FPU state differs\n");
14467 cDiffs++;
14468 CHECK_XSTATE_FIELD(x87.FCW);
14469 CHECK_XSTATE_FIELD(x87.FSW);
14470 CHECK_XSTATE_FIELD(x87.FTW);
14471 CHECK_XSTATE_FIELD(x87.FOP);
14472 CHECK_XSTATE_FIELD(x87.FPUIP);
14473 CHECK_XSTATE_FIELD(x87.CS);
14474 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14475 CHECK_XSTATE_FIELD(x87.FPUDP);
14476 CHECK_XSTATE_FIELD(x87.DS);
14477 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14478 CHECK_XSTATE_FIELD(x87.MXCSR);
14479 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14480 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14481 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14482 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14483 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14484 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14485 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14486 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14487 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14488 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14489 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14490 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14491 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14492 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14493 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14494 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14495 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14496 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14497 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14498 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14499 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14500 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14501 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14502 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14503 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14504 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14505 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14506 }
14507 CHECK_FIELD(rip);
14508 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14509 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14510 {
14511 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14512 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14513 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14514 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14515 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14516 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14517 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14518 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14519 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14520 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14521 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14522 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14523 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14524 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14525 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14526 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14527        if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
14528 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14529 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14530 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14531 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14532 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14533 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14534 }
14535
14536 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14537 CHECK_FIELD(rax);
14538 CHECK_FIELD(rcx);
14539 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14540 CHECK_FIELD(rdx);
14541 CHECK_FIELD(rbx);
14542 CHECK_FIELD(rsp);
14543 CHECK_FIELD(rbp);
14544 CHECK_FIELD(rsi);
14545 CHECK_FIELD(rdi);
14546 CHECK_FIELD(r8);
14547 CHECK_FIELD(r9);
14548 CHECK_FIELD(r10);
14549 CHECK_FIELD(r11);
14550 CHECK_FIELD(r12);
14551 CHECK_FIELD(r13);
14552 CHECK_SEL(cs);
14553 CHECK_SEL(ss);
14554 CHECK_SEL(ds);
14555 CHECK_SEL(es);
14556 CHECK_SEL(fs);
14557 CHECK_SEL(gs);
14558 CHECK_FIELD(cr0);
14559
14560    /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14561       the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
14562    /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14563       while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
14564 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14565 {
14566 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14567 { /* ignore */ }
14568 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14569 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14570 && fRem)
14571 { /* ignore */ }
14572 else
14573 CHECK_FIELD(cr2);
14574 }
14575 CHECK_FIELD(cr3);
14576 CHECK_FIELD(cr4);
14577 CHECK_FIELD(dr[0]);
14578 CHECK_FIELD(dr[1]);
14579 CHECK_FIELD(dr[2]);
14580 CHECK_FIELD(dr[3]);
14581 CHECK_FIELD(dr[6]);
14582 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14583 CHECK_FIELD(dr[7]);
14584 CHECK_FIELD(gdtr.cbGdt);
14585 CHECK_FIELD(gdtr.pGdt);
14586 CHECK_FIELD(idtr.cbIdt);
14587 CHECK_FIELD(idtr.pIdt);
14588 CHECK_SEL(ldtr);
14589 CHECK_SEL(tr);
14590 CHECK_FIELD(SysEnter.cs);
14591 CHECK_FIELD(SysEnter.eip);
14592 CHECK_FIELD(SysEnter.esp);
14593 CHECK_FIELD(msrEFER);
14594 CHECK_FIELD(msrSTAR);
14595 CHECK_FIELD(msrPAT);
14596 CHECK_FIELD(msrLSTAR);
14597 CHECK_FIELD(msrCSTAR);
14598 CHECK_FIELD(msrSFMASK);
14599 CHECK_FIELD(msrKERNELGSBASE);
14600
14601 if (cDiffs != 0)
14602 {
14603 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14604 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14605 RTAssertPanic();
14606 static bool volatile s_fEnterDebugger = true;
14607 if (s_fEnterDebugger)
14608 DBGFSTOP(pVM);
14609
14610# if 1 /* Ignore unimplemented instructions for now. */
14611 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14612 rcStrictIem = VINF_SUCCESS;
14613# endif
14614 }
14615# undef CHECK_FIELD
14616# undef CHECK_BIT_FIELD
14617 }
14618
14619 /*
14620 * If the register state compared fine, check the verification event
14621 * records.
14622 */
14623 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14624 {
14625 /*
14626         * Compare verification event records.
14627 * - I/O port accesses should be a 1:1 match.
14628 */
14629 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14630 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14631 while (pIemRec && pOtherRec)
14632 {
14633            /* Since we might miss RAM writes and reads in the other engine, skip IEM-only RAM
14634               records here, verifying any skipped write records directly against guest memory. */
14635 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14636 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14637 && pIemRec->pNext)
14638 {
14639 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14640 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14641 pIemRec = pIemRec->pNext;
14642 }
14643
14644 /* Do the compare. */
14645 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14646 {
14647 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14648 break;
14649 }
14650 bool fEquals;
14651 switch (pIemRec->enmEvent)
14652 {
14653 case IEMVERIFYEVENT_IOPORT_READ:
14654 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14655 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14656 break;
14657 case IEMVERIFYEVENT_IOPORT_WRITE:
14658 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14659 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14660 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14661 break;
14662 case IEMVERIFYEVENT_IOPORT_STR_READ:
14663 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14664 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14665 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14666 break;
14667 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14668 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14669 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14670 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14671 break;
14672 case IEMVERIFYEVENT_RAM_READ:
14673 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14674 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14675 break;
14676 case IEMVERIFYEVENT_RAM_WRITE:
14677 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14678 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14679 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14680 break;
14681 default:
14682 fEquals = false;
14683 break;
14684 }
14685 if (!fEquals)
14686 {
14687 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14688 break;
14689 }
14690
14691 /* advance */
14692 pIemRec = pIemRec->pNext;
14693 pOtherRec = pOtherRec->pNext;
14694 }
14695
14696 /* Ignore extra writes and reads. */
14697 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14698 {
14699 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14700 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14701 pIemRec = pIemRec->pNext;
14702 }
14703 if (pIemRec != NULL)
14704 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14705 else if (pOtherRec != NULL)
14706 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14707 }
14708 IEM_GET_CTX(pVCpu) = pOrgCtx;
14709
14710 return rcStrictIem;
14711}
14712
14713#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14714
14715/* stubs */
14716IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14717{
14718 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14719 return VERR_INTERNAL_ERROR;
14720}
14721
14722IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14723{
14724 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14725 return VERR_INTERNAL_ERROR;
14726}
14727
14728#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14729
14730
14731#ifdef LOG_ENABLED
14732/**
14733 * Logs the current instruction.
14734 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14735 * @param pCtx The current CPU context.
14736 * @param fSameCtx Set if we have the same context information as the VMM,
14737 * clear if we may have already executed an instruction in
14738 * our debug context. When clear, we assume IEMCPU holds
14739 * valid CPU mode info.
14740 */
14741IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14742{
14743# ifdef IN_RING3
14744 if (LogIs2Enabled())
14745 {
14746 char szInstr[256];
14747 uint32_t cbInstr = 0;
14748 if (fSameCtx)
14749 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14750 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14751 szInstr, sizeof(szInstr), &cbInstr);
14752 else
14753 {
14754 uint32_t fFlags = 0;
14755 switch (pVCpu->iem.s.enmCpuMode)
14756 {
14757 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14758 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14759 case IEMMODE_16BIT:
14760 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14761 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14762 else
14763 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14764 break;
14765 }
14766 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14767 szInstr, sizeof(szInstr), &cbInstr);
14768 }
14769
14770 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14771 Log2(("****\n"
14772 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14773 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14774 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14775 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14776 " %s\n"
14777 ,
14778 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14779 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14780 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14781 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14782 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14783 szInstr));
14784
14785 if (LogIs3Enabled())
14786 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14787 }
14788 else
14789# endif
14790 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14791 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14792 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14793}
14794#endif
14795
14796
14797/**
14798 * Makes status code adjustments (pass up from I/O and access handlers)
14799 * as well as maintaining statistics.
14800 *
14801 * @returns Strict VBox status code to pass up.
14802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14803 * @param rcStrict The status from executing an instruction.
14804 */
14805DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14806{
14807 if (rcStrict != VINF_SUCCESS)
14808 {
14809 if (RT_SUCCESS(rcStrict))
14810 {
14811 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14812 || rcStrict == VINF_IOM_R3_IOPORT_READ
14813 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14814 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14815 || rcStrict == VINF_IOM_R3_MMIO_READ
14816 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14817 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14818 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14819 || rcStrict == VINF_CPUM_R3_MSR_READ
14820 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14821 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14822 || rcStrict == VINF_EM_RAW_TO_R3
14823 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14824 || rcStrict == VINF_EM_TRIPLE_FAULT
14825 /* raw-mode / virt handlers only: */
14826 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14827 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14828 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14829 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14830 || rcStrict == VINF_SELM_SYNC_GDT
14831 || rcStrict == VINF_CSAM_PENDING_ACTION
14832 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14833 /* nested hw.virt codes: */
14834 || rcStrict == VINF_SVM_VMEXIT
14835 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14836/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14837 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14838#ifdef VBOX_WITH_NESTED_HWVIRT
14839 if ( rcStrict == VINF_SVM_VMEXIT
14840 && rcPassUp == VINF_SUCCESS)
14841 rcStrict = VINF_SUCCESS;
14842 else
14843#endif
14844 if (rcPassUp == VINF_SUCCESS)
14845 pVCpu->iem.s.cRetInfStatuses++;
14846 else if ( rcPassUp < VINF_EM_FIRST
14847 || rcPassUp > VINF_EM_LAST
14848 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14849 {
14850 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14851 pVCpu->iem.s.cRetPassUpStatus++;
14852 rcStrict = rcPassUp;
14853 }
14854 else
14855 {
14856 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14857 pVCpu->iem.s.cRetInfStatuses++;
14858 }
14859 }
14860 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14861 pVCpu->iem.s.cRetAspectNotImplemented++;
14862 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14863 pVCpu->iem.s.cRetInstrNotImplemented++;
14864#ifdef IEM_VERIFICATION_MODE_FULL
14865 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14866 rcStrict = VINF_SUCCESS;
14867#endif
14868 else
14869 pVCpu->iem.s.cRetErrStatuses++;
14870 }
14871 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14872 {
14873 pVCpu->iem.s.cRetPassUpStatus++;
14874 rcStrict = pVCpu->iem.s.rcPassUp;
14875 }
14876
14877 return rcStrict;
14878}
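
/*
 * A compact restatement of the pass-up rule applied above (hypothetical helper, kept
 * disabled): a pending pass-up status replaces an informational return code when it falls
 * outside the VINF_EM_FIRST..VINF_EM_LAST range, or when it lies inside that range with a
 * numerically lower, i.e. more important, value than the current code.
 */
#if 0
static int32_t iemSketchPickStatus(int32_t rcCurrent, int32_t rcPassUp)
{
    if (rcPassUp == VINF_SUCCESS)
        return rcCurrent;               /* Nothing pending, keep the current status. */
    if (rcPassUp < VINF_EM_FIRST || rcPassUp > VINF_EM_LAST || rcPassUp < rcCurrent)
        return rcPassUp;                /* The pass-up status takes precedence. */
    return rcCurrent;                   /* The current status is the more important one. */
}
#endif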
14879
14880
14881/**
14882 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14883 * IEMExecOneWithPrefetchedByPC.
14884 *
14885 * Similar code is found in IEMExecLots.
14886 *
14887 * @return Strict VBox status code.
14888 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14890 * @param fExecuteInhibit If set, execute the instruction following CLI,
14891 * POP SS and MOV SS,GR.
14892 */
14893DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14894{
14895#ifdef IEM_WITH_SETJMP
14896 VBOXSTRICTRC rcStrict;
14897 jmp_buf JmpBuf;
14898 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14899 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14900 if ((rcStrict = setjmp(JmpBuf)) == 0)
14901 {
14902 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14903 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14904 }
14905 else
14906 pVCpu->iem.s.cLongJumps++;
14907 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14908#else
14909 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14910 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14911#endif
14912 if (rcStrict == VINF_SUCCESS)
14913 pVCpu->iem.s.cInstructions++;
14914 if (pVCpu->iem.s.cActiveMappings > 0)
14915 {
14916 Assert(rcStrict != VINF_SUCCESS);
14917 iemMemRollback(pVCpu);
14918 }
14919//#ifdef DEBUG
14920// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14921//#endif
14922
14923 /* Execute the next instruction as well if a cli, pop ss or
14924 mov ss, Gr has just completed successfully. */
14925 if ( fExecuteInhibit
14926 && rcStrict == VINF_SUCCESS
14927 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14928 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14929 {
14930 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14931 if (rcStrict == VINF_SUCCESS)
14932 {
14933#ifdef LOG_ENABLED
14934 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14935#endif
14936#ifdef IEM_WITH_SETJMP
14937 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14938 if ((rcStrict = setjmp(JmpBuf)) == 0)
14939 {
14940 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14941 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14942 }
14943 else
14944 pVCpu->iem.s.cLongJumps++;
14945 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14946#else
14947 IEM_OPCODE_GET_NEXT_U8(&b);
14948 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14949#endif
14950 if (rcStrict == VINF_SUCCESS)
14951 pVCpu->iem.s.cInstructions++;
14952 if (pVCpu->iem.s.cActiveMappings > 0)
14953 {
14954 Assert(rcStrict != VINF_SUCCESS);
14955 iemMemRollback(pVCpu);
14956 }
14957 }
14958 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14959 }
14960
14961 /*
14962 * Return value fiddling, statistics and sanity assertions.
14963 */
14964 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14965
14966 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14967 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14968#if defined(IEM_VERIFICATION_MODE_FULL)
14969 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14970 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14971 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14972 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14973#endif
14974 return rcStrict;
14975}
14976
14977
14978#ifdef IN_RC
14979/**
14980 * Re-enters raw-mode or ensures we return to ring-3.
14981 *
14982 * @returns rcStrict, maybe modified.
14983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14984 * @param pCtx The current CPU context.
14985 * @param rcStrict The status code returned by the interpreter.
14986 */
14987DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14988{
14989 if ( !pVCpu->iem.s.fInPatchCode
14990 && ( rcStrict == VINF_SUCCESS
14991 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14992 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14993 {
14994 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14995 CPUMRawEnter(pVCpu);
14996 else
14997 {
14998 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14999 rcStrict = VINF_EM_RESCHEDULE;
15000 }
15001 }
15002 return rcStrict;
15003}
15004#endif
15005
15006
15007/**
15008 * Execute one instruction.
15009 *
15010 * @return Strict VBox status code.
15011 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15012 */
15013VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15014{
15015#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15016 if (++pVCpu->iem.s.cVerifyDepth == 1)
15017 iemExecVerificationModeSetup(pVCpu);
15018#endif
15019#ifdef LOG_ENABLED
15020 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15021 iemLogCurInstr(pVCpu, pCtx, true);
15022#endif
15023
15024 /*
15025 * Do the decoding and emulation.
15026 */
15027 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15028 if (rcStrict == VINF_SUCCESS)
15029 rcStrict = iemExecOneInner(pVCpu, true);
15030
15031#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15032 /*
15033 * Assert some sanity.
15034 */
15035 if (pVCpu->iem.s.cVerifyDepth == 1)
15036 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15037 pVCpu->iem.s.cVerifyDepth--;
15038#endif
15039#ifdef IN_RC
15040 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15041#endif
15042 if (rcStrict != VINF_SUCCESS)
15043 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15044 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15045 return rcStrict;
15046}
15047
15048
15049VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15050{
15051 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15052 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15053
15054 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15055 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15056 if (rcStrict == VINF_SUCCESS)
15057 {
15058 rcStrict = iemExecOneInner(pVCpu, true);
15059 if (pcbWritten)
15060 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15061 }
15062
15063#ifdef IN_RC
15064 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15065#endif
15066 return rcStrict;
15067}
15068
15069
15070VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15071 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15072{
15073 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15074 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15075
15076 VBOXSTRICTRC rcStrict;
15077 if ( cbOpcodeBytes
15078 && pCtx->rip == OpcodeBytesPC)
15079 {
15080 iemInitDecoder(pVCpu, false);
15081#ifdef IEM_WITH_CODE_TLB
15082 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15083 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15084 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15085 pVCpu->iem.s.offCurInstrStart = 0;
15086 pVCpu->iem.s.offInstrNextByte = 0;
15087#else
15088 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15089 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15090#endif
15091 rcStrict = VINF_SUCCESS;
15092 }
15093 else
15094 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15095 if (rcStrict == VINF_SUCCESS)
15096 {
15097 rcStrict = iemExecOneInner(pVCpu, true);
15098 }
15099
15100#ifdef IN_RC
15101 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15102#endif
15103 return rcStrict;
15104}
15105
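/*
 * Illustrative usage sketch for IEMExecOneWithPrefetchedByPC (not part of the
 * build): a hypothetical caller that already holds the opcode bytes of the
 * instruction at the current RIP hands them in so the decoder can skip the
 * guest memory fetch.  The opcode bytes below are made up for illustration.
 */
#if 0
    uint8_t const abOpcode[] = { 0x0f, 0x01, 0xd8 }; /* made-up example bytes */
    VBOXSTRICTRC rcStrict2 = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                                          IEM_GET_CTX(pVCpu)->rip, abOpcode, sizeof(abOpcode));
#endif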
15106
15107VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15108{
15109 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15110 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15111
15112 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15113 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15114 if (rcStrict == VINF_SUCCESS)
15115 {
15116 rcStrict = iemExecOneInner(pVCpu, false);
15117 if (pcbWritten)
15118 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15119 }
15120
15121#ifdef IN_RC
15122 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15123#endif
15124 return rcStrict;
15125}
15126
15127
15128VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15129 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15130{
15131 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15132 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15133
15134 VBOXSTRICTRC rcStrict;
15135 if ( cbOpcodeBytes
15136 && pCtx->rip == OpcodeBytesPC)
15137 {
15138 iemInitDecoder(pVCpu, true);
15139#ifdef IEM_WITH_CODE_TLB
15140 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15141 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15142 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15143 pVCpu->iem.s.offCurInstrStart = 0;
15144 pVCpu->iem.s.offInstrNextByte = 0;
15145#else
15146 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15147 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15148#endif
15149 rcStrict = VINF_SUCCESS;
15150 }
15151 else
15152 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15153 if (rcStrict == VINF_SUCCESS)
15154 rcStrict = iemExecOneInner(pVCpu, false);
15155
15156#ifdef IN_RC
15157 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15158#endif
15159 return rcStrict;
15160}
15161
15162
15163/**
15164 * For debugging DISGetParamSize; may come in handy.
15165 *
15166 * @returns Strict VBox status code.
15167 * @param pVCpu The cross context virtual CPU structure of the
15168 * calling EMT.
15169 * @param pCtxCore The context core structure.
15170 * @param OpcodeBytesPC The PC of the opcode bytes.
15171 * @param pvOpcodeBytes Prefetched opcode bytes.
15172 * @param cbOpcodeBytes Number of prefetched bytes.
15173 * @param pcbWritten Where to return the number of bytes written.
15174 * Optional.
15175 */
15176VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15177 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15178 uint32_t *pcbWritten)
15179{
15180 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15181 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15182
15183 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15184 VBOXSTRICTRC rcStrict;
15185 if ( cbOpcodeBytes
15186 && pCtx->rip == OpcodeBytesPC)
15187 {
15188 iemInitDecoder(pVCpu, true);
15189#ifdef IEM_WITH_CODE_TLB
15190 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15191 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15192 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15193 pVCpu->iem.s.offCurInstrStart = 0;
15194 pVCpu->iem.s.offInstrNextByte = 0;
15195#else
15196 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15197 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15198#endif
15199 rcStrict = VINF_SUCCESS;
15200 }
15201 else
15202 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15203 if (rcStrict == VINF_SUCCESS)
15204 {
15205 rcStrict = iemExecOneInner(pVCpu, false);
15206 if (pcbWritten)
15207 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15208 }
15209
15210#ifdef IN_RC
15211 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15212#endif
15213 return rcStrict;
15214}
15215
15216
15217VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15218{
15219 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15220
15221#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15222 /*
15223 * See if there is an interrupt pending in TRPM, inject it if we can.
15224 */
15225 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15226# ifdef IEM_VERIFICATION_MODE_FULL
15227 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15228# endif
15229
15230 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15231#if defined(VBOX_WITH_NESTED_HWVIRT)
15232 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15233 if (fIntrEnabled)
15234 {
15235 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15236 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15237 else
15238 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15239 }
15240#else
15241 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15242#endif
15243 if ( fIntrEnabled
15244 && TRPMHasTrap(pVCpu)
15245 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15246 {
15247 uint8_t u8TrapNo;
15248 TRPMEVENT enmType;
15249 RTGCUINT uErrCode;
15250 RTGCPTR uCr2;
15251 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15252 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15253 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15254 TRPMResetTrap(pVCpu);
15255 }
15256
15257 /*
15258 * Log the state.
15259 */
15260# ifdef LOG_ENABLED
15261 iemLogCurInstr(pVCpu, pCtx, true);
15262# endif
15263
15264 /*
15265 * Do the decoding and emulation.
15266 */
15267 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15268 if (rcStrict == VINF_SUCCESS)
15269 rcStrict = iemExecOneInner(pVCpu, true);
15270
15271 /*
15272 * Assert some sanity.
15273 */
15274 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15275
15276 /*
15277 * Log and return.
15278 */
15279 if (rcStrict != VINF_SUCCESS)
15280 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15281 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15282 if (pcInstructions)
15283 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15284 return rcStrict;
15285
15286#else /* Not verification mode */
15287
15288 /*
15289 * See if there is an interrupt pending in TRPM, inject it if we can.
15290 */
15291 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15292# ifdef IEM_VERIFICATION_MODE_FULL
15293 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15294# endif
15295
15296 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15297#if defined(VBOX_WITH_NESTED_HWVIRT)
15298 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15299 if (fIntrEnabled)
15300 {
15301 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15302 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15303 else
15304 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15305 }
15306#else
15307 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15308#endif
15309 if ( fIntrEnabled
15310 && TRPMHasTrap(pVCpu)
15311 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15312 {
15313 uint8_t u8TrapNo;
15314 TRPMEVENT enmType;
15315 RTGCUINT uErrCode;
15316 RTGCPTR uCr2;
15317 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15318 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15319 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15320 TRPMResetTrap(pVCpu);
15321 }
15322
15323 /*
15324 * Initial decoder init w/ prefetch, then setup setjmp.
15325 */
15326 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15327 if (rcStrict == VINF_SUCCESS)
15328 {
15329# ifdef IEM_WITH_SETJMP
15330 jmp_buf JmpBuf;
15331 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15332 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15333 pVCpu->iem.s.cActiveMappings = 0;
15334 if ((rcStrict = setjmp(JmpBuf)) == 0)
15335# endif
15336 {
15337 /*
15338 * The run loop. We limit ourselves to 4096 instructions right now.
15339 */
15340 PVM pVM = pVCpu->CTX_SUFF(pVM);
15341 uint32_t cInstr = 4096;
15342 for (;;)
15343 {
15344 /*
15345 * Log the state.
15346 */
15347# ifdef LOG_ENABLED
15348 iemLogCurInstr(pVCpu, pCtx, true);
15349# endif
15350
15351 /*
15352 * Do the decoding and emulation.
15353 */
15354 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15355 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15356 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15357 {
15358 Assert(pVCpu->iem.s.cActiveMappings == 0);
15359 pVCpu->iem.s.cInstructions++;
15360 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15361 {
15362 uint32_t fCpu = pVCpu->fLocalForcedActions
15363 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15364 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15365 | VMCPU_FF_TLB_FLUSH
15366# ifdef VBOX_WITH_RAW_MODE
15367 | VMCPU_FF_TRPM_SYNC_IDT
15368 | VMCPU_FF_SELM_SYNC_TSS
15369 | VMCPU_FF_SELM_SYNC_GDT
15370 | VMCPU_FF_SELM_SYNC_LDT
15371# endif
15372 | VMCPU_FF_INHIBIT_INTERRUPTS
15373 | VMCPU_FF_BLOCK_NMIS
15374 | VMCPU_FF_UNHALT ));
15375
15376 if (RT_LIKELY( ( !fCpu
15377 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15378 && !pCtx->rflags.Bits.u1IF) )
15379 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15380 {
15381 if (cInstr-- > 0)
15382 {
15383 Assert(pVCpu->iem.s.cActiveMappings == 0);
15384 iemReInitDecoder(pVCpu);
15385 continue;
15386 }
15387 }
15388 }
15389 Assert(pVCpu->iem.s.cActiveMappings == 0);
15390 }
15391 else if (pVCpu->iem.s.cActiveMappings > 0)
15392 iemMemRollback(pVCpu);
15393 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15394 break;
15395 }
15396 }
15397# ifdef IEM_WITH_SETJMP
15398 else
15399 {
15400 if (pVCpu->iem.s.cActiveMappings > 0)
15401 iemMemRollback(pVCpu);
15402 pVCpu->iem.s.cLongJumps++;
15403# ifdef VBOX_WITH_NESTED_HWVIRT
15404 /*
15405 * When a nested-guest causes an exception intercept when fetching memory
15406 * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
15407 * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
15408 */
15409 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15410# endif
15411 }
15412 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15413# endif
15414
15415 /*
15416 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15417 */
15418 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15419 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15420# if defined(IEM_VERIFICATION_MODE_FULL)
15421 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15422 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15423 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15424 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15425# endif
15426 }
15427# ifdef VBOX_WITH_NESTED_HWVIRT
15428 else
15429 {
15430 /*
15431 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15432 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15433 */
15434 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15435 }
15436# endif
15437
15438 /*
15439 * Maybe re-enter raw-mode and log.
15440 */
15441# ifdef IN_RC
15442 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15443# endif
15444 if (rcStrict != VINF_SUCCESS)
15445 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15446 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15447 if (pcInstructions)
15448 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15449 return rcStrict;
15450#endif /* Not verification mode */
15451}
15452
15453
15454
15455/**
15456 * Injects a trap, fault, abort, software interrupt or external interrupt.
15457 *
15458 * The parameter list matches TRPMQueryTrapAll pretty closely.
15459 *
15460 * @returns Strict VBox status code.
15461 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15462 * @param u8TrapNo The trap number.
15463 * @param enmType What type is it (trap/fault/abort), software
15464 * interrupt or hardware interrupt.
15465 * @param uErrCode The error code if applicable.
15466 * @param uCr2 The CR2 value if applicable.
15467 * @param cbInstr The instruction length (only relevant for
15468 * software interrupts).
15469 */
15470VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15471 uint8_t cbInstr)
15472{
15473 iemInitDecoder(pVCpu, false);
15474#ifdef DBGFTRACE_ENABLED
15475 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15476 u8TrapNo, enmType, uErrCode, uCr2);
15477#endif
15478
15479 uint32_t fFlags;
15480 switch (enmType)
15481 {
15482 case TRPM_HARDWARE_INT:
15483 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15484 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15485 uErrCode = uCr2 = 0;
15486 break;
15487
15488 case TRPM_SOFTWARE_INT:
15489 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15490 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15491 uErrCode = uCr2 = 0;
15492 break;
15493
15494 case TRPM_TRAP:
15495 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15496 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15497 if (u8TrapNo == X86_XCPT_PF)
15498 fFlags |= IEM_XCPT_FLAGS_CR2;
15499 switch (u8TrapNo)
15500 {
15501 case X86_XCPT_DF:
15502 case X86_XCPT_TS:
15503 case X86_XCPT_NP:
15504 case X86_XCPT_SS:
15505 case X86_XCPT_PF:
15506 case X86_XCPT_AC:
15507 fFlags |= IEM_XCPT_FLAGS_ERR;
15508 break;
15509
15510 case X86_XCPT_NMI:
15511 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15512 break;
15513 }
15514 break;
15515
15516 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15517 }
15518
15519 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15520}
15521
15522
15523/**
15524 * Injects the active TRPM event.
15525 *
15526 * @returns Strict VBox status code.
15527 * @param pVCpu The cross context virtual CPU structure.
15528 */
15529VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15530{
15531#ifndef IEM_IMPLEMENTS_TASKSWITCH
15532 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15533#else
15534 uint8_t u8TrapNo;
15535 TRPMEVENT enmType;
15536 RTGCUINT uErrCode;
15537 RTGCUINTPTR uCr2;
15538 uint8_t cbInstr;
15539 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15540 if (RT_FAILURE(rc))
15541 return rc;
15542
15543 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15544
15545 /** @todo Are there any other codes that imply the event was successfully
15546 * delivered to the guest? See @bugref{6607}. */
15547 if ( rcStrict == VINF_SUCCESS
15548 || rcStrict == VINF_IEM_RAISED_XCPT)
15549 {
15550 TRPMResetTrap(pVCpu);
15551 }
15552 return rcStrict;
15553#endif
15554}
15555
15556
15557VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15558{
15559 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15560 return VERR_NOT_IMPLEMENTED;
15561}
15562
15563
15564VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15565{
15566 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15567 return VERR_NOT_IMPLEMENTED;
15568}
15569
15570
15571#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15572/**
15573 * Executes an IRET instruction with default operand size.
15574 *
15575 * This is for PATM.
15576 *
15577 * @returns VBox status code.
15578 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15579 * @param pCtxCore The register frame.
15580 */
15581VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15582{
15583 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15584
15585 iemCtxCoreToCtx(pCtx, pCtxCore);
15586 iemInitDecoder(pVCpu);
15587 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15588 if (rcStrict == VINF_SUCCESS)
15589 iemCtxToCtxCore(pCtxCore, pCtx);
15590 else
15591 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15592 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15593 return rcStrict;
15594}
15595#endif
15596
15597
15598/**
15599 * Macro used by the IEMExec* method to check the given instruction length.
15600 *
15601 * Will return on failure!
15602 *
15603 * @param a_cbInstr The given instruction length.
15604 * @param a_cbMin The minimum length.
15605 */
15606#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15607 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15608 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15609
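/*
 * Worked example (illustrative only, not part of the build) of how the single
 * unsigned comparison in IEMEXEC_ASSERT_INSTR_LEN_RETURN covers both bounds at
 * once for a_cbMin = 3: a too-short length wraps around to a huge unsigned
 * value and fails the test, while anything in [a_cbMin, 15] passes.
 */
#if 0
AssertCompile((unsigned)2  - (unsigned)3 >  (unsigned)15 - (unsigned)3); /*  2 -> 0xffffffff > 12: rejected */
AssertCompile((unsigned)3  - (unsigned)3 <= (unsigned)15 - (unsigned)3); /*  3 ->  0 <= 12: accepted */
AssertCompile((unsigned)15 - (unsigned)3 <= (unsigned)15 - (unsigned)3); /* 15 -> 12 <= 12: accepted */
AssertCompile((unsigned)16 - (unsigned)3 >  (unsigned)15 - (unsigned)3); /* 16 -> 13 >  12: rejected */
#endif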
15610
15611/**
15612 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15613 *
15614 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15615 *
15616 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
15617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15618 * @param rcStrict The status code to fiddle.
15619 */
15620DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15621{
15622 iemUninitExec(pVCpu);
15623#ifdef IN_RC
15624 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15625 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15626#else
15627 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15628#endif
15629}
15630
15631
15632/**
15633 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15634 *
15635 * This API ASSUMES that the caller has already verified that the guest code is
15636 * allowed to access the I/O port. (The I/O port is in the DX register in the
15637 * guest state.)
15638 *
15639 * @returns Strict VBox status code.
15640 * @param pVCpu The cross context virtual CPU structure.
15641 * @param cbValue The size of the I/O port access (1, 2, or 4).
15642 * @param enmAddrMode The addressing mode.
15643 * @param fRepPrefix Indicates whether a repeat prefix is used
15644 * (doesn't matter which for this instruction).
15645 * @param cbInstr The instruction length in bytes.
15646 * @param iEffSeg The effective segment register number (index).
15647 * @param fIoChecked Whether the access to the I/O port has been
15648 * checked or not. It's typically checked in the
15649 * HM scenario.
15650 */
15651VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15652 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15653{
15654 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15655 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15656
15657 /*
15658 * State init.
15659 */
15660 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15661
15662 /*
15663 * Switch orgy for getting to the right handler.
15664 */
15665 VBOXSTRICTRC rcStrict;
15666 if (fRepPrefix)
15667 {
15668 switch (enmAddrMode)
15669 {
15670 case IEMMODE_16BIT:
15671 switch (cbValue)
15672 {
15673 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15674 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15675 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15676 default:
15677 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15678 }
15679 break;
15680
15681 case IEMMODE_32BIT:
15682 switch (cbValue)
15683 {
15684 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15685 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15686 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15687 default:
15688 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15689 }
15690 break;
15691
15692 case IEMMODE_64BIT:
15693 switch (cbValue)
15694 {
15695 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15696 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15697 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15698 default:
15699 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15700 }
15701 break;
15702
15703 default:
15704 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15705 }
15706 }
15707 else
15708 {
15709 switch (enmAddrMode)
15710 {
15711 case IEMMODE_16BIT:
15712 switch (cbValue)
15713 {
15714 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15715 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15716 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15717 default:
15718 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15719 }
15720 break;
15721
15722 case IEMMODE_32BIT:
15723 switch (cbValue)
15724 {
15725 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15726 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15727 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15728 default:
15729 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15730 }
15731 break;
15732
15733 case IEMMODE_64BIT:
15734 switch (cbValue)
15735 {
15736 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15737 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15738 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15739 default:
15740 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15741 }
15742 break;
15743
15744 default:
15745 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15746 }
15747 }
15748
15749 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15750}
15751
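/*
 * Illustrative usage sketch (not part of the build): a hypothetical HM exit
 * handler forwarding a REP OUTSB with 32-bit addressing and DS as the
 * effective segment, where the I/O port permission check has already passed.
 * Here cbInstr stands for the caller's decoded instruction length.
 */
#if 0
    VBOXSTRICTRC rcStrict2 = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                                  cbInstr, X86_SREG_DS, true /*fIoChecked*/);
#endif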
15752
15753/**
15754 * Interface for HM and EM for executing string I/O IN (read) instructions.
15755 *
15756 * This API ASSUMES that the caller has already verified that the guest code is
15757 * allowed to access the I/O port. (The I/O port is in the DX register in the
15758 * guest state.)
15759 *
15760 * @returns Strict VBox status code.
15761 * @param pVCpu The cross context virtual CPU structure.
15762 * @param cbValue The size of the I/O port access (1, 2, or 4).
15763 * @param enmAddrMode The addressing mode.
15764 * @param fRepPrefix Indicates whether a repeat prefix is used
15765 * (doesn't matter which for this instruction).
15766 * @param cbInstr The instruction length in bytes.
15767 * @param fIoChecked Whether the access to the I/O port has been
15768 * checked or not. It's typically checked in the
15769 * HM scenario.
15770 */
15771VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15772 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15773{
15774 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15775
15776 /*
15777 * State init.
15778 */
15779 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15780
15781 /*
15782 * Switch orgy for getting to the right handler.
15783 */
15784 VBOXSTRICTRC rcStrict;
15785 if (fRepPrefix)
15786 {
15787 switch (enmAddrMode)
15788 {
15789 case IEMMODE_16BIT:
15790 switch (cbValue)
15791 {
15792 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15793 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15794 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15795 default:
15796 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15797 }
15798 break;
15799
15800 case IEMMODE_32BIT:
15801 switch (cbValue)
15802 {
15803 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15804 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15805 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15806 default:
15807 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15808 }
15809 break;
15810
15811 case IEMMODE_64BIT:
15812 switch (cbValue)
15813 {
15814 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15815 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15816 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15817 default:
15818 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15819 }
15820 break;
15821
15822 default:
15823 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15824 }
15825 }
15826 else
15827 {
15828 switch (enmAddrMode)
15829 {
15830 case IEMMODE_16BIT:
15831 switch (cbValue)
15832 {
15833 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15834 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15835 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15836 default:
15837 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15838 }
15839 break;
15840
15841 case IEMMODE_32BIT:
15842 switch (cbValue)
15843 {
15844 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15845 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15846 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15847 default:
15848 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15849 }
15850 break;
15851
15852 case IEMMODE_64BIT:
15853 switch (cbValue)
15854 {
15855 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15856 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15857 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15858 default:
15859 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15860 }
15861 break;
15862
15863 default:
15864 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15865 }
15866 }
15867
15868 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15869}
15870
15871
15872/**
15873 * Interface for raw-mode to execute an OUT (write) instruction.
15874 *
15875 * @returns Strict VBox status code.
15876 * @param pVCpu The cross context virtual CPU structure.
15877 * @param cbInstr The instruction length in bytes.
15878 * @param u16Port The port to write to.
15879 * @param cbReg The register size.
15880 *
15881 * @remarks In ring-0 not all of the state needs to be synced in.
15882 */
15883VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15884{
15885 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15886 Assert(cbReg <= 4 && cbReg != 3);
15887
15888 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15889 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15890 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15891}
15892
15893
15894/**
15895 * Interface for raw-mode to execute an IN (read) instruction.
15896 *
15897 * @returns Strict VBox status code.
15898 * @param pVCpu The cross context virtual CPU structure.
15899 * @param cbInstr The instruction length in bytes.
15900 * @param u16Port The port to read.
15901 * @param cbReg The register size.
15902 */
15903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15904{
15905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15906 Assert(cbReg <= 4 && cbReg != 3);
15907
15908 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15909 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15910 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15911}
15912
15913
15914/**
15915 * Interface for HM and EM to write to a CRx register.
15916 *
15917 * @returns Strict VBox status code.
15918 * @param pVCpu The cross context virtual CPU structure.
15919 * @param cbInstr The instruction length in bytes.
15920 * @param iCrReg The control register number (destination).
15921 * @param iGReg The general purpose register number (source).
15922 *
15923 * @remarks In ring-0 not all of the state needs to be synced in.
15924 */
15925VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15926{
15927 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15928 Assert(iCrReg < 16);
15929 Assert(iGReg < 16);
15930
15931 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15932 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15933 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15934}
15935
15936
15937/**
15938 * Interface for HM and EM to read from a CRx register.
15939 *
15940 * @returns Strict VBox status code.
15941 * @param pVCpu The cross context virtual CPU structure.
15942 * @param cbInstr The instruction length in bytes.
15943 * @param iGReg The general purpose register number (destination).
15944 * @param iCrReg The control register number (source).
15945 *
15946 * @remarks In ring-0 not all of the state needs to be synced in.
15947 */
15948VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15949{
15950 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15951 Assert(iCrReg < 16);
15952 Assert(iGReg < 16);
15953
15954 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15955 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15956 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15957}
15958
15959
15960/**
15961 * Interface for HM and EM to clear the CR0[TS] bit.
15962 *
15963 * @returns Strict VBox status code.
15964 * @param pVCpu The cross context virtual CPU structure.
15965 * @param cbInstr The instruction length in bytes.
15966 *
15967 * @remarks In ring-0 not all of the state needs to be synced in.
15968 */
15969VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15970{
15971 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15972
15973 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15974 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15975 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15976}
15977
15978
15979/**
15980 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15981 *
15982 * @returns Strict VBox status code.
15983 * @param pVCpu The cross context virtual CPU structure.
15984 * @param cbInstr The instruction length in bytes.
15985 * @param uValue The value to load into CR0.
15986 *
15987 * @remarks In ring-0 not all of the state needs to be synced in.
15988 */
15989VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15990{
15991 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15992
15993 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15994 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15996}
15997
15998
15999/**
16000 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16001 *
16002 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16003 *
16004 * @returns Strict VBox status code.
16005 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16006 * @param cbInstr The instruction length in bytes.
16007 * @remarks In ring-0 not all of the state needs to be synced in.
16008 * @thread EMT(pVCpu)
16009 */
16010VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16011{
16012 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16013
16014 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16015 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16016 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16017}
16018
16019
16020/**
16021 * Interface for HM and EM to emulate the INVLPG instruction.
16022 *
16023 * @param pVCpu The cross context virtual CPU structure.
16024 * @param cbInstr The instruction length in bytes.
16025 * @param GCPtrPage The effective address of the page to invalidate.
16026 *
16027 * @remarks In ring-0 not all of the state needs to be synced in.
16028 */
16029VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16030{
16031 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16032
16033 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16034 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16036}
16037
16038
16039/**
16040 * Checks if IEM is in the process of delivering an event (interrupt or
16041 * exception).
16042 *
16043 * @returns true if we're in the process of raising an interrupt or exception,
16044 * false otherwise.
16045 * @param pVCpu The cross context virtual CPU structure.
16046 * @param puVector Where to store the vector associated with the
16047 * currently delivered event, optional.
16048 * @param pfFlags Where to store the event delivery flags (see
16049 * IEM_XCPT_FLAGS_XXX), optional.
16050 * @param puErr Where to store the error code associated with the
16051 * event, optional.
16052 * @param puCr2 Where to store the CR2 associated with the event,
16053 * optional.
16054 * @remarks The caller should check the flags to determine if the error code and
16055 * CR2 are valid for the event.
16056 */
16057VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16058{
16059 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16060 if (fRaisingXcpt)
16061 {
16062 if (puVector)
16063 *puVector = pVCpu->iem.s.uCurXcpt;
16064 if (pfFlags)
16065 *pfFlags = pVCpu->iem.s.fCurXcpt;
16066 if (puErr)
16067 *puErr = pVCpu->iem.s.uCurXcptErr;
16068 if (puCr2)
16069 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16070 }
16071 return fRaisingXcpt;
16072}
16073
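/*
 * Illustrative usage sketch (not part of the build): a hypothetical caller
 * querying whether IEM is in the middle of delivering an event and logging the
 * vector; the returned flags tell it whether the error code field is valid.
 */
#if 0
    uint8_t uVector; uint32_t fXcptFlags, uXcptErr; uint64_t uXcptCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fXcptFlags, &uXcptErr, &uXcptCr2))
        Log(("Delivering vector %#x fFlags=%#x%s\n", uVector, fXcptFlags,
             (fXcptFlags & IEM_XCPT_FLAGS_ERR) ? " (error code valid)" : ""));
#endif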
16074#ifdef VBOX_WITH_NESTED_HWVIRT
16075/**
16076 * Interface for HM and EM to emulate the CLGI instruction.
16077 *
16078 * @returns Strict VBox status code.
16079 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16080 * @param cbInstr The instruction length in bytes.
16081 * @thread EMT(pVCpu)
16082 */
16083VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16084{
16085 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16086
16087 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16089 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16090}
16091
16092
16093/**
16094 * Interface for HM and EM to emulate the STGI instruction.
16095 *
16096 * @returns Strict VBox status code.
16097 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16098 * @param cbInstr The instruction length in bytes.
16099 * @thread EMT(pVCpu)
16100 */
16101VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16102{
16103 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16104
16105 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16106 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16107 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16108}
16109
16110
16111/**
16112 * Interface for HM and EM to emulate the VMLOAD instruction.
16113 *
16114 * @returns Strict VBox status code.
16115 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16116 * @param cbInstr The instruction length in bytes.
16117 * @thread EMT(pVCpu)
16118 */
16119VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16120{
16121 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16122
16123 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16124 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16125 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16126}
16127
16128
16129/**
16130 * Interface for HM and EM to emulate the VMSAVE instruction.
16131 *
16132 * @returns Strict VBox status code.
16133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16134 * @param cbInstr The instruction length in bytes.
16135 * @thread EMT(pVCpu)
16136 */
16137VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16138{
16139 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16140
16141 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16142 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16143 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16144}
16145
16146
16147/**
16148 * Interface for HM and EM to emulate the INVLPGA instruction.
16149 *
16150 * @returns Strict VBox status code.
16151 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16152 * @param cbInstr The instruction length in bytes.
16153 * @thread EMT(pVCpu)
16154 */
16155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16156{
16157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16158
16159 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16160 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16161 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16162}
16163
16164
16165/**
16166 * Interface for HM and EM to emulate the VMRUN instruction.
16167 *
16168 * @returns Strict VBox status code.
16169 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16170 * @param cbInstr The instruction length in bytes.
16171 * @thread EMT(pVCpu)
16172 */
16173VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16174{
16175 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16176
16177 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16178 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16179 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16180}
16181
16182
16183/**
16184 * Interface for HM and EM to emulate \#VMEXIT.
16185 *
16186 * @returns Strict VBox status code.
16187 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16188 * @param uExitCode The exit code.
16189 * @param uExitInfo1 The exit info 1 field.
16190 * @param uExitInfo2 The exit info 2 field.
16191 * @thread EMT(pVCpu)
16192 */
16193VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16194{
16195 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16196 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16197}
16198#endif /* VBOX_WITH_NESTED_HWVIRT */
16199
16200#ifdef IN_RING3
16201
16202/**
16203 * Handles the unlikely and probably fatal merge cases.
16204 *
16205 * @returns Merged status code.
16206 * @param rcStrict Current EM status code.
16207 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16208 * with @a rcStrict.
16209 * @param iMemMap The memory mapping index. For error reporting only.
16210 * @param pVCpu The cross context virtual CPU structure of the calling
16211 * thread, for error reporting only.
16212 */
16213DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16214 unsigned iMemMap, PVMCPU pVCpu)
16215{
16216 if (RT_FAILURE_NP(rcStrict))
16217 return rcStrict;
16218
16219 if (RT_FAILURE_NP(rcStrictCommit))
16220 return rcStrictCommit;
16221
16222 if (rcStrict == rcStrictCommit)
16223 return rcStrictCommit;
16224
16225 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16226 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16227 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16228 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16229 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16230 return VERR_IOM_FF_STATUS_IPE;
16231}
16232
16233
16234/**
16235 * Helper for IOMR3ProcessForceFlag.
16236 *
16237 * @returns Merged status code.
16238 * @param rcStrict Current EM status code.
16239 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16240 * with @a rcStrict.
16241 * @param iMemMap The memory mapping index. For error reporting only.
16242 * @param pVCpu The cross context virtual CPU structure of the calling
16243 * thread, for error reporting only.
16244 */
16245DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16246{
16247 /* Simple. */
16248 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16249 return rcStrictCommit;
16250
16251 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16252 return rcStrict;
16253
16254 /* EM scheduling status codes. */
16255 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16256 && rcStrict <= VINF_EM_LAST))
16257 {
16258 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16259 && rcStrictCommit <= VINF_EM_LAST))
16260 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16261 }
16262
16263 /* Unlikely */
16264 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16265}
16266
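/*
 * Illustrative sketch (not part of the build) of merge outcomes that follow
 * directly from the branches of iemR3MergeStatus above; pVCpu stands for the
 * caller's VCPU pointer and the mapping index is irrelevant for these cases.
 */
#if 0
    VBOXSTRICTRC rcMerged;
    rcMerged = iemR3MergeStatus(VINF_SUCCESS,       VINF_EM_RESCHEDULE, 0, pVCpu); /* -> VINF_EM_RESCHEDULE */
    rcMerged = iemR3MergeStatus(VINF_EM_RAW_TO_R3,  VINF_EM_RESCHEDULE, 0, pVCpu); /* -> VINF_EM_RESCHEDULE */
    rcMerged = iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_SUCCESS,       0, pVCpu); /* -> VINF_EM_RESCHEDULE */
#endif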
16267
16268/**
16269 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16270 *
16271 * @returns Merge between @a rcStrict and what the commit operation returned.
16272 * @param pVM The cross context VM structure.
16273 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16274 * @param rcStrict The status code returned by ring-0 or raw-mode.
16275 */
16276VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16277{
16278 /*
16279 * Reset the pending commit.
16280 */
16281 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16282 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16283 ("%#x %#x %#x\n",
16284 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16285 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16286
16287 /*
16288 * Commit the pending bounce buffers (usually just one).
16289 */
16290 unsigned cBufs = 0;
16291 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16292 while (iMemMap-- > 0)
16293 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16294 {
16295 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16296 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16297 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16298
16299 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16300 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16301 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16302
16303 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16304 {
16305 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16306 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16307 pbBuf,
16308 cbFirst,
16309 PGMACCESSORIGIN_IEM);
16310 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16311 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16312 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16313 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16314 }
16315
16316 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16317 {
16318 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16319 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16320 pbBuf + cbFirst,
16321 cbSecond,
16322 PGMACCESSORIGIN_IEM);
16323 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16324 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16325 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16326 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16327 }
16328 cBufs++;
16329 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16330 }
16331
16332 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16333 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16334 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16335 pVCpu->iem.s.cActiveMappings = 0;
16336 return rcStrict;
16337}
16338
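/*
 * Illustrative usage sketch (not part of the build): how a hypothetical ring-3
 * run loop might consume VMCPU_FF_IEM after returning from ring-0/raw-mode so
 * the pending bounce-buffer writes get committed and merged into its status.
 */
#if 0
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif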
16339#endif /* IN_RING3 */
16340