VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 66257

Last change on this file since 66257 was 66159, checked in by vboxsync, 8 years ago

IEM,bs3-cpu-generated-1: Implemented the BOUND instruction and prepared for EVEX prefix (AVX-512).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 586.8 KB
1/* $Id: IEMAll.cpp 66159 2017-03-17 22:16:24Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the "IEM" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
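/* Illustrative sketch, not part of the original file: how the log levels
   listed above are typically combined in a helper.  The function and its
   arguments are hypothetical; Log, LogFlow and Log3 are the VBox/log.h macros
   and honor the LOG_GROUP_IEM group selected below. */
#if 0
static void iemExampleLogUsage(PVMCPU pVCpu, uint16_t uErr)
{
    LogFlow(("iemExampleLogUsage: enter\n"));                   /* Flow   : basic enter/exit info. */
    Log3(("iemExampleLogUsage: uCpl=%u\n", pVCpu->iem.s.uCpl)); /* Level 3: detailed state info. */
    if (uErr)
        Log(("iemExampleLogUsage: raising #GP(%#x)\n", uErr));  /* Level 1: exceptions and such. */
}
#endif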
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
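/* Illustrative sketch, not part of the original file: typical use of the
   IEMSELDESC union once iemMemFetchSelDesc (forward declared further down)
   has filled it in.  The wrapper function is hypothetical. */
#if 0
static VBOXSTRICTRC iemExampleCheckDescriptor(PVMCPU pVCpu, uint16_t uSel)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1Present)     /* The legacy view covers the common attribute bits. */
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
    return VINF_SUCCESS;                /* Desc.Long is for 16-byte system descriptors in long mode. */
}
#endif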
217
218
219/*********************************************************************************************************************************
220* Defined Constants And Macros *
221*********************************************************************************************************************************/
222/** @def IEM_WITH_SETJMP
223 * Enables alternative status code handling using setjmps.
224 *
225 * This adds a bit of expense via the setjmp() call since it saves all the
226 * non-volatile registers. However, it eliminates return code checks and allows
227 * for more optimal return value passing (return regs instead of stack buffer).
228 */
229#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
230# define IEM_WITH_SETJMP
231#endif
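/* Illustrative sketch, not part of the original file: what IEM_WITH_SETJMP
   changes for a caller.  The two fetch helpers are named after the opcode
   fetch workers defined later in this file. */
#if 0
    /* Status code style: every step returns a VBOXSTRICTRC that must be checked. */
    uint8_t bOpcode;
    VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &bOpcode);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Setjmp style: failures longjmp back to the frame set up by the top level
       executor, so the fetched value can simply be returned in a register. */
    uint8_t bOpcode2 = iemOpcodeGetNextU8Jmp(pVCpu);
#endif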
232
233/** Temporary hack to disable the double execution. Will be removed in favor
234 * of a dedicated execution mode in EM. */
235//#define IEM_VERIFICATION_MODE_NO_REM
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
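/* Illustrative sketch, not part of the original file: how the two macros
   above are used in a fully enumerated switch; the worker functions are
   hypothetical. */
#if 0
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: return iemExampleWorker16(pVCpu);
        case IEMMODE_32BIT: return iemExampleWorker32(pVCpu);
        case IEMMODE_64BIT: return iemExampleWorker64(pVCpu);
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* Asserts and returns VERR_IPE_NOT_REACHED_DEFAULT_CASE. */
    }
#endif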
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
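/* Illustrative sketch, not part of the original file: how FNIEMOP_DEF and
   FNIEMOP_CALL pair up in an opcode decoder.  The handler name is
   hypothetical; IEM_OPCODE_GET_NEXT_U8 and g_apfnOneByteMap are the fetch
   macro and dispatch table used elsewhere in this file. */
#if 0
FNIEMOP_DEF(iemOp_ExampleDispatch)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);      /* Fetch the next opcode byte. */
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);   /* Dispatch via the one-byte opcode map. */
}
#endif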
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
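/* Illustrative sketch, not part of the original file: the feature getters
   above are typically used to gate decoding.  The fSse2 member name is an
   assumption about the CPUMFEATURES layout. */
#if 0
    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2)
        return iemRaiseUndefinedOpcode(pVCpu);  /* #UD when the guest CPU profile lacks the feature. */
#endif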
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/** @def IEM_USE_UNALIGNED_DATA_ACCESS
363 * Use unaligned accesses instead of elaborate byte assembly. */
364#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
365# define IEM_USE_UNALIGNED_DATA_ACCESS
366#endif
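/* Illustrative sketch, not part of the original file: the two strategies the
   define above selects between; pbBuf and off are hypothetical. */
#if 0
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    uint32_t const u32 = *(uint32_t const *)&pbBuf[off];        /* Single unaligned load. */
# else
    uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbBuf[off    ], pbBuf[off + 1],
                                             pbBuf[off + 2], pbBuf[off + 3]); /* Byte assembly. */
# endif
#endif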
367
368#ifdef VBOX_WITH_NESTED_HWVIRT
369/**
370 * Check the common SVM instruction preconditions.
371 */
372#define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
373 do { \
374 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
375 { \
376 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
377 return iemRaiseUndefinedOpcode(pVCpu); \
378 } \
379 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
380 { \
381 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
382 return iemRaiseUndefinedOpcode(pVCpu); \
383 } \
384 if (pVCpu->iem.s.uCpl != 0) \
385 { \
386 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
387 return iemRaiseGeneralProtectionFault0(pVCpu); \
388 } \
389 } while (0)
390
391/**
392 * Check if SVM is enabled.
393 */
394#define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if an SVM control/instruction intercept is set.
398 */
399#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
400
401/**
402 * Check if an SVM read CRx intercept is set.
403 */
404#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
405
406/**
407 * Check if an SVM write CRx intercept is set.
408 */
409#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
410
411/**
412 * Check if an SVM read DRx intercept is set.
413 */
414#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
415
416/**
417 * Check if an SVM write DRx intercept is set.
418 */
419#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
420
421/**
422 * Check if an SVM exception intercept is set.
423 */
424#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
425#endif /* VBOX_WITH_NESTED_HWVIRT */
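/* Illustrative sketch, not part of the original file: how the SVM helper
   macros above are meant to be used by an instruction implementation.  The
   handler is hypothetical and the SVM_CTRL_INTERCEPT_VMMCALL constant name is
   an assumption about hm_svm.h. */
#if 0
FNIEMOP_DEF(iemOp_ExampleVmmcall)
{
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmmcall);    /* EFER.SVME, real/v86 mode and CPL checks. */
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    {
        /* Emulate a #VMEXIT here. */
    }
    return VINF_SUCCESS;
}
#endif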
426
427
428/*********************************************************************************************************************************
429* Global Variables *
430*********************************************************************************************************************************/
431extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
432
433
434/** Function table for the ADD instruction. */
435IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
436{
437 iemAImpl_add_u8, iemAImpl_add_u8_locked,
438 iemAImpl_add_u16, iemAImpl_add_u16_locked,
439 iemAImpl_add_u32, iemAImpl_add_u32_locked,
440 iemAImpl_add_u64, iemAImpl_add_u64_locked
441};
442
443/** Function table for the ADC instruction. */
444IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
445{
446 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
447 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
448 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
449 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
450};
451
452/** Function table for the SUB instruction. */
453IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
454{
455 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
456 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
457 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
458 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
459};
460
461/** Function table for the SBB instruction. */
462IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
463{
464 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
465 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
466 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
467 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
468};
469
470/** Function table for the OR instruction. */
471IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
472{
473 iemAImpl_or_u8, iemAImpl_or_u8_locked,
474 iemAImpl_or_u16, iemAImpl_or_u16_locked,
475 iemAImpl_or_u32, iemAImpl_or_u32_locked,
476 iemAImpl_or_u64, iemAImpl_or_u64_locked
477};
478
479/** Function table for the XOR instruction. */
480IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
481{
482 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
483 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
484 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
485 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
486};
487
488/** Function table for the AND instruction. */
489IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
490{
491 iemAImpl_and_u8, iemAImpl_and_u8_locked,
492 iemAImpl_and_u16, iemAImpl_and_u16_locked,
493 iemAImpl_and_u32, iemAImpl_and_u32_locked,
494 iemAImpl_and_u64, iemAImpl_and_u64_locked
495};
496
497/** Function table for the CMP instruction.
498 * @remarks Making operand order ASSUMPTIONS.
499 */
500IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
501{
502 iemAImpl_cmp_u8, NULL,
503 iemAImpl_cmp_u16, NULL,
504 iemAImpl_cmp_u32, NULL,
505 iemAImpl_cmp_u64, NULL
506};
507
508/** Function table for the TEST instruction.
509 * @remarks Making operand order ASSUMPTIONS.
510 */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
512{
513 iemAImpl_test_u8, NULL,
514 iemAImpl_test_u16, NULL,
515 iemAImpl_test_u32, NULL,
516 iemAImpl_test_u64, NULL
517};
518
519/** Function table for the BT instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
521{
522 NULL, NULL,
523 iemAImpl_bt_u16, NULL,
524 iemAImpl_bt_u32, NULL,
525 iemAImpl_bt_u64, NULL
526};
527
528/** Function table for the BTC instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
530{
531 NULL, NULL,
532 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
533 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
534 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
535};
536
537/** Function table for the BTR instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
539{
540 NULL, NULL,
541 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
542 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
543 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
544};
545
546/** Function table for the BTS instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
548{
549 NULL, NULL,
550 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
551 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
552 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
553};
554
555/** Function table for the BSF instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
557{
558 NULL, NULL,
559 iemAImpl_bsf_u16, NULL,
560 iemAImpl_bsf_u32, NULL,
561 iemAImpl_bsf_u64, NULL
562};
563
564/** Function table for the BSR instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
566{
567 NULL, NULL,
568 iemAImpl_bsr_u16, NULL,
569 iemAImpl_bsr_u32, NULL,
570 iemAImpl_bsr_u64, NULL
571};
572
573/** Function table for the IMUL instruction. */
574IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
575{
576 NULL, NULL,
577 iemAImpl_imul_two_u16, NULL,
578 iemAImpl_imul_two_u32, NULL,
579 iemAImpl_imul_two_u64, NULL
580};
581
582/** Group 1 /r lookup table. */
583IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
584{
585 &g_iemAImpl_add,
586 &g_iemAImpl_or,
587 &g_iemAImpl_adc,
588 &g_iemAImpl_sbb,
589 &g_iemAImpl_and,
590 &g_iemAImpl_sub,
591 &g_iemAImpl_xor,
592 &g_iemAImpl_cmp
593};
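/* Illustrative sketch, not part of the original file: the group 1 table above
   is indexed with the reg field of the ModR/M byte, so /0 picks ADD and /7
   picks CMP. */
#if 0
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
#endif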
594
595/** Function table for the INC instruction. */
596IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
597{
598 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
599 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
600 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
601 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
602};
603
604/** Function table for the DEC instruction. */
605IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
606{
607 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
608 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
609 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
610 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
611};
612
613/** Function table for the NEG instruction. */
614IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
615{
616 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
617 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
618 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
619 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
620};
621
622/** Function table for the NOT instruction. */
623IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
624{
625 iemAImpl_not_u8, iemAImpl_not_u8_locked,
626 iemAImpl_not_u16, iemAImpl_not_u16_locked,
627 iemAImpl_not_u32, iemAImpl_not_u32_locked,
628 iemAImpl_not_u64, iemAImpl_not_u64_locked
629};
630
631
632/** Function table for the ROL instruction. */
633IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
634{
635 iemAImpl_rol_u8,
636 iemAImpl_rol_u16,
637 iemAImpl_rol_u32,
638 iemAImpl_rol_u64
639};
640
641/** Function table for the ROR instruction. */
642IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
643{
644 iemAImpl_ror_u8,
645 iemAImpl_ror_u16,
646 iemAImpl_ror_u32,
647 iemAImpl_ror_u64
648};
649
650/** Function table for the RCL instruction. */
651IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
652{
653 iemAImpl_rcl_u8,
654 iemAImpl_rcl_u16,
655 iemAImpl_rcl_u32,
656 iemAImpl_rcl_u64
657};
658
659/** Function table for the RCR instruction. */
660IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
661{
662 iemAImpl_rcr_u8,
663 iemAImpl_rcr_u16,
664 iemAImpl_rcr_u32,
665 iemAImpl_rcr_u64
666};
667
668/** Function table for the SHL instruction. */
669IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
670{
671 iemAImpl_shl_u8,
672 iemAImpl_shl_u16,
673 iemAImpl_shl_u32,
674 iemAImpl_shl_u64
675};
676
677/** Function table for the SHR instruction. */
678IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
679{
680 iemAImpl_shr_u8,
681 iemAImpl_shr_u16,
682 iemAImpl_shr_u32,
683 iemAImpl_shr_u64
684};
685
686/** Function table for the SAR instruction. */
687IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
688{
689 iemAImpl_sar_u8,
690 iemAImpl_sar_u16,
691 iemAImpl_sar_u32,
692 iemAImpl_sar_u64
693};
694
695
696/** Function table for the MUL instruction. */
697IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
698{
699 iemAImpl_mul_u8,
700 iemAImpl_mul_u16,
701 iemAImpl_mul_u32,
702 iemAImpl_mul_u64
703};
704
705/** Function table for the IMUL instruction working implicitly on rAX. */
706IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
707{
708 iemAImpl_imul_u8,
709 iemAImpl_imul_u16,
710 iemAImpl_imul_u32,
711 iemAImpl_imul_u64
712};
713
714/** Function table for the DIV instruction. */
715IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
716{
717 iemAImpl_div_u8,
718 iemAImpl_div_u16,
719 iemAImpl_div_u32,
720 iemAImpl_div_u64
721};
722
723/** Function table for the IDIV instruction. */
724IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
725{
726 iemAImpl_idiv_u8,
727 iemAImpl_idiv_u16,
728 iemAImpl_idiv_u32,
729 iemAImpl_idiv_u64
730};
731
732/** Function table for the SHLD instruction */
733IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
734{
735 iemAImpl_shld_u16,
736 iemAImpl_shld_u32,
737 iemAImpl_shld_u64,
738};
739
740/** Function table for the SHRD instruction */
741IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
742{
743 iemAImpl_shrd_u16,
744 iemAImpl_shrd_u32,
745 iemAImpl_shrd_u64,
746};
747
748
749/** Function table for the PUNPCKLBW instruction */
750IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
751/** Function table for the PUNPCKLWD instruction */
752IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
753/** Function table for the PUNPCKLDQ instruction */
754IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
755/** Function table for the PUNPCKLQDQ instruction */
756IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
757
758/** Function table for the PUNPCKHBW instruction */
759IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
760/** Function table for the PUNPCKHWD instruction */
761IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
762/** Function table for the PUNPCKHDQ instruction */
763IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
764/** Function table for the PUNPCKHQDQ instruction */
765IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
766
767/** Function table for the PXOR instruction */
768IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
769/** Function table for the PCMPEQB instruction */
770IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
771/** Function table for the PCMPEQW instruction */
772IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
773/** Function table for the PCMPEQD instruction */
774IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
775
776
777#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
778/** What IEM just wrote. */
779uint8_t g_abIemWrote[256];
780/** How much IEM just wrote. */
781size_t g_cbIemWrote;
782#endif
783
784
785/*********************************************************************************************************************************
786* Internal Functions *
787*********************************************************************************************************************************/
788IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
789IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
790IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
791IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
792/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
793IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
794IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
795IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
796IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
797IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
798IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
799IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
800IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
801IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
802IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
803IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
804IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
805#ifdef IEM_WITH_SETJMP
806DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
807DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
808DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
809DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
810DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
811#endif
812
813IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
814IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
815IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
816IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
817IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
818IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
820IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
821IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
822IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
823IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
824IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
825IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
826IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
827IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
828IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
829
830#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
831IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
832#endif
833IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
834IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
835
836
837
838/**
839 * Sets the pass up status.
840 *
841 * @returns VINF_SUCCESS.
842 * @param pVCpu The cross context virtual CPU structure of the
843 * calling thread.
844 * @param rcPassUp The pass up status. Must be informational.
845 * VINF_SUCCESS is not allowed.
846 */
847IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
848{
849 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
850
851 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
852 if (rcOldPassUp == VINF_SUCCESS)
853 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
854 /* If both are EM scheduling codes, use EM priority rules. */
855 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
856 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
857 {
858 if (rcPassUp < rcOldPassUp)
859 {
860 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
861 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
862 }
863 else
864 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
865 }
866 /* Override EM scheduling with specific status code. */
867 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
868 {
869 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
870 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
871 }
872 /* Don't override specific status code, first come first served. */
873 else
874 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
875 return VINF_SUCCESS;
876}
877
878
879/**
880 * Calculates the CPU mode.
881 *
882 * This is mainly for updating IEMCPU::enmCpuMode.
883 *
884 * @returns CPU mode.
885 * @param pCtx The register context for the CPU.
886 */
887DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
888{
889 if (CPUMIsGuestIn64BitCodeEx(pCtx))
890 return IEMMODE_64BIT;
891 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
892 return IEMMODE_32BIT;
893 return IEMMODE_16BIT;
894}
895
896
897/**
898 * Initializes the execution state.
899 *
900 * @param pVCpu The cross context virtual CPU structure of the
901 * calling thread.
902 * @param fBypassHandlers Whether to bypass access handlers.
903 *
904 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
905 * side-effects in strict builds.
906 */
907DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
908{
909 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
910
911 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
912
913#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
915 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
916 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
917 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
918 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
919 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
920 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
921 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
922#endif
923
924#ifdef VBOX_WITH_RAW_MODE_NOT_R0
925 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
926#endif
927 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
928 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
929#ifdef VBOX_STRICT
930 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
931 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
932 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
933 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
934 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
935 pVCpu->iem.s.uRexReg = 127;
936 pVCpu->iem.s.uRexB = 127;
937 pVCpu->iem.s.uRexIndex = 127;
938 pVCpu->iem.s.iEffSeg = 127;
939 pVCpu->iem.s.idxPrefix = 127;
940 pVCpu->iem.s.uVex3rdReg = 127;
941 pVCpu->iem.s.uVexLength = 127;
942 pVCpu->iem.s.fEvexStuff = 127;
943 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
944# ifdef IEM_WITH_CODE_TLB
945 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
946 pVCpu->iem.s.pbInstrBuf = NULL;
947 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
948 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
949 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
950 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
951# else
952 pVCpu->iem.s.offOpcode = 127;
953 pVCpu->iem.s.cbOpcode = 127;
954# endif
955#endif
956
957 pVCpu->iem.s.cActiveMappings = 0;
958 pVCpu->iem.s.iNextMapping = 0;
959 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
960 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
961#ifdef VBOX_WITH_RAW_MODE_NOT_R0
962 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
963 && pCtx->cs.u64Base == 0
964 && pCtx->cs.u32Limit == UINT32_MAX
965 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
966 if (!pVCpu->iem.s.fInPatchCode)
967 CPUMRawLeave(pVCpu, VINF_SUCCESS);
968#endif
969
970#ifdef IEM_VERIFICATION_MODE_FULL
971 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
972 pVCpu->iem.s.fNoRem = true;
973#endif
974}
975
976
977/**
978 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
979 *
980 * @param pVCpu The cross context virtual CPU structure of the
981 * calling thread.
982 */
983DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
984{
985 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
986#ifdef IEM_VERIFICATION_MODE_FULL
987 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
988#endif
989#ifdef VBOX_STRICT
990# ifdef IEM_WITH_CODE_TLB
991 NOREF(pVCpu);
992# else
993 pVCpu->iem.s.cbOpcode = 0;
994# endif
995#else
996 NOREF(pVCpu);
997#endif
998}
999
1000
1001/**
1002 * Initializes the decoder state.
1003 *
1004 * iemReInitDecoder is mostly a copy of this function.
1005 *
1006 * @param pVCpu The cross context virtual CPU structure of the
1007 * calling thread.
1008 * @param fBypassHandlers Whether to bypass access handlers.
1009 */
1010DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1011{
1012 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1013
1014 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1015
1016#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1017 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1018 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1019 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1020 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1025#endif
1026
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1029#endif
1030 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1031#ifdef IEM_VERIFICATION_MODE_FULL
1032 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1033 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1034#endif
1035 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1036 pVCpu->iem.s.enmCpuMode = enmMode;
1037 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1038 pVCpu->iem.s.enmEffAddrMode = enmMode;
1039 if (enmMode != IEMMODE_64BIT)
1040 {
1041 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1042 pVCpu->iem.s.enmEffOpSize = enmMode;
1043 }
1044 else
1045 {
1046 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1047 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1048 }
1049 pVCpu->iem.s.fPrefixes = 0;
1050 pVCpu->iem.s.uRexReg = 0;
1051 pVCpu->iem.s.uRexB = 0;
1052 pVCpu->iem.s.uRexIndex = 0;
1053 pVCpu->iem.s.idxPrefix = 0;
1054 pVCpu->iem.s.uVex3rdReg = 0;
1055 pVCpu->iem.s.uVexLength = 0;
1056 pVCpu->iem.s.fEvexStuff = 0;
1057 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1058#ifdef IEM_WITH_CODE_TLB
1059 pVCpu->iem.s.pbInstrBuf = NULL;
1060 pVCpu->iem.s.offInstrNextByte = 0;
1061 pVCpu->iem.s.offCurInstrStart = 0;
1062# ifdef VBOX_STRICT
1063 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1064 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1065 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1066# endif
1067#else
1068 pVCpu->iem.s.offOpcode = 0;
1069 pVCpu->iem.s.cbOpcode = 0;
1070#endif
1071 pVCpu->iem.s.cActiveMappings = 0;
1072 pVCpu->iem.s.iNextMapping = 0;
1073 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1074 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1076 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1077 && pCtx->cs.u64Base == 0
1078 && pCtx->cs.u32Limit == UINT32_MAX
1079 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1080 if (!pVCpu->iem.s.fInPatchCode)
1081 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1082#endif
1083
1084#ifdef DBGFTRACE_ENABLED
1085 switch (enmMode)
1086 {
1087 case IEMMODE_64BIT:
1088 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1089 break;
1090 case IEMMODE_32BIT:
1091 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1092 break;
1093 case IEMMODE_16BIT:
1094 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1095 break;
1096 }
1097#endif
1098}
1099
1100
1101/**
1102 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1103 *
1104 * This is mostly a copy of iemInitDecoder.
1105 *
1106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1107 */
1108DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1109{
1110 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1111
1112 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1113
1114#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1123#endif
1124
1125 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1126#ifdef IEM_VERIFICATION_MODE_FULL
1127 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1128 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1129#endif
1130 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1131 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1132 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1133 pVCpu->iem.s.enmEffAddrMode = enmMode;
1134 if (enmMode != IEMMODE_64BIT)
1135 {
1136 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1137 pVCpu->iem.s.enmEffOpSize = enmMode;
1138 }
1139 else
1140 {
1141 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1142 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1143 }
1144 pVCpu->iem.s.fPrefixes = 0;
1145 pVCpu->iem.s.uRexReg = 0;
1146 pVCpu->iem.s.uRexB = 0;
1147 pVCpu->iem.s.uRexIndex = 0;
1148 pVCpu->iem.s.idxPrefix = 0;
1149 pVCpu->iem.s.uVex3rdReg = 0;
1150 pVCpu->iem.s.uVexLength = 0;
1151 pVCpu->iem.s.fEvexStuff = 0;
1152 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1153#ifdef IEM_WITH_CODE_TLB
1154 if (pVCpu->iem.s.pbInstrBuf)
1155 {
1156 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1157 - pVCpu->iem.s.uInstrBufPc;
1158 if (off < pVCpu->iem.s.cbInstrBufTotal)
1159 {
1160 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1161 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1162 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1163 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1164 else
1165 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1166 }
1167 else
1168 {
1169 pVCpu->iem.s.pbInstrBuf = NULL;
1170 pVCpu->iem.s.offInstrNextByte = 0;
1171 pVCpu->iem.s.offCurInstrStart = 0;
1172 pVCpu->iem.s.cbInstrBuf = 0;
1173 pVCpu->iem.s.cbInstrBufTotal = 0;
1174 }
1175 }
1176 else
1177 {
1178 pVCpu->iem.s.offInstrNextByte = 0;
1179 pVCpu->iem.s.offCurInstrStart = 0;
1180 pVCpu->iem.s.cbInstrBuf = 0;
1181 pVCpu->iem.s.cbInstrBufTotal = 0;
1182 }
1183#else
1184 pVCpu->iem.s.cbOpcode = 0;
1185 pVCpu->iem.s.offOpcode = 0;
1186#endif
1187 Assert(pVCpu->iem.s.cActiveMappings == 0);
1188 pVCpu->iem.s.iNextMapping = 0;
1189 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1190 Assert(pVCpu->iem.s.fBypassHandlers == false);
1191#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1192 if (!pVCpu->iem.s.fInPatchCode)
1193 { /* likely */ }
1194 else
1195 {
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202 }
1203#endif
1204
1205#ifdef DBGFTRACE_ENABLED
1206 switch (enmMode)
1207 {
1208 case IEMMODE_64BIT:
1209 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1210 break;
1211 case IEMMODE_32BIT:
1212 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1213 break;
1214 case IEMMODE_16BIT:
1215 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1216 break;
1217 }
1218#endif
1219}
1220
1221
1222
1223/**
1224 * Prefetches opcodes the first time execution is started.
1225 *
1226 * @returns Strict VBox status code.
1227 * @param pVCpu The cross context virtual CPU structure of the
1228 * calling thread.
1229 * @param fBypassHandlers Whether to bypass access handlers.
1230 */
1231IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1232{
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1235#endif
1236 iemInitDecoder(pVCpu, fBypassHandlers);
1237
1238#ifdef IEM_WITH_CODE_TLB
1239 /** @todo Do ITLB lookup here. */
1240
1241#else /* !IEM_WITH_CODE_TLB */
1242
1243 /*
1244 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1245 *
1246 * First translate CS:rIP to a physical address.
1247 */
1248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1249 uint32_t cbToTryRead;
1250 RTGCPTR GCPtrPC;
1251 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1252 {
1253 cbToTryRead = PAGE_SIZE;
1254 GCPtrPC = pCtx->rip;
1255 if (IEM_IS_CANONICAL(GCPtrPC))
1256 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1257 else
1258 return iemRaiseGeneralProtectionFault0(pVCpu);
1259 }
1260 else
1261 {
1262 uint32_t GCPtrPC32 = pCtx->eip;
1263 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1264 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1265 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1266 else
1267 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1268 if (cbToTryRead) { /* likely */ }
1269 else /* overflowed */
1270 {
1271 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1272 cbToTryRead = UINT32_MAX;
1273 }
1274 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1275 Assert(GCPtrPC <= UINT32_MAX);
1276 }
1277
1278# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1279 /* Allow interpretation of patch manager code blocks since they can for
1280 instance throw #PFs for perfectly good reasons. */
1281 if (pVCpu->iem.s.fInPatchCode)
1282 {
1283 size_t cbRead = 0;
1284 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1285 AssertRCReturn(rc, rc);
1286 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1287 return VINF_SUCCESS;
1288 }
1289# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1290
1291 RTGCPHYS GCPhys;
1292 uint64_t fFlags;
1293 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1294 if (RT_SUCCESS(rc)) { /* probable */ }
1295 else
1296 {
1297 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1298 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1299 }
1300 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1301 else
1302 {
1303 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1304 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1305 }
1306 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1307 else
1308 {
1309 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1310 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1311 }
1312 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1313 /** @todo Check reserved bits and such stuff. PGM is better at doing
1314 * that, so do it when implementing the guest virtual address
1315 * TLB... */
1316
1317# ifdef IEM_VERIFICATION_MODE_FULL
1318 /*
1319 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1320 * instruction.
1321 */
1322 /** @todo optimize this differently by not using PGMPhysRead. */
1323 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1324 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1325 if ( offPrevOpcodes < cbOldOpcodes
1326 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1327 {
1328 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1329 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1330 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1331 pVCpu->iem.s.cbOpcode = cbNew;
1332 return VINF_SUCCESS;
1333 }
1334# endif
1335
1336 /*
1337 * Read the bytes at this address.
1338 */
1339 PVM pVM = pVCpu->CTX_SUFF(pVM);
1340# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1341 size_t cbActual;
1342 if ( PATMIsEnabled(pVM)
1343 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1344 {
1345 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1346 Assert(cbActual > 0);
1347 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1348 }
1349 else
1350# endif
1351 {
1352 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1353 if (cbToTryRead > cbLeftOnPage)
1354 cbToTryRead = cbLeftOnPage;
1355 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1356 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1357
1358 if (!pVCpu->iem.s.fBypassHandlers)
1359 {
1360 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1361 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1362 { /* likely */ }
1363 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1364 {
1365 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1366 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1367 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1368 }
1369 else
1370 {
1371 Log((RT_SUCCESS(rcStrict)
1372 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1373 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1374 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1375 return rcStrict;
1376 }
1377 }
1378 else
1379 {
1380 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1381 if (RT_SUCCESS(rc))
1382 { /* likely */ }
1383 else
1384 {
1385 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1386 GCPtrPC, GCPhys, cbToTryRead, rc));
1387 return rc;
1388 }
1389 }
1390 pVCpu->iem.s.cbOpcode = cbToTryRead;
1391 }
1392#endif /* !IEM_WITH_CODE_TLB */
1393 return VINF_SUCCESS;
1394}
1395
1396
1397/**
1398 * Invalidates the IEM TLBs.
1399 *
1400 * This is called internally as well as by PGM when moving GC mappings.
1401 *
1403 * @param pVCpu The cross context virtual CPU structure of the calling
1404 * thread.
1405 * @param fVmm Set when PGM calls us with a remapping.
1406 */
1407VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1408{
1409#ifdef IEM_WITH_CODE_TLB
1410 pVCpu->iem.s.cbInstrBufTotal = 0;
1411 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1412 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1413 { /* very likely */ }
1414 else
1415 {
1416 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1417 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1418 while (i-- > 0)
1419 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1420 }
1421#endif
1422
1423#ifdef IEM_WITH_DATA_TLB
1424 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1425 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1426 { /* very likely */ }
1427 else
1428 {
1429 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1430 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1431 while (i-- > 0)
1432 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1433 }
1434#endif
1435 NOREF(pVCpu); NOREF(fVmm);
1436}
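/* Illustrative sketch, not part of the original file: why bumping uTlbRevision
   above invalidates every entry without touching the array.  A lookup tags and
   matches entries roughly like this (data TLB shown, simplified): */
#if 0
    uint64_t const     uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision;
    PIEMTLBENTRY const pTlbe = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)(GCPtr >> X86_PAGE_SHIFT)];
    if (pTlbe->uTag == uTag)
    { /* Hit - entries tagged with an older revision can never match. */ }
#endif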
1437
1438
1439/**
1440 * Invalidates a page in the TLBs.
1441 *
1442 * @param pVCpu The cross context virtual CPU structure of the calling
1443 * thread.
1444 * @param GCPtr The address of the page to invalidate
1445 */
1446VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1447{
1448#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1449 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1450 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1451 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1452 uintptr_t idx = (uint8_t)GCPtr;
1453
1454# ifdef IEM_WITH_CODE_TLB
1455 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1456 {
1457 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1458 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1459 pVCpu->iem.s.cbInstrBufTotal = 0;
1460 }
1461# endif
1462
1463# ifdef IEM_WITH_DATA_TLB
1464 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1465 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1466# endif
1467#else
1468 NOREF(pVCpu); NOREF(GCPtr);
1469#endif
1470}
1471
1472
1473/**
1474 * Invalidates the host physical aspects of the IEM TLBs.
1475 *
1476 * This is called internally as well as by PGM when moving GC mappings.
1477 *
1478 * @param pVCpu The cross context virtual CPU structure of the calling
1479 * thread.
1480 */
1481VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1482{
1483#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1484 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1485
1486# ifdef IEM_WITH_CODE_TLB
1487 pVCpu->iem.s.cbInstrBufTotal = 0;
1488# endif
1489 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1490 if (uTlbPhysRev != 0)
1491 {
1492 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1493 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1494 }
1495 else
1496 {
1497 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1498 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1499
1500 unsigned i;
1501# ifdef IEM_WITH_CODE_TLB
1502 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1503 while (i-- > 0)
1504 {
1505 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1506 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1507 }
1508# endif
1509# ifdef IEM_WITH_DATA_TLB
1510 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1511 while (i-- > 0)
1512 {
1513 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1514 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1515 }
1516# endif
1517 }
1518#else
1519 NOREF(pVCpu);
1520#endif
1521}
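/*
 * Illustrative sketch (documentation only, not part of the build): the
 * physical side of a TLB entry is only trusted while the revision bits kept
 * in fFlagsAndPhysRev match the TLB's current uTlbPhysRev, which is the check
 * performed in iemOpcodeFetchBytesJmp below.  Bumping uTlbPhysRev in the
 * function above thus forces a fresh PGMPhysIemGCPhys2PtrNoLock lookup the
 * next time each entry is used.  The helper name is made up for the example.
 */
#if 0 /* documentation sketch only */
DECLINLINE(bool) iemTlbSketchIsPhysRevCurrent(uint64_t uTlbPhysRev, PIEMTLBENTRY pTlbe)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == uTlbPhysRev;
}
#endif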
1522
1523
1524/**
1525 * Invalidates the host physical aspects of the IEM TLBs.
1526 *
1527 * This is called internally as well as by PGM when moving GC mappings.
1528 *
1529 * @param pVM The cross context VM structure.
1530 *
1531 * @remarks Caller holds the PGM lock.
1532 */
1533VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1534{
1535 RT_NOREF_PV(pVM);
1536}
1537
1538#ifdef IEM_WITH_CODE_TLB
1539
1540/**
1541 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1542 * failure and longjmp'ing out.
1543 *
1544 * We end up here for a number of reasons:
1545 * - pbInstrBuf isn't yet initialized.
1546 * - Advancing beyond the buffer boundary (e.g. cross page).
1547 * - Advancing beyond the CS segment limit.
1548 * - Fetching from non-mappable page (e.g. MMIO).
1549 *
1550 * @param pVCpu The cross context virtual CPU structure of the
1551 * calling thread.
1552 * @param pvDst Where to return the bytes.
1553 * @param cbDst Number of bytes to read.
1554 *
1555 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1556 */
1557IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1558{
1559#ifdef IN_RING3
1560//__debugbreak();
1561 for (;;)
1562 {
1563 Assert(cbDst <= 8);
1564 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1565
1566 /*
1567 * We might have a partial buffer match, deal with that first to make the
1568 * rest simpler. This is the first part of the cross page/buffer case.
1569 */
1570 if (pVCpu->iem.s.pbInstrBuf != NULL)
1571 {
1572 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1573 {
1574 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1575 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1576 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1577
1578 cbDst -= cbCopy;
1579 pvDst = (uint8_t *)pvDst + cbCopy;
1580 offBuf += cbCopy;
1581 pVCpu->iem.s.offInstrNextByte += cbCopy;
1582 }
1583 }
1584
1585 /*
1586 * Check segment limit, figuring how much we're allowed to access at this point.
1587 *
1588 * We will fault immediately if RIP is past the segment limit / in non-canonical
1589 * territory. If we do continue, there are one or more bytes to read before we
1590 * end up in trouble and we need to do that first before faulting.
1591 */
1592 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1593 RTGCPTR GCPtrFirst;
1594 uint32_t cbMaxRead;
1595 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1596 {
1597 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1598 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1599 { /* likely */ }
1600 else
1601 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1602 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1603 }
1604 else
1605 {
1606 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1607 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1608 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1609 { /* likely */ }
1610 else
1611 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1612 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1613 if (cbMaxRead != 0)
1614 { /* likely */ }
1615 else
1616 {
1617 /* Overflowed because address is 0 and limit is max. */
1618 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1619 cbMaxRead = X86_PAGE_SIZE;
1620 }
1621 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1622 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1623 if (cbMaxRead2 < cbMaxRead)
1624 cbMaxRead = cbMaxRead2;
1625 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1626 }
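 /* Worked example (illustrative): with CS.u64Base=0x10000, CS.u32Limit=0xffff
 and the next byte at offset 0xfffe into CS, the limit clamp yields
 cbMaxRead=2; the linear address becomes 0x1fffe (0xffe into its page), so
 the page-boundary clamp also yields 2 and at most two more bytes can be
 fetched before the segment limit is hit. */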
1627
1628 /*
1629 * Get the TLB entry for this piece of code.
1630 */
1631 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1632 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1633 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1634 if (pTlbe->uTag == uTag)
1635 {
1636 /* likely when executing lots of code, otherwise unlikely */
1637# ifdef VBOX_WITH_STATISTICS
1638 pVCpu->iem.s.CodeTlb.cTlbHits++;
1639# endif
1640 }
1641 else
1642 {
1643 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1644# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1645 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1646 {
1647 pTlbe->uTag = uTag;
1648 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1649 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1650 pTlbe->GCPhys = NIL_RTGCPHYS;
1651 pTlbe->pbMappingR3 = NULL;
1652 }
1653 else
1654# endif
1655 {
1656 RTGCPHYS GCPhys;
1657 uint64_t fFlags;
1658 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1659 if (RT_FAILURE(rc))
1660 {
1661 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1662 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1663 }
1664
1665 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1666 pTlbe->uTag = uTag;
1667 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1668 pTlbe->GCPhys = GCPhys;
1669 pTlbe->pbMappingR3 = NULL;
1670 }
1671 }
1672
1673 /*
1674 * Check TLB page table level access flags.
1675 */
1676 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1677 {
1678 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1679 {
1680 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1681 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1682 }
1683 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1684 {
1685 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1686 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1687 }
1688 }
1689
1690# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1691 /*
1692 * Allow interpretation of patch manager code blocks since they can for
1693 * instance throw #PFs for perfectly good reasons.
1694 */
1695 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1696 { /* not unlikely */ }
1697 else
1698 {
1699 /** @todo This could be optimized a little in ring-3 if we liked. */
1700 size_t cbRead = 0;
1701 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1702 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1703 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1704 return;
1705 }
1706# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1707
1708 /*
1709 * Look up the physical page info if necessary.
1710 */
1711 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1712 { /* not necessary */ }
1713 else
1714 {
1715 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1716 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1717 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1718 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1719 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1720 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1721 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1722 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1723 }
1724
1725# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1726 /*
1727 * Try do a direct read using the pbMappingR3 pointer.
1728 */
1729 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1730 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1731 {
1732 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1733 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1734 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1735 {
1736 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1737 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1738 }
1739 else
1740 {
1741 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1742 Assert(cbInstr < cbMaxRead);
1743 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1744 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1745 }
1746 if (cbDst <= cbMaxRead)
1747 {
1748 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1749 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1750 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1751 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1752 return;
1753 }
1754 pVCpu->iem.s.pbInstrBuf = NULL;
1755
1756 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1757 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1758 }
1759 else
1760# endif
1761#if 0
1762 /*
1763 * If there is no special read handling, we can read a bit more and
1764 * put it in the prefetch buffer.
1765 */
1766 if ( cbDst < cbMaxRead
1767 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1768 {
1769 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1770 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1771 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1772 { /* likely */ }
1773 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1776 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1777 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1778 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1779 }
1780 else
1781 {
1782 Log((RT_SUCCESS(rcStrict)
1783 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1784 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1785 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1786 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1787 }
1788 }
1789 /*
1790 * Special read handling, so only read exactly what's needed.
1791 * This is a highly unlikely scenario.
1792 */
1793 else
1794#endif
1795 {
1796 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1797 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1798 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1799 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1800 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1801 { /* likely */ }
1802 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1803 {
1804 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1805 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1806 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1807 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1808 }
1809 else
1810 {
1811 Log((RT_SUCCESS(rcStrict)
1812 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1813 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1814 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), VBOXSTRICTRC_VAL(rcStrict), cbToRead));
1815 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1816 }
1817 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1818 if (cbToRead == cbDst)
1819 return;
1820 }
1821
1822 /*
1823 * More to read, loop.
1824 */
1825 cbDst -= cbMaxRead;
1826 pvDst = (uint8_t *)pvDst + cbMaxRead;
1827 }
1828#else
1829 RT_NOREF(pvDst, cbDst);
1830 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1831#endif
1832}
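/*
 * Summary of the instruction buffer bookkeeping used above (as can be
 * gathered from the code in this function):
 *   - pbInstrBuf       Ring-3 mapping of the current code page, NULL when the
 *                      buffer needs (re)initializing.
 *   - uInstrBufPc      Guest linear address corresponding to pbInstrBuf[0]
 *                      (page aligned).
 *   - cbInstrBufTotal  Total number of valid bytes available in the buffer.
 *   - cbInstrBuf       Fetch limit for the current instruction (capped so a
 *                      single instruction never exceeds 15 bytes).
 *   - offCurInstrStart Offset of the current instruction relative to
 *                      pbInstrBuf; apparently may go negative when the
 *                      instruction started on the previous page.
 *   - offInstrNextByte Offset of the next opcode byte to fetch.
 */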
1833
1834#else
1835
1836/**
1837 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1838 * exception if it fails.
1839 *
1840 * @returns Strict VBox status code.
1841 * @param pVCpu The cross context virtual CPU structure of the
1842 * calling thread.
1843 * @param cbMin The minimum number of bytes relative to offOpcode
1844 * that must be read.
1845 */
1846IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1847{
1848 /*
1849 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1850 *
1851 * First translate CS:rIP to a physical address.
1852 */
1853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1854 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1855 uint32_t cbToTryRead;
1856 RTGCPTR GCPtrNext;
1857 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1858 {
1859 cbToTryRead = PAGE_SIZE;
1860 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1861 if (!IEM_IS_CANONICAL(GCPtrNext))
1862 return iemRaiseGeneralProtectionFault0(pVCpu);
1863 }
1864 else
1865 {
1866 uint32_t GCPtrNext32 = pCtx->eip;
1867 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1868 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1869 if (GCPtrNext32 > pCtx->cs.u32Limit)
1870 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1871 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1872 if (!cbToTryRead) /* overflowed */
1873 {
1874 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1875 cbToTryRead = UINT32_MAX;
1876 /** @todo check out wrapping around the code segment. */
1877 }
1878 if (cbToTryRead < cbMin - cbLeft)
1879 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1880 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1881 }
1882
1883 /* Only read up to the end of the page, and make sure we don't read more
1884 than the opcode buffer can hold. */
1885 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1886 if (cbToTryRead > cbLeftOnPage)
1887 cbToTryRead = cbLeftOnPage;
1888 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1889 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1890/** @todo r=bird: Convert assertion into undefined opcode exception? */
1891 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1892
1893# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1894 /* Allow interpretation of patch manager code blocks since they can for
1895 instance throw #PFs for perfectly good reasons. */
1896 if (pVCpu->iem.s.fInPatchCode)
1897 {
1898 size_t cbRead = 0;
1899 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1900 AssertRCReturn(rc, rc);
1901 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1902 return VINF_SUCCESS;
1903 }
1904# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1905
1906 RTGCPHYS GCPhys;
1907 uint64_t fFlags;
1908 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1909 if (RT_FAILURE(rc))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1912 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1913 }
1914 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1915 {
1916 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1917 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1918 }
1919 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1920 {
1921 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1922 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1923 }
1924 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1925 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1926 /** @todo Check reserved bits and such stuff. PGM is better at doing
1927 * that, so do it when implementing the guest virtual address
1928 * TLB... */
1929
1930 /*
1931 * Read the bytes at this address.
1932 *
1933 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1934 * and since PATM should only patch the start of an instruction there
1935 * should be no need to check again here.
1936 */
1937 if (!pVCpu->iem.s.fBypassHandlers)
1938 {
1939 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1940 cbToTryRead, PGMACCESSORIGIN_IEM);
1941 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1942 { /* likely */ }
1943 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1944 {
1945 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1946 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1947 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1948 }
1949 else
1950 {
1951 Log((RT_SUCCESS(rcStrict)
1952 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1953 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1954 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1955 return rcStrict;
1956 }
1957 }
1958 else
1959 {
1960 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1961 if (RT_SUCCESS(rc))
1962 { /* likely */ }
1963 else
1964 {
1965 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1966 return rc;
1967 }
1968 }
1969 pVCpu->iem.s.cbOpcode += cbToTryRead;
1970 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1971
1972 return VINF_SUCCESS;
1973}
1974
1975#endif /* !IEM_WITH_CODE_TLB */
1976#ifndef IEM_WITH_SETJMP
1977
1978/**
1979 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1980 *
1981 * @returns Strict VBox status code.
1982 * @param pVCpu The cross context virtual CPU structure of the
1983 * calling thread.
1984 * @param pb Where to return the opcode byte.
1985 */
1986DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1987{
1988 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1989 if (rcStrict == VINF_SUCCESS)
1990 {
1991 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1992 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1993 pVCpu->iem.s.offOpcode = offOpcode + 1;
1994 }
1995 else
1996 *pb = 0;
1997 return rcStrict;
1998}
1999
2000
2001/**
2002 * Fetches the next opcode byte.
2003 *
2004 * @returns Strict VBox status code.
2005 * @param pVCpu The cross context virtual CPU structure of the
2006 * calling thread.
2007 * @param pu8 Where to return the opcode byte.
2008 */
2009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2010{
2011 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2012 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2013 {
2014 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2015 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2016 return VINF_SUCCESS;
2017 }
2018 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2019}
2020
2021#else /* IEM_WITH_SETJMP */
2022
2023/**
2024 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2025 *
2026 * @returns The opcode byte.
2027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2028 */
2029DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2030{
2031# ifdef IEM_WITH_CODE_TLB
2032 uint8_t u8;
2033 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2034 return u8;
2035# else
2036 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2037 if (rcStrict == VINF_SUCCESS)
2038 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2039 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2040# endif
2041}
2042
2043
2044/**
2045 * Fetches the next opcode byte, longjmp on error.
2046 *
2047 * @returns The opcode byte.
2048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2049 */
2050DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2051{
2052# ifdef IEM_WITH_CODE_TLB
2053 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2054 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2055 if (RT_LIKELY( pbBuf != NULL
2056 && offBuf < pVCpu->iem.s.cbInstrBuf))
2057 {
2058 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2059 return pbBuf[offBuf];
2060 }
2061# else
2062 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2063 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2064 {
2065 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2066 return pVCpu->iem.s.abOpcode[offOpcode];
2067 }
2068# endif
2069 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2070}
2071
2072#endif /* IEM_WITH_SETJMP */
2073
2074/**
2075 * Fetches the next opcode byte, returns automatically on failure.
2076 *
2077 * @param a_pu8 Where to return the opcode byte.
2078 * @remark Implicitly references pVCpu.
2079 */
2080#ifndef IEM_WITH_SETJMP
2081# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2082 do \
2083 { \
2084 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2085 if (rcStrict2 == VINF_SUCCESS) \
2086 { /* likely */ } \
2087 else \
2088 return rcStrict2; \
2089 } while (0)
2090#else
2091# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2092#endif /* IEM_WITH_SETJMP */
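/*
 * Illustrative sketch (documentation only, not part of the build): how a
 * decoder helper is expected to consume IEM_OPCODE_GET_NEXT_U8.  In the
 * !IEM_WITH_SETJMP build the macro may 'return' a strict status code, so the
 * caller must itself return VBOXSTRICTRC; in the setjmp build it simply
 * assigns and failures longjmp past the caller.  The function name is made
 * up for the example.
 */
#if 0 /* documentation sketch only */
IEM_STATIC VBOXSTRICTRC iemSketchFetchModRm(PVMCPU pVCpu, uint8_t *pbRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns / longjmps on fetch failure */
    *pbRm = bRm;
    return VINF_SUCCESS;            /* only reached when the fetch succeeded */
}
#endif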
2093
2094
2095#ifndef IEM_WITH_SETJMP
2096/**
2097 * Fetches the next signed byte from the opcode stream.
2098 *
2099 * @returns Strict VBox status code.
2100 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2101 * @param pi8 Where to return the signed byte.
2102 */
2103DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2104{
2105 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2106}
2107#endif /* !IEM_WITH_SETJMP */
2108
2109
2110/**
2111 * Fetches the next signed byte from the opcode stream, returning automatically
2112 * on failure.
2113 *
2114 * @param a_pi8 Where to return the signed byte.
2115 * @remark Implicitly references pVCpu.
2116 */
2117#ifndef IEM_WITH_SETJMP
2118# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2119 do \
2120 { \
2121 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2122 if (rcStrict2 != VINF_SUCCESS) \
2123 return rcStrict2; \
2124 } while (0)
2125#else /* IEM_WITH_SETJMP */
2126# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2127
2128#endif /* IEM_WITH_SETJMP */
2129
2130#ifndef IEM_WITH_SETJMP
2131
2132/**
2133 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2134 *
2135 * @returns Strict VBox status code.
2136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2137 * @param pu16 Where to return the opcode word.
2138 */
2139DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2140{
2141 uint8_t u8;
2142 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2143 if (rcStrict == VINF_SUCCESS)
2144 *pu16 = (int8_t)u8;
2145 return rcStrict;
2146}
2147
2148
2149/**
2150 * Fetches the next signed byte from the opcode stream, sign-extending it to an
2151 * unsigned 16-bit value.
2152 *
2153 * @returns Strict VBox status code.
2154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2155 * @param pu16 Where to return the unsigned word.
2156 */
2157DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2158{
2159 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2160 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2161 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2162
2163 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2164 pVCpu->iem.s.offOpcode = offOpcode + 1;
2165 return VINF_SUCCESS;
2166}
2167
2168#endif /* !IEM_WITH_SETJMP */
2169
2170/**
2171 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2172 * word and returning automatically on failure.
2173 *
2174 * @param a_pu16 Where to return the word.
2175 * @remark Implicitly references pVCpu.
2176 */
2177#ifndef IEM_WITH_SETJMP
2178# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2179 do \
2180 { \
2181 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2182 if (rcStrict2 != VINF_SUCCESS) \
2183 return rcStrict2; \
2184 } while (0)
2185#else
2186# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2187#endif
2188
2189#ifndef IEM_WITH_SETJMP
2190
2191/**
2192 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2193 *
2194 * @returns Strict VBox status code.
2195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2196 * @param pu32 Where to return the opcode dword.
2197 */
2198DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2199{
2200 uint8_t u8;
2201 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2202 if (rcStrict == VINF_SUCCESS)
2203 *pu32 = (int8_t)u8;
2204 return rcStrict;
2205}
2206
2207
2208/**
2209 * Fetches the next signed byte from the opcode stream, sign-extending it to an
2210 * unsigned 32-bit value.
2211 *
2212 * @returns Strict VBox status code.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 * @param pu32 Where to return the unsigned dword.
2215 */
2216DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2217{
2218 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2219 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2220 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2221
2222 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2223 pVCpu->iem.s.offOpcode = offOpcode + 1;
2224 return VINF_SUCCESS;
2225}
2226
2227#endif /* !IEM_WITH_SETJMP */
2228
2229/**
2230 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2231 * double word and returning automatically on failure.
2232 *
2233 * @param a_pu32 Where to return the double word.
2234 * @remark Implicitly references pVCpu.
2235 */
2236#ifndef IEM_WITH_SETJMP
2237# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2238 do \
2239 { \
2240 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2241 if (rcStrict2 != VINF_SUCCESS) \
2242 return rcStrict2; \
2243 } while (0)
2244#else
2245# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2246#endif
2247
2248#ifndef IEM_WITH_SETJMP
2249
2250/**
2251 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2252 *
2253 * @returns Strict VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2255 * @param pu64 Where to return the opcode qword.
2256 */
2257DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2258{
2259 uint8_t u8;
2260 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2261 if (rcStrict == VINF_SUCCESS)
2262 *pu64 = (int8_t)u8;
2263 return rcStrict;
2264}
2265
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, sign-extending it to an
2269 * unsigned 64-bit value.
2270 *
2271 * @returns Strict VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2273 * @param pu64 Where to return the unsigned qword.
2274 */
2275DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2276{
2277 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2278 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2279 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2280
2281 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2282 pVCpu->iem.s.offOpcode = offOpcode + 1;
2283 return VINF_SUCCESS;
2284}
2285
2286#endif /* !IEM_WITH_SETJMP */
2287
2288
2289/**
2290 * Fetches the next signed byte from the opcode stream, sign-extending it to a
2291 * quad word and returning automatically on failure.
2292 *
2293 * @param a_pu64 Where to return the quad word.
2294 * @remark Implicitly references pVCpu.
2295 */
2296#ifndef IEM_WITH_SETJMP
2297# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2298 do \
2299 { \
2300 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2301 if (rcStrict2 != VINF_SUCCESS) \
2302 return rcStrict2; \
2303 } while (0)
2304#else
2305# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2306#endif
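/*
 * Illustrative note (documentation only): in the S8_SX_U16/U32/U64 variants
 * above it is the (int8_t) cast followed by assignment to the wider unsigned
 * type that performs the sign extension.  A quick demonstration of the values
 * involved (made-up function name, never built):
 */
#if 0 /* documentation sketch only */
static void iemSketchSignExtendDemo(void)
{
    uint8_t  const b   = 0xfe;                 /* -2 as a signed byte */
    uint16_t const u16 = (uint16_t)(int8_t)b;  /* 0xfffe */
    uint32_t const u32 = (uint32_t)(int8_t)b;  /* 0xfffffffe */
    uint64_t const u64 = (uint64_t)(int8_t)b;  /* 0xfffffffffffffffe */
    NOREF(u16); NOREF(u32); NOREF(u64);
}
#endif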
2307
2308
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu16 Where to return the opcode word.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2319{
2320 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2321 if (rcStrict == VINF_SUCCESS)
2322 {
2323 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2324# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2325 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2326# else
2327 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2328# endif
2329 pVCpu->iem.s.offOpcode = offOpcode + 2;
2330 }
2331 else
2332 *pu16 = 0;
2333 return rcStrict;
2334}
2335
2336
2337/**
2338 * Fetches the next opcode word.
2339 *
2340 * @returns Strict VBox status code.
2341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2342 * @param pu16 Where to return the opcode word.
2343 */
2344DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2345{
2346 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2347 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2348 {
2349 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2350# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2351 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2352# else
2353 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2354# endif
2355 return VINF_SUCCESS;
2356 }
2357 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2358}
2359
2360#else /* IEM_WITH_SETJMP */
2361
2362/**
2363 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2364 *
2365 * @returns The opcode word.
2366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2367 */
2368DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2369{
2370# ifdef IEM_WITH_CODE_TLB
2371 uint16_t u16;
2372 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2373 return u16;
2374# else
2375 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2376 if (rcStrict == VINF_SUCCESS)
2377 {
2378 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2379 pVCpu->iem.s.offOpcode += 2;
2380# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2381 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2382# else
2383 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2384# endif
2385 }
2386 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2387# endif
2388}
2389
2390
2391/**
2392 * Fetches the next opcode word, longjmp on error.
2393 *
2394 * @returns The opcode word.
2395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2396 */
2397DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2398{
2399# ifdef IEM_WITH_CODE_TLB
2400 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2401 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2402 if (RT_LIKELY( pbBuf != NULL
2403 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2404 {
2405 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2406# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2407 return *(uint16_t const *)&pbBuf[offBuf];
2408# else
2409 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2410# endif
2411 }
2412# else
2413 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2414 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2415 {
2416 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2417# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2418 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2419# else
2420 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2421# endif
2422 }
2423# endif
2424 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2425}
2426
2427#endif /* IEM_WITH_SETJMP */
2428
2429
2430/**
2431 * Fetches the next opcode word, returns automatically on failure.
2432 *
2433 * @param a_pu16 Where to return the opcode word.
2434 * @remark Implicitly references pVCpu.
2435 */
2436#ifndef IEM_WITH_SETJMP
2437# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2438 do \
2439 { \
2440 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2441 if (rcStrict2 != VINF_SUCCESS) \
2442 return rcStrict2; \
2443 } while (0)
2444#else
2445# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2446#endif
2447
2448#ifndef IEM_WITH_SETJMP
2449
2450/**
2451 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2452 *
2453 * @returns Strict VBox status code.
2454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2455 * @param pu32 Where to return the opcode double word.
2456 */
2457DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2458{
2459 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2460 if (rcStrict == VINF_SUCCESS)
2461 {
2462 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2463 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2464 pVCpu->iem.s.offOpcode = offOpcode + 2;
2465 }
2466 else
2467 *pu32 = 0;
2468 return rcStrict;
2469}
2470
2471
2472/**
2473 * Fetches the next opcode word, zero extending it to a double word.
2474 *
2475 * @returns Strict VBox status code.
2476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2477 * @param pu32 Where to return the opcode double word.
2478 */
2479DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2480{
2481 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2482 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2483 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2484
2485 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2486 pVCpu->iem.s.offOpcode = offOpcode + 2;
2487 return VINF_SUCCESS;
2488}
2489
2490#endif /* !IEM_WITH_SETJMP */
2491
2492
2493/**
2494 * Fetches the next opcode word and zero extends it to a double word, returns
2495 * automatically on failure.
2496 *
2497 * @param a_pu32 Where to return the opcode double word.
2498 * @remark Implicitly references pVCpu.
2499 */
2500#ifndef IEM_WITH_SETJMP
2501# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2502 do \
2503 { \
2504 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2505 if (rcStrict2 != VINF_SUCCESS) \
2506 return rcStrict2; \
2507 } while (0)
2508#else
2509# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2510#endif
2511
2512#ifndef IEM_WITH_SETJMP
2513
2514/**
2515 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2516 *
2517 * @returns Strict VBox status code.
2518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2519 * @param pu64 Where to return the opcode quad word.
2520 */
2521DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2522{
2523 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2524 if (rcStrict == VINF_SUCCESS)
2525 {
2526 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2527 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528 pVCpu->iem.s.offOpcode = offOpcode + 2;
2529 }
2530 else
2531 *pu64 = 0;
2532 return rcStrict;
2533}
2534
2535
2536/**
2537 * Fetches the next opcode word, zero extending it to a quad word.
2538 *
2539 * @returns Strict VBox status code.
2540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2541 * @param pu64 Where to return the opcode quad word.
2542 */
2543DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2544{
2545 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2546 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2547 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2548
2549 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2550 pVCpu->iem.s.offOpcode = offOpcode + 2;
2551 return VINF_SUCCESS;
2552}
2553
2554#endif /* !IEM_WITH_SETJMP */
2555
2556/**
2557 * Fetches the next opcode word and zero extends it to a quad word, returns
2558 * automatically on failure.
2559 *
2560 * @param a_pu64 Where to return the opcode quad word.
2561 * @remark Implicitly references pVCpu.
2562 */
2563#ifndef IEM_WITH_SETJMP
2564# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2565 do \
2566 { \
2567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2568 if (rcStrict2 != VINF_SUCCESS) \
2569 return rcStrict2; \
2570 } while (0)
2571#else
2572# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2573#endif
2574
2575
2576#ifndef IEM_WITH_SETJMP
2577/**
2578 * Fetches the next signed word from the opcode stream.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pi16 Where to return the signed word.
2583 */
2584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2585{
2586 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2587}
2588#endif /* !IEM_WITH_SETJMP */
2589
2590
2591/**
2592 * Fetches the next signed word from the opcode stream, returning automatically
2593 * on failure.
2594 *
2595 * @param a_pi16 Where to return the signed word.
2596 * @remark Implicitly references pVCpu.
2597 */
2598#ifndef IEM_WITH_SETJMP
2599# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2600 do \
2601 { \
2602 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2603 if (rcStrict2 != VINF_SUCCESS) \
2604 return rcStrict2; \
2605 } while (0)
2606#else
2607# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2608#endif
2609
2610#ifndef IEM_WITH_SETJMP
2611
2612/**
2613 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2614 *
2615 * @returns Strict VBox status code.
2616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2617 * @param pu32 Where to return the opcode dword.
2618 */
2619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2620{
2621 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2622 if (rcStrict == VINF_SUCCESS)
2623 {
2624 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2626 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2627# else
2628 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2629 pVCpu->iem.s.abOpcode[offOpcode + 1],
2630 pVCpu->iem.s.abOpcode[offOpcode + 2],
2631 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2632# endif
2633 pVCpu->iem.s.offOpcode = offOpcode + 4;
2634 }
2635 else
2636 *pu32 = 0;
2637 return rcStrict;
2638}
2639
2640
2641/**
2642 * Fetches the next opcode dword.
2643 *
2644 * @returns Strict VBox status code.
2645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2646 * @param pu32 Where to return the opcode double word.
2647 */
2648DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2649{
2650 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2651 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2652 {
2653 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2654# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2655 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2656# else
2657 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2658 pVCpu->iem.s.abOpcode[offOpcode + 1],
2659 pVCpu->iem.s.abOpcode[offOpcode + 2],
2660 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2661# endif
2662 return VINF_SUCCESS;
2663 }
2664 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2665}
2666
2667#else /* IEM_WITH_SETJMP */
2668
2669/**
2670 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2671 *
2672 * @returns The opcode dword.
2673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2674 */
2675DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2676{
2677# ifdef IEM_WITH_CODE_TLB
2678 uint32_t u32;
2679 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2680 return u32;
2681# else
2682 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2683 if (rcStrict == VINF_SUCCESS)
2684 {
2685 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2686 pVCpu->iem.s.offOpcode = offOpcode + 4;
2687# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2688 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2689# else
2690 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2691 pVCpu->iem.s.abOpcode[offOpcode + 1],
2692 pVCpu->iem.s.abOpcode[offOpcode + 2],
2693 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2694# endif
2695 }
2696 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2697# endif
2698}
2699
2700
2701/**
2702 * Fetches the next opcode dword, longjmp on error.
2703 *
2704 * @returns The opcode dword.
2705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2706 */
2707DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2708{
2709# ifdef IEM_WITH_CODE_TLB
2710 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2711 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2712 if (RT_LIKELY( pbBuf != NULL
2713 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2714 {
2715 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2716# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2717 return *(uint32_t const *)&pbBuf[offBuf];
2718# else
2719 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2720 pbBuf[offBuf + 1],
2721 pbBuf[offBuf + 2],
2722 pbBuf[offBuf + 3]);
2723# endif
2724 }
2725# else
2726 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2727 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2728 {
2729 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2730# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2731 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2732# else
2733 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2734 pVCpu->iem.s.abOpcode[offOpcode + 1],
2735 pVCpu->iem.s.abOpcode[offOpcode + 2],
2736 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2737# endif
2738 }
2739# endif
2740 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2741}
2742
2743#endif /* IEM_WITH_SETJMP */
2744
2745
2746/**
2747 * Fetches the next opcode dword, returns automatically on failure.
2748 *
2749 * @param a_pu32 Where to return the opcode dword.
2750 * @remark Implicitly references pVCpu.
2751 */
2752#ifndef IEM_WITH_SETJMP
2753# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2754 do \
2755 { \
2756 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2757 if (rcStrict2 != VINF_SUCCESS) \
2758 return rcStrict2; \
2759 } while (0)
2760#else
2761# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2762#endif
2763
2764#ifndef IEM_WITH_SETJMP
2765
2766/**
2767 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2768 *
2769 * @returns Strict VBox status code.
2770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2771 * @param pu64 Where to return the opcode quad word.
2772 */
2773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2774{
2775 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2776 if (rcStrict == VINF_SUCCESS)
2777 {
2778 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2779 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2780 pVCpu->iem.s.abOpcode[offOpcode + 1],
2781 pVCpu->iem.s.abOpcode[offOpcode + 2],
2782 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2783 pVCpu->iem.s.offOpcode = offOpcode + 4;
2784 }
2785 else
2786 *pu64 = 0;
2787 return rcStrict;
2788}
2789
2790
2791/**
2792 * Fetches the next opcode dword, zero extending it to a quad word.
2793 *
2794 * @returns Strict VBox status code.
2795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2796 * @param pu64 Where to return the opcode quad word.
2797 */
2798DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2799{
2800 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2801 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2802 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2803
2804 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2805 pVCpu->iem.s.abOpcode[offOpcode + 1],
2806 pVCpu->iem.s.abOpcode[offOpcode + 2],
2807 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2808 pVCpu->iem.s.offOpcode = offOpcode + 4;
2809 return VINF_SUCCESS;
2810}
2811
2812#endif /* !IEM_WITH_SETJMP */
2813
2814
2815/**
2816 * Fetches the next opcode dword and zero extends it to a quad word, returns
2817 * automatically on failure.
2818 *
2819 * @param a_pu64 Where to return the opcode quad word.
2820 * @remark Implicitly references pVCpu.
2821 */
2822#ifndef IEM_WITH_SETJMP
2823# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2824 do \
2825 { \
2826 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2827 if (rcStrict2 != VINF_SUCCESS) \
2828 return rcStrict2; \
2829 } while (0)
2830#else
2831# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2832#endif
2833
2834
2835#ifndef IEM_WITH_SETJMP
2836/**
2837 * Fetches the next signed double word from the opcode stream.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 * @param pi32 Where to return the signed double word.
2842 */
2843DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2844{
2845 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2846}
2847#endif
2848
2849/**
2850 * Fetches the next signed double word from the opcode stream, returning
2851 * automatically on failure.
2852 *
2853 * @param a_pi32 Where to return the signed double word.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param pu64 Where to return the opcode qword.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, sign extending it into a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2907
2908 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 *pu64 = i32;
2913 pVCpu->iem.s.offOpcode = offOpcode + 4;
2914 return VINF_SUCCESS;
2915}
2916
2917#endif /* !IEM_WITH_SETJMP */
2918
2919
2920/**
2921 * Fetches the next opcode double word and sign extends it to a quad word,
2922 * returns automatically on failure.
2923 *
2924 * @param a_pu64 Where to return the opcode quad word.
2925 * @remark Implicitly references pVCpu.
2926 */
2927#ifndef IEM_WITH_SETJMP
2928# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2929 do \
2930 { \
2931 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2932 if (rcStrict2 != VINF_SUCCESS) \
2933 return rcStrict2; \
2934 } while (0)
2935#else
2936# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2937#endif
2938
2939#ifndef IEM_WITH_SETJMP
2940
2941/**
2942 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2943 *
2944 * @returns Strict VBox status code.
2945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2946 * @param pu64 Where to return the opcode qword.
2947 */
2948DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2949{
2950 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2951 if (rcStrict == VINF_SUCCESS)
2952 {
2953 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2954# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2955 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2956# else
2957 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2958 pVCpu->iem.s.abOpcode[offOpcode + 1],
2959 pVCpu->iem.s.abOpcode[offOpcode + 2],
2960 pVCpu->iem.s.abOpcode[offOpcode + 3],
2961 pVCpu->iem.s.abOpcode[offOpcode + 4],
2962 pVCpu->iem.s.abOpcode[offOpcode + 5],
2963 pVCpu->iem.s.abOpcode[offOpcode + 6],
2964 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2965# endif
2966 pVCpu->iem.s.offOpcode = offOpcode + 8;
2967 }
2968 else
2969 *pu64 = 0;
2970 return rcStrict;
2971}
2972
2973
2974/**
2975 * Fetches the next opcode qword.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2984 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2985 {
2986# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2987 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2988# else
2989 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2990 pVCpu->iem.s.abOpcode[offOpcode + 1],
2991 pVCpu->iem.s.abOpcode[offOpcode + 2],
2992 pVCpu->iem.s.abOpcode[offOpcode + 3],
2993 pVCpu->iem.s.abOpcode[offOpcode + 4],
2994 pVCpu->iem.s.abOpcode[offOpcode + 5],
2995 pVCpu->iem.s.abOpcode[offOpcode + 6],
2996 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2997# endif
2998 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2999 return VINF_SUCCESS;
3000 }
3001 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3002}
3003
3004#else /* IEM_WITH_SETJMP */
3005
3006/**
3007 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3008 *
3009 * @returns The opcode qword.
3010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3011 */
3012DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3013{
3014# ifdef IEM_WITH_CODE_TLB
3015 uint64_t u64;
3016 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3017 return u64;
3018# else
3019 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3020 if (rcStrict == VINF_SUCCESS)
3021 {
3022 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3023 pVCpu->iem.s.offOpcode = offOpcode + 8;
3024# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3025 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3026# else
3027 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3028 pVCpu->iem.s.abOpcode[offOpcode + 1],
3029 pVCpu->iem.s.abOpcode[offOpcode + 2],
3030 pVCpu->iem.s.abOpcode[offOpcode + 3],
3031 pVCpu->iem.s.abOpcode[offOpcode + 4],
3032 pVCpu->iem.s.abOpcode[offOpcode + 5],
3033 pVCpu->iem.s.abOpcode[offOpcode + 6],
3034 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3035# endif
3036 }
3037 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3038# endif
3039}
3040
3041
3042/**
3043 * Fetches the next opcode qword, longjmp on error.
3044 *
3045 * @returns The opcode qword.
3046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3047 */
3048DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3049{
3050# ifdef IEM_WITH_CODE_TLB
3051 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3052 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3053 if (RT_LIKELY( pbBuf != NULL
3054 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3055 {
3056 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 return *(uint64_t const *)&pbBuf[offBuf];
3059# else
3060 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3061 pbBuf[offBuf + 1],
3062 pbBuf[offBuf + 2],
3063 pbBuf[offBuf + 3],
3064 pbBuf[offBuf + 4],
3065 pbBuf[offBuf + 5],
3066 pbBuf[offBuf + 6],
3067 pbBuf[offBuf + 7]);
3068# endif
3069 }
3070# else
3071 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3072 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3073 {
3074 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3075# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3076 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3077# else
3078 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3079 pVCpu->iem.s.abOpcode[offOpcode + 1],
3080 pVCpu->iem.s.abOpcode[offOpcode + 2],
3081 pVCpu->iem.s.abOpcode[offOpcode + 3],
3082 pVCpu->iem.s.abOpcode[offOpcode + 4],
3083 pVCpu->iem.s.abOpcode[offOpcode + 5],
3084 pVCpu->iem.s.abOpcode[offOpcode + 6],
3085 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3086# endif
3087 }
3088# endif
3089 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3090}
3091
3092#endif /* IEM_WITH_SETJMP */
3093
3094/**
3095 * Fetches the next opcode quad word, returns automatically on failure.
3096 *
3097 * @param a_pu64 Where to return the opcode quad word.
3098 * @remark Implicitly references pVCpu.
3099 */
3100#ifndef IEM_WITH_SETJMP
3101# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3102 do \
3103 { \
3104 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3105 if (rcStrict2 != VINF_SUCCESS) \
3106 return rcStrict2; \
3107 } while (0)
3108#else
3109# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3110#endif
3111
3112
3113/** @name Misc Worker Functions.
3114 * @{
3115 */
3116
3117
3118/**
3119 * Validates a new SS segment.
3120 *
3121 * @returns VBox strict status code.
3122 * @param pVCpu The cross context virtual CPU structure of the
3123 * calling thread.
3124 * @param pCtx The CPU context.
3125 * @param NewSS The new SS selector.
3126 * @param uCpl The CPL to load the stack for.
3127 * @param pDesc Where to return the descriptor.
3128 */
3129IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3130{
3131 NOREF(pCtx);
3132
3133 /* Null selectors are not allowed (we're not called for dispatching
3134 interrupts with SS=0 in long mode). */
3135 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3136 {
3137 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3138 return iemRaiseTaskSwitchFault0(pVCpu);
3139 }
3140
3141 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3142 if ((NewSS & X86_SEL_RPL) != uCpl)
3143 {
3144 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3145 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3146 }
3147
3148 /*
3149 * Read the descriptor.
3150 */
3151 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3152 if (rcStrict != VINF_SUCCESS)
3153 return rcStrict;
3154
3155 /*
3156 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3157 */
3158 if (!pDesc->Legacy.Gen.u1DescType)
3159 {
3160 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3161 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3162 }
3163
3164 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3165 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3166 {
3167 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3168 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3169 }
3170 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3171 {
3172 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3173 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3174 }
3175
3176 /* Is it there? */
3177 /** @todo testcase: Is this checked before the canonical / limit check below? */
3178 if (!pDesc->Legacy.Gen.u1Present)
3179 {
3180 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3181 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3182 }
3183
3184 return VINF_SUCCESS;
3185}
3186
3187
3188/**
3189 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3190 * not.
3191 *
3192 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3193 * @param a_pCtx The CPU context.
3194 */
3195#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3196# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3197 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3198 ? (a_pCtx)->eflags.u \
3199 : CPUMRawGetEFlags(a_pVCpu) )
3200#else
3201# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3202 ( (a_pCtx)->eflags.u )
3203#endif
3204
3205/**
3206 * Updates the EFLAGS in the correct manner wrt. PATM.
3207 *
3208 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3209 * @param a_pCtx The CPU context.
3210 * @param a_fEfl The new EFLAGS.
3211 */
3212#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3213# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3214 do { \
3215 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3216 (a_pCtx)->eflags.u = (a_fEfl); \
3217 else \
3218 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3219 } while (0)
3220#else
3221# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3222 do { \
3223 (a_pCtx)->eflags.u = (a_fEfl); \
3224 } while (0)
3225#endif
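#if 0 /* Editorial sketch, not part of the build: the usual read-modify-write pattern with the
       * two macros above, mirroring what the exception delivery code further down does when it
       * clears IF.  pVCpu and pCtx are assumed to be the usual IEM locals. */
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
    fEfl &= ~X86_EFL_IF;
    IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
#endif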
3226
3227
3228/** @} */
3229
3230/** @name Raising Exceptions.
3231 *
3232 * @{
3233 */
3234
3235/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3236 * @{ */
3237/** CPU exception. */
3238#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3239/** External interrupt (from PIC, APIC, whatever). */
3240#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3241/** Software interrupt (int or into, not bound).
3242 * Returns to the following instruction. */
3243#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3244/** Takes an error code. */
3245#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3246/** Takes a CR2. */
3247#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3248/** Generated by the breakpoint instruction. */
3249#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3250/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3251#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3252/** @} */
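/* Editorial note (not from the source): typical combinations of the flags above, for
 * orientation only - e.g. a #GP is raised with IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 * a #PF additionally carries IEM_XCPT_FLAGS_CR2, while an INT n instruction uses
 * IEM_XCPT_FLAGS_T_SOFT_INT and pushes no error code. */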
3253
3254
3255/**
3256 * Loads the specified stack far pointer from the TSS.
3257 *
3258 * @returns VBox strict status code.
3259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3260 * @param pCtx The CPU context.
3261 * @param uCpl The CPL to load the stack for.
3262 * @param pSelSS Where to return the new stack segment.
3263 * @param puEsp Where to return the new stack pointer.
3264 */
3265IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3266 PRTSEL pSelSS, uint32_t *puEsp)
3267{
3268 VBOXSTRICTRC rcStrict;
3269 Assert(uCpl < 4);
3270
3271 switch (pCtx->tr.Attr.n.u4Type)
3272 {
3273 /*
3274 * 16-bit TSS (X86TSS16).
3275 */
3276 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3277 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3278 {
3279 uint32_t off = uCpl * 4 + 2;
3280 if (off + 4 <= pCtx->tr.u32Limit)
3281 {
3282 /** @todo check actual access pattern here. */
3283 uint32_t u32Tmp = 0; /* gcc maybe... */
3284 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3285 if (rcStrict == VINF_SUCCESS)
3286 {
3287 *puEsp = RT_LOWORD(u32Tmp);
3288 *pSelSS = RT_HIWORD(u32Tmp);
3289 return VINF_SUCCESS;
3290 }
3291 }
3292 else
3293 {
3294 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3295 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3296 }
3297 break;
3298 }
3299
3300 /*
3301 * 32-bit TSS (X86TSS32).
3302 */
3303 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3304 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3305 {
3306 uint32_t off = uCpl * 8 + 4;
3307 if (off + 7 <= pCtx->tr.u32Limit)
3308 {
3309/** @todo check actual access pattern here. */
3310 uint64_t u64Tmp;
3311 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3312 if (rcStrict == VINF_SUCCESS)
3313 {
3314 *puEsp = u64Tmp & UINT32_MAX;
3315 *pSelSS = (RTSEL)(u64Tmp >> 32);
3316 return VINF_SUCCESS;
3317 }
3318 }
3319 else
3320 {
3321 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3322 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3323 }
3324 break;
3325 }
3326
3327 default:
3328 AssertFailed();
3329 rcStrict = VERR_IEM_IPE_4;
3330 break;
3331 }
3332
3333 *puEsp = 0; /* make gcc happy */
3334 *pSelSS = 0; /* make gcc happy */
3335 return rcStrict;
3336}
3337
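/* Editorial note (not from the source): a worked example of the offset math above.  For a
 * 32-bit TSS and uCpl=1, off = 1*8 + 4 = 12, so the 8 bytes fetched cover ESP1 (dword at
 * offset 12) and SS1 (word at offset 16); for a 16-bit TSS the same CPL gives off = 1*4 + 2
 * = 6, covering SP1 and SS1. */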
3338
3339/**
3340 * Loads the specified stack pointer from the 64-bit TSS.
3341 *
3342 * @returns VBox strict status code.
3343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3344 * @param pCtx The CPU context.
3345 * @param uCpl The CPL to load the stack for.
3346 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3347 * @param puRsp Where to return the new stack pointer.
3348 */
3349IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3350{
3351 Assert(uCpl < 4);
3352 Assert(uIst < 8);
3353 *puRsp = 0; /* make gcc happy */
3354
3355 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3356
3357 uint32_t off;
3358 if (uIst)
3359 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3360 else
3361 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3362 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3363 {
3364 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3365 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3366 }
3367
3368 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3369}
3370
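/* Editorial note (not from the source): in the architectural 64-bit TSS, RSP0..RSP2 start at
 * offset 4 and IST1..IST7 at offset 0x24, so e.g. uIst=3 fetches the qword at 0x24 + 2*8 =
 * 0x34, while uIst=0 with uCpl=2 fetches RSP2 at 4 + 2*8 = 0x14. */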
3371
3372/**
3373 * Adjusts the CPU state according to the exception being raised.
3374 *
3375 * @param pCtx The CPU context.
3376 * @param u8Vector The exception that has been raised.
3377 */
3378DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3379{
3380 switch (u8Vector)
3381 {
3382 case X86_XCPT_DB:
3383 pCtx->dr[7] &= ~X86_DR7_GD;
3384 break;
3385 /** @todo Read the AMD and Intel exception reference... */
3386 }
3387}
3388
3389
3390/**
3391 * Implements exceptions and interrupts for real mode.
3392 *
3393 * @returns VBox strict status code.
3394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3395 * @param pCtx The CPU context.
3396 * @param cbInstr The number of bytes to offset rIP by in the return
3397 * address.
3398 * @param u8Vector The interrupt / exception vector number.
3399 * @param fFlags The flags.
3400 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3401 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3402 */
3403IEM_STATIC VBOXSTRICTRC
3404iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3405 PCPUMCTX pCtx,
3406 uint8_t cbInstr,
3407 uint8_t u8Vector,
3408 uint32_t fFlags,
3409 uint16_t uErr,
3410 uint64_t uCr2)
3411{
3412 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3413 NOREF(uErr); NOREF(uCr2);
3414
3415 /*
3416 * Read the IDT entry.
3417 */
3418 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3419 {
3420 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3421 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3422 }
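 /* Editorial note (not from the source): the error code above follows the architectural
  * format - vector index in the selector-index bits with the IDT bit set - so e.g. an
  * out-of-bounds vector 0x21 yields (0x21 << 3) | IDT = 0x10a. */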
3423 RTFAR16 Idte;
3424 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3425 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3426 return rcStrict;
3427
3428 /*
3429 * Push the stack frame.
3430 */
3431 uint16_t *pu16Frame;
3432 uint64_t uNewRsp;
3433 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3434 if (rcStrict != VINF_SUCCESS)
3435 return rcStrict;
3436
3437 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3438#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3439 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3440 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3441 fEfl |= UINT16_C(0xf000);
3442#endif
3443 pu16Frame[2] = (uint16_t)fEfl;
3444 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3445 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3446 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3447 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3448 return rcStrict;
3449
3450 /*
3451 * Load the vector address into cs:ip and make exception specific state
3452 * adjustments.
3453 */
3454 pCtx->cs.Sel = Idte.sel;
3455 pCtx->cs.ValidSel = Idte.sel;
3456 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3457 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3458 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3459 pCtx->rip = Idte.off;
3460 fEfl &= ~X86_EFL_IF;
3461 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3462
3463 /** @todo do we actually do this in real mode? */
3464 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3465 iemRaiseXcptAdjustState(pCtx, u8Vector);
3466
3467 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3468}
3469
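/* Editorial note (not from the source): a concrete example of the real-mode path above.  For
 * vector 10h the IVT entry fetched is the 4-byte offset:segment pair at linear 10h * 4 = 40h,
 * the frame pushed is the three words FLAGS, CS and IP (the IP including cbInstr for software
 * interrupts), and the new CS base becomes Idte.sel << 4. */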
3470
3471/**
3472 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3473 *
3474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3475 * @param pSReg Pointer to the segment register.
3476 */
3477IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3478{
3479 pSReg->Sel = 0;
3480 pSReg->ValidSel = 0;
3481 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3482 {
3483 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3484 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3485 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3486 }
3487 else
3488 {
3489 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3490 /** @todo check this on AMD-V */
3491 pSReg->u64Base = 0;
3492 pSReg->u32Limit = 0;
3493 }
3494}
3495
3496
3497/**
3498 * Loads a segment selector during a task switch in V8086 mode.
3499 *
3500 * @param pSReg Pointer to the segment register.
3501 * @param uSel The selector value to load.
3502 */
3503IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3504{
3505 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3506 pSReg->Sel = uSel;
3507 pSReg->ValidSel = uSel;
3508 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3509 pSReg->u64Base = uSel << 4;
3510 pSReg->u32Limit = 0xffff;
3511 pSReg->Attr.u = 0xf3;
3512}
3513
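/* Editorial note (not from the source): e.g. uSel=0x1234 yields base 0x12340 and limit 0xffff,
 * and Attr 0xf3 decodes as present, DPL=3, non-system, accessed read/write data - the fixed
 * shape of a v8086 segment. */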
3514
3515/**
3516 * Loads a NULL data selector into a selector register, both the hidden and
3517 * visible parts, in protected mode.
3518 *
3519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3520 * @param pSReg Pointer to the segment register.
3521 * @param uRpl The RPL.
3522 */
3523IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3524{
3525 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3526 * data selector in protected mode. */
3527 pSReg->Sel = uRpl;
3528 pSReg->ValidSel = uRpl;
3529 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3530 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3531 {
3532 /* VT-x (Intel 3960x) observed doing something like this. */
3533 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3534 pSReg->u32Limit = UINT32_MAX;
3535 pSReg->u64Base = 0;
3536 }
3537 else
3538 {
3539 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3540 pSReg->u32Limit = 0;
3541 pSReg->u64Base = 0;
3542 }
3543}
3544
3545
3546/**
3547 * Loads a segment selector during a task switch in protected mode.
3548 *
3549 * In this task switch scenario, we would throw \#TS exceptions rather than
3550 * \#GPs.
3551 *
3552 * @returns VBox strict status code.
3553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3554 * @param pSReg Pointer to the segment register.
3555 * @param uSel The new selector value.
3556 *
3557 * @remarks This does _not_ handle CS or SS.
3558 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3559 */
3560IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3561{
3562 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3563
3564 /* Null data selector. */
3565 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3566 {
3567 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3568 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3569 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3570 return VINF_SUCCESS;
3571 }
3572
3573 /* Fetch the descriptor. */
3574 IEMSELDESC Desc;
3575 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3576 if (rcStrict != VINF_SUCCESS)
3577 {
3578 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3579 VBOXSTRICTRC_VAL(rcStrict)));
3580 return rcStrict;
3581 }
3582
3583 /* Must be a data segment or readable code segment. */
3584 if ( !Desc.Legacy.Gen.u1DescType
3585 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3586 {
3587 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3588 Desc.Legacy.Gen.u4Type));
3589 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3590 }
3591
3592 /* Check privileges for data segments and non-conforming code segments. */
3593 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3594 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3595 {
3596 /* The RPL and the new CPL must be less than or equal to the DPL. */
3597 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3598 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3599 {
3600 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3601 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3602 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3603 }
3604 }
3605
3606 /* Is it there? */
3607 if (!Desc.Legacy.Gen.u1Present)
3608 {
3609 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3610 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3611 }
3612
3613 /* The base and limit. */
3614 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3615 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3616
3617 /*
3618 * Ok, everything checked out fine. Now set the accessed bit before
3619 * committing the result into the registers.
3620 */
3621 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3622 {
3623 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3624 if (rcStrict != VINF_SUCCESS)
3625 return rcStrict;
3626 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3627 }
3628
3629 /* Commit */
3630 pSReg->Sel = uSel;
3631 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3632 pSReg->u32Limit = cbLimit;
3633 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3634 pSReg->ValidSel = uSel;
3635 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3636 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3637 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3638
3639 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3640 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3641 return VINF_SUCCESS;
3642}
3643
3644
3645/**
3646 * Performs a task switch.
3647 *
3648 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3649 * caller is responsible for performing the necessary checks (like DPL, TSS
3650 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3651 * reference for JMP, CALL, IRET.
3652 *
3653 * If the task switch is due to a software interrupt or hardware exception,
3654 * the caller is responsible for validating the TSS selector and descriptor. See
3655 * Intel Instruction reference for INT n.
3656 *
3657 * @returns VBox strict status code.
3658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3659 * @param pCtx The CPU context.
3660 * @param enmTaskSwitch What caused this task switch.
3661 * @param uNextEip The EIP effective after the task switch.
3662 * @param fFlags The flags.
3663 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3664 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3665 * @param SelTSS The TSS selector of the new task.
3666 * @param pNewDescTSS Pointer to the new TSS descriptor.
3667 */
3668IEM_STATIC VBOXSTRICTRC
3669iemTaskSwitch(PVMCPU pVCpu,
3670 PCPUMCTX pCtx,
3671 IEMTASKSWITCH enmTaskSwitch,
3672 uint32_t uNextEip,
3673 uint32_t fFlags,
3674 uint16_t uErr,
3675 uint64_t uCr2,
3676 RTSEL SelTSS,
3677 PIEMSELDESC pNewDescTSS)
3678{
3679 Assert(!IEM_IS_REAL_MODE(pVCpu));
3680 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3681
3682 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3683 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3684 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3685 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3686 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3687
3688 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3689 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3690
3691 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3692 fIsNewTSS386, pCtx->eip, uNextEip));
3693
3694 /* Update CR2 in case it's a page-fault. */
3695 /** @todo This should probably be done much earlier in IEM/PGM. See
3696 * @bugref{5653#c49}. */
3697 if (fFlags & IEM_XCPT_FLAGS_CR2)
3698 pCtx->cr2 = uCr2;
3699
3700 /*
3701 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3702 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3703 */
3704 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3705 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3706 if (uNewTSSLimit < uNewTSSLimitMin)
3707 {
3708 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3709 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3710 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3711 }
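 /* Editorial note (not from the source): for a 386 TSS the architectural minimum limit is
  * 0x67 (the structure is 0x68 bytes, cf. the AssertCompile further down), and 0x2B for a
  * 286 TSS; anything smaller is rejected here with #TS. */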
3712
3713 /*
3714 * Check the current TSS limit. The last bytes written to the current TSS during the
3715 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3716 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3717 *
3718 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3719 * end up with smaller than "legal" TSS limits.
3720 */
3721 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3722 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3723 if (uCurTSSLimit < uCurTSSLimitMin)
3724 {
3725 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3726 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3727 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3728 }
3729
3730 /*
3731 * Verify that the new TSS can be accessed and map it. Map only the required contents
3732 * and not the entire TSS.
3733 */
3734 void *pvNewTSS;
3735 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3736 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3737 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3738 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3739 * not perform correct translation if this happens. See Intel spec. 7.2.1
3740 * "Task-State Segment" */
3741 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3742 if (rcStrict != VINF_SUCCESS)
3743 {
3744 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3745 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3746 return rcStrict;
3747 }
3748
3749 /*
3750 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3751 */
3752 uint32_t u32EFlags = pCtx->eflags.u32;
3753 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3754 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3755 {
3756 PX86DESC pDescCurTSS;
3757 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3758 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3759 if (rcStrict != VINF_SUCCESS)
3760 {
3761 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3762 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3763 return rcStrict;
3764 }
3765
3766 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3767 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3768 if (rcStrict != VINF_SUCCESS)
3769 {
3770 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3771 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3772 return rcStrict;
3773 }
3774
3775 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3776 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3777 {
3778 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3779 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3780 u32EFlags &= ~X86_EFL_NT;
3781 }
3782 }
3783
3784 /*
3785 * Save the CPU state into the current TSS.
3786 */
3787 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3788 if (GCPtrNewTSS == GCPtrCurTSS)
3789 {
3790 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3791 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3792 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3793 }
3794 if (fIsNewTSS386)
3795 {
3796 /*
3797 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3798 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3799 */
3800 void *pvCurTSS32;
3801 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3802 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3803 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3804 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3805 if (rcStrict != VINF_SUCCESS)
3806 {
3807 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3808 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3809 return rcStrict;
3810 }
3811
3812 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3813 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3814 pCurTSS32->eip = uNextEip;
3815 pCurTSS32->eflags = u32EFlags;
3816 pCurTSS32->eax = pCtx->eax;
3817 pCurTSS32->ecx = pCtx->ecx;
3818 pCurTSS32->edx = pCtx->edx;
3819 pCurTSS32->ebx = pCtx->ebx;
3820 pCurTSS32->esp = pCtx->esp;
3821 pCurTSS32->ebp = pCtx->ebp;
3822 pCurTSS32->esi = pCtx->esi;
3823 pCurTSS32->edi = pCtx->edi;
3824 pCurTSS32->es = pCtx->es.Sel;
3825 pCurTSS32->cs = pCtx->cs.Sel;
3826 pCurTSS32->ss = pCtx->ss.Sel;
3827 pCurTSS32->ds = pCtx->ds.Sel;
3828 pCurTSS32->fs = pCtx->fs.Sel;
3829 pCurTSS32->gs = pCtx->gs.Sel;
3830
3831 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3832 if (rcStrict != VINF_SUCCESS)
3833 {
3834 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3835 VBOXSTRICTRC_VAL(rcStrict)));
3836 return rcStrict;
3837 }
3838 }
3839 else
3840 {
3841 /*
3842 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3843 */
3844 void *pvCurTSS16;
3845 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3846 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3847 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3848 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3849 if (rcStrict != VINF_SUCCESS)
3850 {
3851 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3852 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3853 return rcStrict;
3854 }
3855
3856 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
3857 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3858 pCurTSS16->ip = uNextEip;
3859 pCurTSS16->flags = u32EFlags;
3860 pCurTSS16->ax = pCtx->ax;
3861 pCurTSS16->cx = pCtx->cx;
3862 pCurTSS16->dx = pCtx->dx;
3863 pCurTSS16->bx = pCtx->bx;
3864 pCurTSS16->sp = pCtx->sp;
3865 pCurTSS16->bp = pCtx->bp;
3866 pCurTSS16->si = pCtx->si;
3867 pCurTSS16->di = pCtx->di;
3868 pCurTSS16->es = pCtx->es.Sel;
3869 pCurTSS16->cs = pCtx->cs.Sel;
3870 pCurTSS16->ss = pCtx->ss.Sel;
3871 pCurTSS16->ds = pCtx->ds.Sel;
3872
3873 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3874 if (rcStrict != VINF_SUCCESS)
3875 {
3876 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3877 VBOXSTRICTRC_VAL(rcStrict)));
3878 return rcStrict;
3879 }
3880 }
3881
3882 /*
3883 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3884 */
3885 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3886 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3887 {
3888 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3889 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3890 pNewTSS->selPrev = pCtx->tr.Sel;
3891 }
3892
3893 /*
3894 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3895 * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
3896 */
3897 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3898 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3899 bool fNewDebugTrap;
3900 if (fIsNewTSS386)
3901 {
3902 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3903 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3904 uNewEip = pNewTSS32->eip;
3905 uNewEflags = pNewTSS32->eflags;
3906 uNewEax = pNewTSS32->eax;
3907 uNewEcx = pNewTSS32->ecx;
3908 uNewEdx = pNewTSS32->edx;
3909 uNewEbx = pNewTSS32->ebx;
3910 uNewEsp = pNewTSS32->esp;
3911 uNewEbp = pNewTSS32->ebp;
3912 uNewEsi = pNewTSS32->esi;
3913 uNewEdi = pNewTSS32->edi;
3914 uNewES = pNewTSS32->es;
3915 uNewCS = pNewTSS32->cs;
3916 uNewSS = pNewTSS32->ss;
3917 uNewDS = pNewTSS32->ds;
3918 uNewFS = pNewTSS32->fs;
3919 uNewGS = pNewTSS32->gs;
3920 uNewLdt = pNewTSS32->selLdt;
3921 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3922 }
3923 else
3924 {
3925 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3926 uNewCr3 = 0;
3927 uNewEip = pNewTSS16->ip;
3928 uNewEflags = pNewTSS16->flags;
3929 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3930 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3931 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3932 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3933 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3934 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3935 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3936 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3937 uNewES = pNewTSS16->es;
3938 uNewCS = pNewTSS16->cs;
3939 uNewSS = pNewTSS16->ss;
3940 uNewDS = pNewTSS16->ds;
3941 uNewFS = 0;
3942 uNewGS = 0;
3943 uNewLdt = pNewTSS16->selLdt;
3944 fNewDebugTrap = false;
3945 }
3946
3947 if (GCPtrNewTSS == GCPtrCurTSS)
3948 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3949 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3950
3951 /*
3952 * We're done accessing the new TSS.
3953 */
3954 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3955 if (rcStrict != VINF_SUCCESS)
3956 {
3957 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3958 return rcStrict;
3959 }
3960
3961 /*
3962 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3963 */
3964 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3965 {
3966 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3967 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3968 if (rcStrict != VINF_SUCCESS)
3969 {
3970 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3971 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3972 return rcStrict;
3973 }
3974
3975 /* Check that the descriptor indicates the new TSS is available (not busy). */
3976 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3977 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3978 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3979
3980 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3981 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3982 if (rcStrict != VINF_SUCCESS)
3983 {
3984 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3985 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3986 return rcStrict;
3987 }
3988 }
3989
3990 /*
3991 * From this point on, we're technically in the new task. Exceptions raised from here are
3992 * deferred until the task switch completes, and are delivered before executing any instructions in the new task.
3993 */
3994 pCtx->tr.Sel = SelTSS;
3995 pCtx->tr.ValidSel = SelTSS;
3996 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3997 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3998 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3999 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4000 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4001
4002 /* Set the busy bit in TR. */
4003 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4004 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4005 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4006 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4007 {
4008 uNewEflags |= X86_EFL_NT;
4009 }
4010
4011 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4012 pCtx->cr0 |= X86_CR0_TS;
4013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4014
4015 pCtx->eip = uNewEip;
4016 pCtx->eax = uNewEax;
4017 pCtx->ecx = uNewEcx;
4018 pCtx->edx = uNewEdx;
4019 pCtx->ebx = uNewEbx;
4020 pCtx->esp = uNewEsp;
4021 pCtx->ebp = uNewEbp;
4022 pCtx->esi = uNewEsi;
4023 pCtx->edi = uNewEdi;
4024
4025 uNewEflags &= X86_EFL_LIVE_MASK;
4026 uNewEflags |= X86_EFL_RA1_MASK;
4027 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4028
4029 /*
4030 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4031 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4032 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4033 */
4034 pCtx->es.Sel = uNewES;
4035 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4036
4037 pCtx->cs.Sel = uNewCS;
4038 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4039
4040 pCtx->ss.Sel = uNewSS;
4041 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4042
4043 pCtx->ds.Sel = uNewDS;
4044 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4045
4046 pCtx->fs.Sel = uNewFS;
4047 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4048
4049 pCtx->gs.Sel = uNewGS;
4050 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4051 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4052
4053 pCtx->ldtr.Sel = uNewLdt;
4054 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4055 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4056 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4057
4058 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4059 {
4060 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4061 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4062 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4063 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4064 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4065 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4066 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4067 }
4068
4069 /*
4070 * Switch CR3 for the new task.
4071 */
4072 if ( fIsNewTSS386
4073 && (pCtx->cr0 & X86_CR0_PG))
4074 {
4075 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4076 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4077 {
4078 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4079 AssertRCSuccessReturn(rc, rc);
4080 }
4081 else
4082 pCtx->cr3 = uNewCr3;
4083
4084 /* Inform PGM. */
4085 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4086 {
4087 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4088 AssertRCReturn(rc, rc);
4089 /* ignore informational status codes */
4090 }
4091 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4092 }
4093
4094 /*
4095 * Switch LDTR for the new task.
4096 */
4097 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4098 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4099 else
4100 {
4101 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4102
4103 IEMSELDESC DescNewLdt;
4104 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4105 if (rcStrict != VINF_SUCCESS)
4106 {
4107 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4108 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4109 return rcStrict;
4110 }
4111 if ( !DescNewLdt.Legacy.Gen.u1Present
4112 || DescNewLdt.Legacy.Gen.u1DescType
4113 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4114 {
4115 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4116 uNewLdt, DescNewLdt.Legacy.u));
4117 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4118 }
4119
4120 pCtx->ldtr.ValidSel = uNewLdt;
4121 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4122 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4123 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4124 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4125 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4126 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4128 }
4129
4130 IEMSELDESC DescSS;
4131 if (IEM_IS_V86_MODE(pVCpu))
4132 {
4133 pVCpu->iem.s.uCpl = 3;
4134 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4135 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4136 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4137 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4138 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4139 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4140
4141 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4142 DescSS.Legacy.u = 0;
4143 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4144 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4145 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4146 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4147 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4148 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4149 DescSS.Legacy.Gen.u2Dpl = 3;
4150 }
4151 else
4152 {
4153 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4154
4155 /*
4156 * Load the stack segment for the new task.
4157 */
4158 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4159 {
4160 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4161 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4162 }
4163
4164 /* Fetch the descriptor. */
4165 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4166 if (rcStrict != VINF_SUCCESS)
4167 {
4168 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4169 VBOXSTRICTRC_VAL(rcStrict)));
4170 return rcStrict;
4171 }
4172
4173 /* SS must be a data segment and writable. */
4174 if ( !DescSS.Legacy.Gen.u1DescType
4175 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4176 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4177 {
4178 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4179 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4180 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4181 }
4182
4183 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4184 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4185 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4186 {
4187 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4188 uNewCpl));
4189 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4190 }
4191
4192 /* Is it there? */
4193 if (!DescSS.Legacy.Gen.u1Present)
4194 {
4195 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4196 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4197 }
4198
4199 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4200 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4201
4202 /* Set the accessed bit before committing the result into SS. */
4203 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4204 {
4205 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4206 if (rcStrict != VINF_SUCCESS)
4207 return rcStrict;
4208 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4209 }
4210
4211 /* Commit SS. */
4212 pCtx->ss.Sel = uNewSS;
4213 pCtx->ss.ValidSel = uNewSS;
4214 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4215 pCtx->ss.u32Limit = cbLimit;
4216 pCtx->ss.u64Base = u64Base;
4217 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4219
4220 /* CPL has changed, update IEM before loading rest of segments. */
4221 pVCpu->iem.s.uCpl = uNewCpl;
4222
4223 /*
4224 * Load the data segments for the new task.
4225 */
4226 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4227 if (rcStrict != VINF_SUCCESS)
4228 return rcStrict;
4229 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4230 if (rcStrict != VINF_SUCCESS)
4231 return rcStrict;
4232 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4233 if (rcStrict != VINF_SUCCESS)
4234 return rcStrict;
4235 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238
4239 /*
4240 * Load the code segment for the new task.
4241 */
4242 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4243 {
4244 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4245 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4246 }
4247
4248 /* Fetch the descriptor. */
4249 IEMSELDESC DescCS;
4250 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4251 if (rcStrict != VINF_SUCCESS)
4252 {
4253 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4254 return rcStrict;
4255 }
4256
4257 /* CS must be a code segment. */
4258 if ( !DescCS.Legacy.Gen.u1DescType
4259 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4260 {
4261 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4262 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4263 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4264 }
4265
4266 /* For conforming CS, DPL must be less than or equal to the RPL. */
4267 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4268 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4269 {
4270 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4271 DescCS.Legacy.Gen.u2Dpl));
4272 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4273 }
4274
4275 /* For non-conforming CS, DPL must match RPL. */
4276 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4277 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4278 {
4279 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4280 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4281 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4282 }
4283
4284 /* Is it there? */
4285 if (!DescCS.Legacy.Gen.u1Present)
4286 {
4287 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4288 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4289 }
4290
4291 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4292 u64Base = X86DESC_BASE(&DescCS.Legacy);
4293
4294 /* Set the accessed bit before committing the result into CS. */
4295 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4296 {
4297 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4298 if (rcStrict != VINF_SUCCESS)
4299 return rcStrict;
4300 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4301 }
4302
4303 /* Commit CS. */
4304 pCtx->cs.Sel = uNewCS;
4305 pCtx->cs.ValidSel = uNewCS;
4306 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4307 pCtx->cs.u32Limit = cbLimit;
4308 pCtx->cs.u64Base = u64Base;
4309 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4310 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4311 }
4312
4313 /** @todo Debug trap. */
4314 if (fIsNewTSS386 && fNewDebugTrap)
4315 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4316
4317 /*
4318 * Construct the error code masks based on what caused this task switch.
4319 * See Intel Instruction reference for INT.
4320 */
4321 uint16_t uExt;
4322 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4323 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4324 {
4325 uExt = 1;
4326 }
4327 else
4328 uExt = 0;
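 /* Editorial note (not from the source): uExt ends up as the EXT bit (bit 0) of any error
  * code raised while completing the switch below, flagging the event as externally induced
  * when the switch was triggered by a hardware interrupt or exception rather than INT n. */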
4329
4330 /*
4331 * Push any error code on to the new stack.
4332 */
4333 if (fFlags & IEM_XCPT_FLAGS_ERR)
4334 {
4335 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4336 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4337 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4338
4339 /* Check that there is sufficient space on the stack. */
4340 /** @todo Factor out segment limit checking for normal/expand down segments
4341 * into a separate function. */
4342 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4343 {
4344 if ( pCtx->esp - 1 > cbLimitSS
4345 || pCtx->esp < cbStackFrame)
4346 {
4347 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4348 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4349 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4350 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4351 }
4352 }
4353 else
4354 {
4355 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4356 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4357 {
4358 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4359 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4360 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4361 }
4362 }
4363
4364
4365 if (fIsNewTSS386)
4366 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4367 else
4368 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4369 if (rcStrict != VINF_SUCCESS)
4370 {
4371 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4372 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4373 return rcStrict;
4374 }
4375 }
4376
4377 /* Check the new EIP against the new CS limit. */
4378 if (pCtx->eip > pCtx->cs.u32Limit)
4379 {
4380 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4381 pCtx->eip, pCtx->cs.u32Limit));
4382 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4383 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4384 }
4385
4386 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4387 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4388}
4389
4390
4391/**
4392 * Implements exceptions and interrupts for protected mode.
4393 *
4394 * @returns VBox strict status code.
4395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4396 * @param pCtx The CPU context.
4397 * @param cbInstr The number of bytes to offset rIP by in the return
4398 * address.
4399 * @param u8Vector The interrupt / exception vector number.
4400 * @param fFlags The flags.
4401 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4402 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4403 */
4404IEM_STATIC VBOXSTRICTRC
4405iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4406 PCPUMCTX pCtx,
4407 uint8_t cbInstr,
4408 uint8_t u8Vector,
4409 uint32_t fFlags,
4410 uint16_t uErr,
4411 uint64_t uCr2)
4412{
4413 /*
4414 * Read the IDT entry.
4415 */
4416 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4417 {
4418 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4419 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4420 }
4421 X86DESC Idte;
4422 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4423 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4424 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4425 return rcStrict;
4426 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4427 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4428 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4429
4430 /*
4431 * Check the descriptor type, DPL and such.
4432 * ASSUMES this is done in the same order as described for call-gate calls.
4433 */
4434 if (Idte.Gate.u1DescType)
4435 {
4436 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4437 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4438 }
4439 bool fTaskGate = false;
4440 uint8_t f32BitGate = true;
4441 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4442 switch (Idte.Gate.u4Type)
4443 {
4444 case X86_SEL_TYPE_SYS_UNDEFINED:
4445 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4446 case X86_SEL_TYPE_SYS_LDT:
4447 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4448 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4449 case X86_SEL_TYPE_SYS_UNDEFINED2:
4450 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4451 case X86_SEL_TYPE_SYS_UNDEFINED3:
4452 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4453 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4454 case X86_SEL_TYPE_SYS_UNDEFINED4:
4455 {
4456 /** @todo check what actually happens when the type is wrong...
4457 * esp. call gates. */
4458 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4459 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4460 }
4461
4462 case X86_SEL_TYPE_SYS_286_INT_GATE:
4463 f32BitGate = false;
4464 /* fall thru */
4465 case X86_SEL_TYPE_SYS_386_INT_GATE:
4466 fEflToClear |= X86_EFL_IF;
4467 break;
4468
4469 case X86_SEL_TYPE_SYS_TASK_GATE:
4470 fTaskGate = true;
4471#ifndef IEM_IMPLEMENTS_TASKSWITCH
4472 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4473#endif
4474 break;
4475
4476 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4477 f32BitGate = false;
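 /* fall thru */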
4478 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4479 break;
4480
4481 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4482 }
4483
4484 /* Check DPL against CPL if applicable. */
4485 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4486 {
4487 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4488 {
4489 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4490 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4491 }
4492 }
4493
4494 /* Is it there? */
4495 if (!Idte.Gate.u1Present)
4496 {
4497 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4498 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4499 }
4500
4501 /* Is it a task-gate? */
4502 if (fTaskGate)
4503 {
4504 /*
4505 * Construct the error code masks based on what caused this task switch.
4506 * See Intel Instruction reference for INT.
4507 */
4508 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4509 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4510 RTSEL SelTSS = Idte.Gate.u16Sel;
4511
4512 /*
4513 * Fetch the TSS descriptor in the GDT.
4514 */
4515 IEMSELDESC DescTSS;
4516 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4517 if (rcStrict != VINF_SUCCESS)
4518 {
4519 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4520 VBOXSTRICTRC_VAL(rcStrict)));
4521 return rcStrict;
4522 }
4523
4524 /* The TSS descriptor must be a system segment and be available (not busy). */
4525 if ( DescTSS.Legacy.Gen.u1DescType
4526 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4527 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4528 {
4529 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4530 u8Vector, SelTSS, DescTSS.Legacy.au64));
4531 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4532 }
4533
4534 /* The TSS must be present. */
4535 if (!DescTSS.Legacy.Gen.u1Present)
4536 {
4537 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4538 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4539 }
4540
4541 /* Do the actual task switch. */
4542 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4543 }
4544
4545 /* A null CS is bad. */
4546 RTSEL NewCS = Idte.Gate.u16Sel;
4547 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4548 {
4549 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4550 return iemRaiseGeneralProtectionFault0(pVCpu);
4551 }
4552
4553 /* Fetch the descriptor for the new CS. */
4554 IEMSELDESC DescCS;
4555 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4556 if (rcStrict != VINF_SUCCESS)
4557 {
4558 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4559 return rcStrict;
4560 }
4561
4562 /* Must be a code segment. */
4563 if (!DescCS.Legacy.Gen.u1DescType)
4564 {
4565 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4566 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4567 }
4568 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4569 {
4570 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4571 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4572 }
4573
4574 /* Don't allow lowering the privilege level. */
4575 /** @todo Does the lowering of privileges apply to software interrupts
4576 * only? This has bearings on the more-privileged or
4577 * same-privilege stack behavior further down. A testcase would
4578 * be nice. */
4579 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4580 {
4581 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4582 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4583 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4584 }
4585
4586 /* Make sure the selector is present. */
4587 if (!DescCS.Legacy.Gen.u1Present)
4588 {
4589 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4590 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4591 }
4592
4593 /* Check the new EIP against the new CS limit. */
4594 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4595 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4596 ? Idte.Gate.u16OffsetLow
4597 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4598 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4599 if (uNewEip > cbLimitCS)
4600 {
4601 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4602 u8Vector, uNewEip, cbLimitCS, NewCS));
4603 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4604 }
4605 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4606
4607 /* Calc the flag image to push. */
4608 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4609 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4610 fEfl &= ~X86_EFL_RF;
4611 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4612 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4613
4614 /* From V8086 mode only go to CPL 0. */
4615 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4616 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4617 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4618 {
4619 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4620 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4621 }
4622
4623 /*
4624 * If the privilege level changes, we need to get a new stack from the TSS.
4625 * This in turn means validating the new SS and ESP...
4626 */
4627 if (uNewCpl != pVCpu->iem.s.uCpl)
4628 {
4629 RTSEL NewSS;
4630 uint32_t uNewEsp;
4631 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4632 if (rcStrict != VINF_SUCCESS)
4633 return rcStrict;
4634
4635 IEMSELDESC DescSS;
4636 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4637 if (rcStrict != VINF_SUCCESS)
4638 return rcStrict;
4639 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4640 if (!DescSS.Legacy.Gen.u1DefBig)
4641 {
4642 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4643 uNewEsp = (uint16_t)uNewEsp;
4644 }
4645
4646 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4647
4648 /* Check that there is sufficient space for the stack frame. */
4649 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4650 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4651 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4652 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4653
4654 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4655 {
4656 if ( uNewEsp - 1 > cbLimitSS
4657 || uNewEsp < cbStackFrame)
4658 {
4659 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4660 u8Vector, NewSS, uNewEsp, cbStackFrame));
4661 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4662 }
4663 }
4664 else
4665 {
4666 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4667 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4668 {
4669 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4670 u8Vector, NewSS, uNewEsp, cbStackFrame));
4671 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4672 }
4673 }
4674
4675 /*
4676 * Start making changes.
4677 */
4678
4679 /* Set the new CPL so that stack accesses use it. */
4680 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4681 pVCpu->iem.s.uCpl = uNewCpl;
4682
4683 /* Create the stack frame. */
4684 RTPTRUNION uStackFrame;
4685 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4686 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4687 if (rcStrict != VINF_SUCCESS)
4688 return rcStrict;
4689 void * const pvStackFrame = uStackFrame.pv;
4690 if (f32BitGate)
4691 {
4692 if (fFlags & IEM_XCPT_FLAGS_ERR)
4693 *uStackFrame.pu32++ = uErr;
4694 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4695 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4696 uStackFrame.pu32[2] = fEfl;
4697 uStackFrame.pu32[3] = pCtx->esp;
4698 uStackFrame.pu32[4] = pCtx->ss.Sel;
4699 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4700 if (fEfl & X86_EFL_VM)
4701 {
4702 uStackFrame.pu32[1] = pCtx->cs.Sel;
4703 uStackFrame.pu32[5] = pCtx->es.Sel;
4704 uStackFrame.pu32[6] = pCtx->ds.Sel;
4705 uStackFrame.pu32[7] = pCtx->fs.Sel;
4706 uStackFrame.pu32[8] = pCtx->gs.Sel;
4707 }
4708 }
4709 else
4710 {
4711 if (fFlags & IEM_XCPT_FLAGS_ERR)
4712 *uStackFrame.pu16++ = uErr;
4713 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4714 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4715 uStackFrame.pu16[2] = fEfl;
4716 uStackFrame.pu16[3] = pCtx->sp;
4717 uStackFrame.pu16[4] = pCtx->ss.Sel;
4718 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4719 if (fEfl & X86_EFL_VM)
4720 {
4721 uStackFrame.pu16[1] = pCtx->cs.Sel;
4722 uStackFrame.pu16[5] = pCtx->es.Sel;
4723 uStackFrame.pu16[6] = pCtx->ds.Sel;
4724 uStackFrame.pu16[7] = pCtx->fs.Sel;
4725 uStackFrame.pu16[8] = pCtx->gs.Sel;
4726 }
4727 }
4728 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4729 if (rcStrict != VINF_SUCCESS)
4730 return rcStrict;
4731
4732 /* Mark the selectors 'accessed' (hope this is the correct time). */
4733 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4734 * after pushing the stack frame? (Write protect the gdt + stack to
4735 * find out.) */
4736 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4737 {
4738 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4739 if (rcStrict != VINF_SUCCESS)
4740 return rcStrict;
4741 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4742 }
4743
4744 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4745 {
4746 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4747 if (rcStrict != VINF_SUCCESS)
4748 return rcStrict;
4749 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4750 }
4751
4752 /*
4753 * Start committing the register changes (joins with the DPL=CPL branch).
4754 */
4755 pCtx->ss.Sel = NewSS;
4756 pCtx->ss.ValidSel = NewSS;
4757 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4758 pCtx->ss.u32Limit = cbLimitSS;
4759 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4760 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4761 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4762 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4763 * SP is loaded).
4764 * Need to check the other combinations too:
4765 * - 16-bit TSS, 32-bit handler
4766 * - 32-bit TSS, 16-bit handler */
4767 if (!pCtx->ss.Attr.n.u1DefBig)
4768 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4769 else
4770 pCtx->rsp = uNewEsp - cbStackFrame;
4771
4772 if (fEfl & X86_EFL_VM)
4773 {
4774 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4775 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4776 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4777 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4778 }
4779 }
4780 /*
4781 * Same privilege, no stack change and smaller stack frame.
4782 */
4783 else
4784 {
4785 uint64_t uNewRsp;
4786 RTPTRUNION uStackFrame;
4787 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4788 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4789 if (rcStrict != VINF_SUCCESS)
4790 return rcStrict;
4791 void * const pvStackFrame = uStackFrame.pv;
4792
4793 if (f32BitGate)
4794 {
4795 if (fFlags & IEM_XCPT_FLAGS_ERR)
4796 *uStackFrame.pu32++ = uErr;
4797 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4798 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4799 uStackFrame.pu32[2] = fEfl;
4800 }
4801 else
4802 {
4803 if (fFlags & IEM_XCPT_FLAGS_ERR)
4804 *uStackFrame.pu16++ = uErr;
4805 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4806 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4807 uStackFrame.pu16[2] = fEfl;
4808 }
4809 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
4810 if (rcStrict != VINF_SUCCESS)
4811 return rcStrict;
4812
4813 /* Mark the CS selector as 'accessed'. */
4814 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4815 {
4816 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4817 if (rcStrict != VINF_SUCCESS)
4818 return rcStrict;
4819 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4820 }
4821
4822 /*
4823 * Start committing the register changes (joins with the other branch).
4824 */
4825 pCtx->rsp = uNewRsp;
4826 }
4827
4828 /* ... register committing continues. */
4829 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4830 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4831 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4832 pCtx->cs.u32Limit = cbLimitCS;
4833 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4834 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4835
4836 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4837 fEfl &= ~fEflToClear;
4838 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4839
4840 if (fFlags & IEM_XCPT_FLAGS_CR2)
4841 pCtx->cr2 = uCr2;
4842
4843 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4844 iemRaiseXcptAdjustState(pCtx, u8Vector);
4845
4846 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4847}
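

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not used by
 * IEM; the helper names are made up).  It restates the stack-frame size
 * arithmetic used in iemRaiseXcptOrIntInProtMode above.
 */
#if 0
/* Frame size when switching stacks (privilege change): IP, CS, FLAGS, SP, SS,
   plus ES/DS/FS/GS when interrupting V8086 code, plus an optional error code.
   Each slot is 2 bytes for a 16-bit gate and 4 bytes for a 32-bit gate. */
static unsigned cbXcptFrameStackSwitch(bool fV86, bool fErrCode, bool f32BitGate)
{
    unsigned const cSlots = (fV86 ? 9 : 5) + (fErrCode ? 1 : 0);
    return (cSlots * 2) << f32BitGate;  /* 10/12 or 18/20, doubled for 32-bit gates */
}

/* Frame size when staying on the same stack (no privilege change): just IP,
   CS, FLAGS and an optional error code. */
static unsigned cbXcptFrameSameStack(bool fErrCode, bool f32BitGate)
{
    unsigned const cSlots = 3 + (fErrCode ? 1 : 0);
    return (cSlots * 2) << f32BitGate;  /* 6/8, doubled for 32-bit gates */
}
#endif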
4848
4849
4850/**
4851 * Implements exceptions and interrupts for long mode.
4852 *
4853 * @returns VBox strict status code.
4854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4855 * @param pCtx The CPU context.
4856 * @param cbInstr The number of bytes to offset rIP by in the return
4857 * address.
4858 * @param u8Vector The interrupt / exception vector number.
4859 * @param fFlags The flags.
4860 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4861 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4862 */
4863IEM_STATIC VBOXSTRICTRC
4864iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4865 PCPUMCTX pCtx,
4866 uint8_t cbInstr,
4867 uint8_t u8Vector,
4868 uint32_t fFlags,
4869 uint16_t uErr,
4870 uint64_t uCr2)
4871{
4872 /*
4873 * Read the IDT entry.
4874 */
4875 uint16_t offIdt = (uint16_t)u8Vector << 4;
4876 if (pCtx->idtr.cbIdt < offIdt + 7)
4877 {
4878 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4879 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4880 }
4881 X86DESC64 Idte;
4882 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4883 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4884 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4885 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4886 return rcStrict;
4887 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4888 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4889 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4890
4891 /*
4892 * Check the descriptor type, DPL and such.
4893 * ASSUMES this is done in the same order as described for call-gate calls.
4894 */
4895 if (Idte.Gate.u1DescType)
4896 {
4897 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4898 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4899 }
4900 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4901 switch (Idte.Gate.u4Type)
4902 {
4903 case AMD64_SEL_TYPE_SYS_INT_GATE:
4904 fEflToClear |= X86_EFL_IF;
4905 break;
4906 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4907 break;
4908
4909 default:
4910 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4911 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4912 }
4913
4914 /* Check DPL against CPL if applicable. */
4915 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4916 {
4917 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4918 {
4919 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4920 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4921 }
4922 }
4923
4924 /* Is it there? */
4925 if (!Idte.Gate.u1Present)
4926 {
4927 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4928 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4929 }
4930
4931 /* A null CS is bad. */
4932 RTSEL NewCS = Idte.Gate.u16Sel;
4933 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4934 {
4935 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4936 return iemRaiseGeneralProtectionFault0(pVCpu);
4937 }
4938
4939 /* Fetch the descriptor for the new CS. */
4940 IEMSELDESC DescCS;
4941 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4942 if (rcStrict != VINF_SUCCESS)
4943 {
4944 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4945 return rcStrict;
4946 }
4947
4948 /* Must be a 64-bit code segment. */
4949 if (!DescCS.Long.Gen.u1DescType)
4950 {
4951 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4952 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4953 }
4954 if ( !DescCS.Long.Gen.u1Long
4955 || DescCS.Long.Gen.u1DefBig
4956 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4957 {
4958 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4959 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4960 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4961 }
4962
4963 /* Don't allow lowering the privilege level. For non-conforming CS
4964 selectors, the CS.DPL sets the privilege level the trap/interrupt
4965 handler runs at. For conforming CS selectors, the CPL remains
4966 unchanged, but the CS.DPL must be <= CPL. */
4967 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4968 * when CPU in Ring-0. Result \#GP? */
4969 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4970 {
4971 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4972 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4973 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4974 }
4975
4976
4977 /* Make sure the selector is present. */
4978 if (!DescCS.Legacy.Gen.u1Present)
4979 {
4980 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4981 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4982 }
4983
4984 /* Check that the new RIP is canonical. */
4985 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4986 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4987 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4988 if (!IEM_IS_CANONICAL(uNewRip))
4989 {
4990 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4991 return iemRaiseGeneralProtectionFault0(pVCpu);
4992 }
4993
4994 /*
4995 * If the privilege level changes or if the IST isn't zero, we need to get
4996 * a new stack from the TSS.
4997 */
4998 uint64_t uNewRsp;
4999 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5000 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5001 if ( uNewCpl != pVCpu->iem.s.uCpl
5002 || Idte.Gate.u3IST != 0)
5003 {
5004 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5005 if (rcStrict != VINF_SUCCESS)
5006 return rcStrict;
5007 }
5008 else
5009 uNewRsp = pCtx->rsp;
5010 uNewRsp &= ~(uint64_t)0xf;
5011
5012 /*
5013 * Calc the flag image to push.
5014 */
5015 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5016 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5017 fEfl &= ~X86_EFL_RF;
5018 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5019 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5020
5021 /*
5022 * Start making changes.
5023 */
5024 /* Set the new CPL so that stack accesses use it. */
5025 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5026 pVCpu->iem.s.uCpl = uNewCpl;
5027
5028 /* Create the stack frame. */
5029 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5030 RTPTRUNION uStackFrame;
5031 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5032 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035 void * const pvStackFrame = uStackFrame.pv;
5036
5037 if (fFlags & IEM_XCPT_FLAGS_ERR)
5038 *uStackFrame.pu64++ = uErr;
5039 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5040 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5041 uStackFrame.pu64[2] = fEfl;
5042 uStackFrame.pu64[3] = pCtx->rsp;
5043 uStackFrame.pu64[4] = pCtx->ss.Sel;
5044 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5045 if (rcStrict != VINF_SUCCESS)
5046 return rcStrict;
5047
5048 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5049 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5050 * after pushing the stack frame? (Write protect the gdt + stack to
5051 * find out.) */
5052 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5053 {
5054 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5055 if (rcStrict != VINF_SUCCESS)
5056 return rcStrict;
5057 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5058 }
5059
5060 /*
5061 * Start committing the register changes.
5062 */
5063 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5064 * hidden registers when interrupting 32-bit or 16-bit code! */
5065 if (uNewCpl != uOldCpl)
5066 {
5067 pCtx->ss.Sel = 0 | uNewCpl;
5068 pCtx->ss.ValidSel = 0 | uNewCpl;
5069 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5070 pCtx->ss.u32Limit = UINT32_MAX;
5071 pCtx->ss.u64Base = 0;
5072 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5073 }
5074 pCtx->rsp = uNewRsp - cbStackFrame;
5075 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5076 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5077 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5078 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5079 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5080 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5081 pCtx->rip = uNewRip;
5082
5083 fEfl &= ~fEflToClear;
5084 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5085
5086 if (fFlags & IEM_XCPT_FLAGS_CR2)
5087 pCtx->cr2 = uCr2;
5088
5089 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5090 iemRaiseXcptAdjustState(pCtx, u8Vector);
5091
5092 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5093}
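

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the helper names are made up).  It restates how the 64-bit gate
 * target and the interrupt stack-frame size are computed in the function
 * above.
 */
#if 0
/* The 64-bit handler address is split over three fields of the 16-byte gate. */
static uint64_t AssembleLongModeGateOffset(uint16_t uLow, uint16_t uHigh, uint32_t uTop)
{
    return (uint64_t)uLow
         | ((uint64_t)uHigh << 16)
         | ((uint64_t)uTop  << 32);
}

/* The frame is SS, RSP, RFLAGS, CS and RIP, each pushed as a 64-bit value,
   plus an optional error code; RSP is aligned down to 16 bytes first. */
static uint32_t cbLongModeXcptFrame(bool fErrCode)
{
    return (uint32_t)(sizeof(uint64_t) * (5 + (fErrCode ? 1 : 0)));
}
#endif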
5094
5095
5096/**
5097 * Implements exceptions and interrupts.
5098 *
5099 * All exceptions and interrupts go through this function!
5100 *
5101 * @returns VBox strict status code.
5102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5103 * @param cbInstr The number of bytes to offset rIP by in the return
5104 * address.
5105 * @param u8Vector The interrupt / exception vector number.
5106 * @param fFlags The flags.
5107 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5108 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5109 */
5110DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5111iemRaiseXcptOrInt(PVMCPU pVCpu,
5112 uint8_t cbInstr,
5113 uint8_t u8Vector,
5114 uint32_t fFlags,
5115 uint16_t uErr,
5116 uint64_t uCr2)
5117{
5118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5119#ifdef IN_RING0
5120 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5121 AssertRCReturn(rc, rc);
5122#endif
5123
5124#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5125 /*
5126 * Flush prefetch buffer
5127 */
5128 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5129#endif
5130
5131 /*
5132 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5133 */
5134 if ( pCtx->eflags.Bits.u1VM
5135 && pCtx->eflags.Bits.u2IOPL != 3
5136 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5137 && (pCtx->cr0 & X86_CR0_PE) )
5138 {
5139 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5140 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5141 u8Vector = X86_XCPT_GP;
5142 uErr = 0;
5143 }
5144#ifdef DBGFTRACE_ENABLED
5145 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5146 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5147 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5148#endif
5149
5150 /*
5151 * Do recursion accounting.
5152 */
5153 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5154 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5155 if (pVCpu->iem.s.cXcptRecursions == 0)
5156 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5157 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5158 else
5159 {
5160 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5161 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5162
5163 /** @todo double and triple faults. */
5164 if (pVCpu->iem.s.cXcptRecursions >= 3)
5165 {
5166#ifdef DEBUG_bird
5167 AssertFailed();
5168#endif
5169 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5170 }
5171
5172 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
5173 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
5174 {
5175 ....
5176 } */
5177 }
5178 pVCpu->iem.s.cXcptRecursions++;
5179 pVCpu->iem.s.uCurXcpt = u8Vector;
5180 pVCpu->iem.s.fCurXcpt = fFlags;
5181
5182 /*
5183 * Extensive logging.
5184 */
5185#if defined(LOG_ENABLED) && defined(IN_RING3)
5186 if (LogIs3Enabled())
5187 {
5188 PVM pVM = pVCpu->CTX_SUFF(pVM);
5189 char szRegs[4096];
5190 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5191 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5192 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5193 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5194 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5195 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5196 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5197 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5198 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5199 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5200 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5201 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5202 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5203 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5204 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5205 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5206 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5207 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5208 " efer=%016VR{efer}\n"
5209 " pat=%016VR{pat}\n"
5210 " sf_mask=%016VR{sf_mask}\n"
5211 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5212 " lstar=%016VR{lstar}\n"
5213 " star=%016VR{star} cstar=%016VR{cstar}\n"
5214 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5215 );
5216
5217 char szInstr[256];
5218 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5219 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5220 szInstr, sizeof(szInstr), NULL);
5221 Log3(("%s%s\n", szRegs, szInstr));
5222 }
5223#endif /* LOG_ENABLED */
5224
5225 /*
5226 * Call the mode specific worker function.
5227 */
5228 VBOXSTRICTRC rcStrict;
5229 if (!(pCtx->cr0 & X86_CR0_PE))
5230 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5231 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5232 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5233 else
5234 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5235
5236 /* Flush the prefetch buffer. */
5237#ifdef IEM_WITH_CODE_TLB
5238 pVCpu->iem.s.pbInstrBuf = NULL;
5239#else
5240 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5241#endif
5242
5243 /*
5244 * Unwind.
5245 */
5246 pVCpu->iem.s.cXcptRecursions--;
5247 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5248 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5249 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5250 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5251 return rcStrict;
5252}
5253
5254#ifdef IEM_WITH_SETJMP
5255/**
5256 * See iemRaiseXcptOrInt. Will not return.
5257 */
5258IEM_STATIC DECL_NO_RETURN(void)
5259iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5260 uint8_t cbInstr,
5261 uint8_t u8Vector,
5262 uint32_t fFlags,
5263 uint16_t uErr,
5264 uint64_t uCr2)
5265{
5266 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5267 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5268}
5269#endif
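

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the enum and helper name are made up).  It restates how
 * iemRaiseXcptOrInt picks the mode specific worker above.
 */
#if 0
typedef enum { XCPTMODE_REAL, XCPTMODE_PROT, XCPTMODE_LONG } XCPTMODE;

/* Real mode when CR0.PE is clear, long mode when EFER.LMA is set, otherwise
   protected mode (which also covers V8086 via EFLAGS.VM). */
static XCPTMODE PickXcptWorker(bool fCr0Pe, bool fEferLma)
{
    if (!fCr0Pe)
        return XCPTMODE_REAL;
    return fEferLma ? XCPTMODE_LONG : XCPTMODE_PROT;
}
#endif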
5270
5271
5272/** \#DE - 00. */
5273DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5274{
5275 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5276}
5277
5278
5279/** \#DB - 01.
5280 * @note This automatically clears DR7.GD. */
5281DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5282{
5283 /** @todo set/clear RF. */
5284 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5285 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5286}
5287
5288
5289/** \#BR - 05. */
5290DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5291{
5292 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5293}
5294
5295
5296/** \#UD - 06. */
5297DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5298{
5299 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5300}
5301
5302
5303/** \#NM - 07. */
5304DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5305{
5306 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5307}
5308
5309
5310/** \#TS(err) - 0a. */
5311DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5312{
5313 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5314}
5315
5316
5317/** \#TS(tr) - 0a. */
5318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5319{
5320 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5321 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5322}
5323
5324
5325/** \#TS(0) - 0a. */
5326DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5327{
5328 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5329 0, 0);
5330}
5331
5332
5333/** \#TS(err) - 0a. */
5334DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5335{
5336 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5337 uSel & X86_SEL_MASK_OFF_RPL, 0);
5338}
5339
5340
5341/** \#NP(err) - 0b. */
5342DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5343{
5344 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5345}
5346
5347
5348/** \#NP(sel) - 0b. */
5349DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5350{
5351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5352 uSel & ~X86_SEL_RPL, 0);
5353}
5354
5355
5356/** \#SS(seg) - 0c. */
5357DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5358{
5359 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5360 uSel & ~X86_SEL_RPL, 0);
5361}
5362
5363
5364/** \#SS(err) - 0c. */
5365DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5366{
5367 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5368}
5369
5370
5371/** \#GP(n) - 0d. */
5372DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5373{
5374 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5375}
5376
5377
5378/** \#GP(0) - 0d. */
5379DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5380{
5381 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5382}
5383
5384#ifdef IEM_WITH_SETJMP
5385/** \#GP(0) - 0d. */
5386DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5387{
5388 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5389}
5390#endif
5391
5392
5393/** \#GP(sel) - 0d. */
5394DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5395{
5396 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5397 Sel & ~X86_SEL_RPL, 0);
5398}
5399
5400
5401/** \#GP(0) - 0d. */
5402DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5403{
5404 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5405}
5406
5407
5408/** \#GP(sel) - 0d. */
5409DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5410{
5411 NOREF(iSegReg); NOREF(fAccess);
5412 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5413 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5414}
5415
5416#ifdef IEM_WITH_SETJMP
5417/** \#GP(sel) - 0d, longjmp. */
5418DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5419{
5420 NOREF(iSegReg); NOREF(fAccess);
5421 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5422 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5423}
5424#endif
5425
5426/** \#GP(sel) - 0d. */
5427DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5428{
5429 NOREF(Sel);
5430 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5431}
5432
5433#ifdef IEM_WITH_SETJMP
5434/** \#GP(sel) - 0d, longjmp. */
5435DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5436{
5437 NOREF(Sel);
5438 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5439}
5440#endif
5441
5442
5443/** \#GP(sel) - 0d. */
5444DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5445{
5446 NOREF(iSegReg); NOREF(fAccess);
5447 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5448}
5449
5450#ifdef IEM_WITH_SETJMP
5451/** \#GP(sel) - 0d, longjmp. */
5452DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5453 uint32_t fAccess)
5454{
5455 NOREF(iSegReg); NOREF(fAccess);
5456 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5457}
5458#endif
5459
5460
5461/** \#PF(n) - 0e. */
5462DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5463{
5464 uint16_t uErr;
5465 switch (rc)
5466 {
5467 case VERR_PAGE_NOT_PRESENT:
5468 case VERR_PAGE_TABLE_NOT_PRESENT:
5469 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5470 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5471 uErr = 0;
5472 break;
5473
5474 default:
5475 AssertMsgFailed(("%Rrc\n", rc));
5476 /* fall thru */
5477 case VERR_ACCESS_DENIED:
5478 uErr = X86_TRAP_PF_P;
5479 break;
5480
5481 /** @todo reserved */
5482 }
5483
5484 if (pVCpu->iem.s.uCpl == 3)
5485 uErr |= X86_TRAP_PF_US;
5486
5487 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5488 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5489 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5490 uErr |= X86_TRAP_PF_ID;
5491
5492#if 0 /* This is so much nonsense, really. Why was it done like that? */
5493 /* Note! RW access callers reporting a WRITE protection fault will clear
5494 the READ flag before calling. So, read-modify-write accesses (RW)
5495 can safely be reported as READ faults. */
5496 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5497 uErr |= X86_TRAP_PF_RW;
5498#else
5499 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5500 {
5501 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5502 uErr |= X86_TRAP_PF_RW;
5503 }
5504#endif
5505
5506 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5507 uErr, GCPtrWhere);
5508}
5509
5510#ifdef IEM_WITH_SETJMP
5511/** \#PF(n) - 0e, longjmp. */
5512IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5513{
5514 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5515}
5516#endif
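

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the helper name is made up).  It restates how iemRaisePageFault
 * composes the #PF error code from the access type and CPL, using the
 * architectural bit positions directly instead of the X86_TRAP_PF_* defines.
 */
#if 0
static uint16_t ComposePageFaultErrCd(bool fProtViolation, bool fWrite, bool fUser, bool fInstrFetch)
{
    uint16_t uErr = 0;
    if (fProtViolation) uErr |= UINT16_C(0x0001); /* P   - protection violation, not a not-present page (X86_TRAP_PF_P) */
    if (fWrite)         uErr |= UINT16_C(0x0002); /* W/R - caused by a write (X86_TRAP_PF_RW) */
    if (fUser)          uErr |= UINT16_C(0x0004); /* U/S - CPL was 3 (X86_TRAP_PF_US) */
    if (fInstrFetch)    uErr |= UINT16_C(0x0010); /* I/D - instruction fetch w/ NX/PAE paging (X86_TRAP_PF_ID) */
    return uErr;
}
#endif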
5517
5518
5519/** \#MF(0) - 10. */
5520DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5521{
5522 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5523}
5524
5525
5526/** \#AC(0) - 11. */
5527DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5528{
5529 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5530}
5531
5532
5533/**
5534 * Macro for calling iemCImplRaiseDivideError().
5535 *
5536 * This enables us to add/remove arguments and force different levels of
5537 * inlining as we wish.
5538 *
5539 * @return Strict VBox status code.
5540 */
5541#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5542IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5543{
5544 NOREF(cbInstr);
5545 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5546}
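

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0"; the decoder
 * name is made up).  It shows the intended use of the macro above from an
 * opcode decoder function, mirroring the FNIEMOP_UD_STUB pattern below.
 */
#if 0
FNIEMOP_DEF(iemOp_SomeInvalidEncoding) /* hypothetical decoder */
{
    /* Defer to the C implementation, which raises #DE. */
    return IEMOP_RAISE_DIVIDE_ERROR();
}
#endif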
5547
5548
5549/**
5550 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5551 *
5552 * This enables us to add/remove arguments and force different levels of
5553 * inlining as we wish.
5554 *
5555 * @return Strict VBox status code.
5556 */
5557#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5558IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5559{
5560 NOREF(cbInstr);
5561 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5562}
5563
5564
5565/**
5566 * Macro for calling iemCImplRaiseInvalidOpcode().
5567 *
5568 * This enables us to add/remove arguments and force different levels of
5569 * inlining as we wish.
5570 *
5571 * @return Strict VBox status code.
5572 */
5573#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5574IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5575{
5576 NOREF(cbInstr);
5577 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5578}
5579
5580
5581/** @} */
5582
5583
5584/*
5585 *
5586 * Helper routines.
5587 * Helper routines.
5588 * Helper routines.
5589 *
5590 */
5591
5592/**
5593 * Recalculates the effective operand size.
5594 *
5595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5596 */
5597IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5598{
5599 switch (pVCpu->iem.s.enmCpuMode)
5600 {
5601 case IEMMODE_16BIT:
5602 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5603 break;
5604 case IEMMODE_32BIT:
5605 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5606 break;
5607 case IEMMODE_64BIT:
5608 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5609 {
5610 case 0:
5611 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5612 break;
5613 case IEM_OP_PRF_SIZE_OP:
5614 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5615 break;
5616 case IEM_OP_PRF_SIZE_REX_W:
5617 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5618 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5619 break;
5620 }
5621 break;
5622 default:
5623 AssertFailed();
5624 }
5625}
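

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the helper name is made up).  It restates the 64-bit mode rules from
 * the switch above: REX.W forces 64-bit and beats the 66h prefix, 66h alone
 * gives 16-bit, and otherwise the default size applies.
 */
#if 0
static IEMMODE CalcEffOpSize64(bool fRexW, bool fOpSizePrefix, IEMMODE enmDefOpSize)
{
    if (fRexW)
        return IEMMODE_64BIT;
    if (fOpSizePrefix)
        return IEMMODE_16BIT;
    return enmDefOpSize; /* 32-bit, or 64-bit where iemRecalEffOpSize64Default applies */
}
#endif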
5626
5627
5628/**
5629 * Sets the default operand size to 64-bit and recalculates the effective
5630 * operand size.
5631 *
5632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5633 */
5634IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5635{
5636 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5637 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5638 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5639 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5640 else
5641 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5642}
5643
5644
5645/*
5646 *
5647 * Common opcode decoders.
5648 * Common opcode decoders.
5649 * Common opcode decoders.
5650 *
5651 */
5652//#include <iprt/mem.h>
5653
5654/**
5655 * Used to add extra details about a stub case.
5656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5657 */
5658IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5659{
5660#if defined(LOG_ENABLED) && defined(IN_RING3)
5661 PVM pVM = pVCpu->CTX_SUFF(pVM);
5662 char szRegs[4096];
5663 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5664 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5665 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5666 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5667 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5668 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5669 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5670 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5671 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5672 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5673 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5674 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5675 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5676 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5677 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5678 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5679 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5680 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5681 " efer=%016VR{efer}\n"
5682 " pat=%016VR{pat}\n"
5683 " sf_mask=%016VR{sf_mask}\n"
5684 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5685 " lstar=%016VR{lstar}\n"
5686 " star=%016VR{star} cstar=%016VR{cstar}\n"
5687 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5688 );
5689
5690 char szInstr[256];
5691 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5692 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5693 szInstr, sizeof(szInstr), NULL);
5694
5695 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5696#else
5697 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
5698#endif
5699}
5700
5701/**
5702 * Complains about a stub.
5703 *
5704 * Providing two versions of this macro, one for daily use and one for use when
5705 * working on IEM.
5706 */
5707#if 0
5708# define IEMOP_BITCH_ABOUT_STUB() \
5709 do { \
5710 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5711 iemOpStubMsg2(pVCpu); \
5712 RTAssertPanic(); \
5713 } while (0)
5714#else
5715# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5716#endif
5717
5718/** Stubs an opcode. */
5719#define FNIEMOP_STUB(a_Name) \
5720 FNIEMOP_DEF(a_Name) \
5721 { \
5722 RT_NOREF_PV(pVCpu); \
5723 IEMOP_BITCH_ABOUT_STUB(); \
5724 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5725 } \
5726 typedef int ignore_semicolon
5727
5728/** Stubs an opcode. */
5729#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5730 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5731 { \
5732 RT_NOREF_PV(pVCpu); \
5733 RT_NOREF_PV(a_Name0); \
5734 IEMOP_BITCH_ABOUT_STUB(); \
5735 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5736 } \
5737 typedef int ignore_semicolon
5738
5739/** Stubs an opcode which currently should raise \#UD. */
5740#define FNIEMOP_UD_STUB(a_Name) \
5741 FNIEMOP_DEF(a_Name) \
5742 { \
5743 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5744 return IEMOP_RAISE_INVALID_OPCODE(); \
5745 } \
5746 typedef int ignore_semicolon
5747
5748/** Stubs an opcode which currently should raise \#UD. */
5749#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5750 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5751 { \
5752 RT_NOREF_PV(pVCpu); \
5753 RT_NOREF_PV(a_Name0); \
5754 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5755 return IEMOP_RAISE_INVALID_OPCODE(); \
5756 } \
5757 typedef int ignore_semicolon
5758
5759
5760
5761/** @name Register Access.
5762 * @{
5763 */
5764
5765/**
5766 * Gets a reference (pointer) to the specified hidden segment register.
5767 *
5768 * @returns Hidden register reference.
5769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5770 * @param iSegReg The segment register.
5771 */
5772IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5773{
5774 Assert(iSegReg < X86_SREG_COUNT);
5775 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5776 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5777
5778#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5779 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5780 { /* likely */ }
5781 else
5782 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5783#else
5784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5785#endif
5786 return pSReg;
5787}
5788
5789
5790/**
5791 * Ensures that the given hidden segment register is up to date.
5792 *
5793 * @returns Hidden register reference.
5794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5795 * @param pSReg The segment register.
5796 */
5797IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5798{
5799#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5800 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5801 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5802#else
5803 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5804 NOREF(pVCpu);
5805#endif
5806 return pSReg;
5807}
5808
5809
5810/**
5811 * Gets a reference (pointer) to the specified segment register (the selector
5812 * value).
5813 *
5814 * @returns Pointer to the selector variable.
5815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5816 * @param iSegReg The segment register.
5817 */
5818DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5819{
5820 Assert(iSegReg < X86_SREG_COUNT);
5821 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5822 return &pCtx->aSRegs[iSegReg].Sel;
5823}
5824
5825
5826/**
5827 * Fetches the selector value of a segment register.
5828 *
5829 * @returns The selector value.
5830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5831 * @param iSegReg The segment register.
5832 */
5833DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5834{
5835 Assert(iSegReg < X86_SREG_COUNT);
5836 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5837}
5838
5839
5840/**
5841 * Gets a reference (pointer) to the specified general purpose register.
5842 *
5843 * @returns Register reference.
5844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5845 * @param iReg The general purpose register.
5846 */
5847DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5848{
5849 Assert(iReg < 16);
5850 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5851 return &pCtx->aGRegs[iReg];
5852}
5853
5854
5855/**
5856 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5857 *
5858 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5859 *
5860 * @returns Register reference.
5861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5862 * @param iReg The register.
5863 */
5864DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5865{
5866 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5867 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5868 {
5869 Assert(iReg < 16);
5870 return &pCtx->aGRegs[iReg].u8;
5871 }
5872 /* high 8-bit register. */
5873 Assert(iReg < 8);
5874 return &pCtx->aGRegs[iReg & 3].bHi;
5875}
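

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the helper name is made up).  It restates the 8-bit register quirk
 * handled above: without a REX prefix, encodings 4-7 select the high bytes
 * AH/CH/DH/BH of registers 0-3; with any REX prefix they select the low
 * bytes SPL/BPL/SIL/DIL (and 8-15 select R8B..R15B).
 */
#if 0
static void DecodeGReg8(uint8_t iReg, bool fHasRexPrefix, uint8_t *piGReg, bool *pfHighByte)
{
    if (iReg < 4 || fHasRexPrefix)
    {
        *piGReg     = iReg;     /* low byte of GPR iReg */
        *pfHighByte = false;
    }
    else
    {
        *piGReg     = iReg & 3; /* high byte of GPR 0..3 */
        *pfHighByte = true;
    }
}
#endif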
5876
5877
5878/**
5879 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5880 *
5881 * @returns Register reference.
5882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5883 * @param iReg The register.
5884 */
5885DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5886{
5887 Assert(iReg < 16);
5888 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5889 return &pCtx->aGRegs[iReg].u16;
5890}
5891
5892
5893/**
5894 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5895 *
5896 * @returns Register reference.
5897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5898 * @param iReg The register.
5899 */
5900DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5901{
5902 Assert(iReg < 16);
5903 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5904 return &pCtx->aGRegs[iReg].u32;
5905}
5906
5907
5908/**
5909 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5910 *
5911 * @returns Register reference.
5912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5913 * @param iReg The register.
5914 */
5915DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5916{
5917 Assert(iReg < 16);
5918 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5919 return &pCtx->aGRegs[iReg].u64;
5920}
5921
5922
5923/**
5924 * Fetches the value of an 8-bit general purpose register.
5925 *
5926 * @returns The register value.
5927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5928 * @param iReg The register.
5929 */
5930DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5931{
5932 return *iemGRegRefU8(pVCpu, iReg);
5933}
5934
5935
5936/**
5937 * Fetches the value of a 16-bit general purpose register.
5938 *
5939 * @returns The register value.
5940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5941 * @param iReg The register.
5942 */
5943DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5944{
5945 Assert(iReg < 16);
5946 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5947}
5948
5949
5950/**
5951 * Fetches the value of a 32-bit general purpose register.
5952 *
5953 * @returns The register value.
5954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5955 * @param iReg The register.
5956 */
5957DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5958{
5959 Assert(iReg < 16);
5960 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5961}
5962
5963
5964/**
5965 * Fetches the value of a 64-bit general purpose register.
5966 *
5967 * @returns The register value.
5968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5969 * @param iReg The register.
5970 */
5971DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5972{
5973 Assert(iReg < 16);
5974 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5975}
5976
5977
5978/**
5979 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5980 *
5981 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5982 * segment limit.
5983 *
5984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5985 * @param offNextInstr The offset of the next instruction.
5986 */
5987IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5988{
5989 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5990 switch (pVCpu->iem.s.enmEffOpSize)
5991 {
5992 case IEMMODE_16BIT:
5993 {
5994 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5995 if ( uNewIp > pCtx->cs.u32Limit
5996 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5997 return iemRaiseGeneralProtectionFault0(pVCpu);
5998 pCtx->rip = uNewIp;
5999 break;
6000 }
6001
6002 case IEMMODE_32BIT:
6003 {
6004 Assert(pCtx->rip <= UINT32_MAX);
6005 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6006
6007 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6008 if (uNewEip > pCtx->cs.u32Limit)
6009 return iemRaiseGeneralProtectionFault0(pVCpu);
6010 pCtx->rip = uNewEip;
6011 break;
6012 }
6013
6014 case IEMMODE_64BIT:
6015 {
6016 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6017
6018 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6019 if (!IEM_IS_CANONICAL(uNewRip))
6020 return iemRaiseGeneralProtectionFault0(pVCpu);
6021 pCtx->rip = uNewRip;
6022 break;
6023 }
6024
6025 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6026 }
6027
6028 pCtx->eflags.Bits.u1RF = 0;
6029
6030#ifndef IEM_WITH_CODE_TLB
6031 /* Flush the prefetch buffer. */
6032 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6033#endif
6034
6035 return VINF_SUCCESS;
6036}
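

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the helper name is made up).  It restates the canonical address rule
 * that IEM_IS_CANONICAL enforces for the new RIP above: with 48-bit linear
 * addresses, bits 63:47 must all equal bit 47.
 */
#if 0
static bool IsCanonical48(uint64_t uAddr)
{
    uint64_t const uTop = uAddr >> 47;              /* bits 63:47, 17 bits */
    return uTop == 0 || uTop == UINT64_C(0x1ffff);  /* all clear or all set */
}
#endif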
6037
6038
6039/**
6040 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6041 *
6042 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6043 * segment limit.
6044 *
6045 * @returns Strict VBox status code.
6046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6047 * @param offNextInstr The offset of the next instruction.
6048 */
6049IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6050{
6051 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6052 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6053
6054 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6055 if ( uNewIp > pCtx->cs.u32Limit
6056 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6057 return iemRaiseGeneralProtectionFault0(pVCpu);
6058 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6059 pCtx->rip = uNewIp;
6060 pCtx->eflags.Bits.u1RF = 0;
6061
6062#ifndef IEM_WITH_CODE_TLB
6063 /* Flush the prefetch buffer. */
6064 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6065#endif
6066
6067 return VINF_SUCCESS;
6068}
6069
6070
6071/**
6072 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6073 *
6074 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6075 * segment limit.
6076 *
6077 * @returns Strict VBox status code.
6078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6079 * @param offNextInstr The offset of the next instruction.
6080 */
6081IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6082{
6083 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6084 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6085
6086 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6087 {
6088 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6089
6090 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6091 if (uNewEip > pCtx->cs.u32Limit)
6092 return iemRaiseGeneralProtectionFault0(pVCpu);
6093 pCtx->rip = uNewEip;
6094 }
6095 else
6096 {
6097 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6098
6099 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6100 if (!IEM_IS_CANONICAL(uNewRip))
6101 return iemRaiseGeneralProtectionFault0(pVCpu);
6102 pCtx->rip = uNewRip;
6103 }
6104 pCtx->eflags.Bits.u1RF = 0;
6105
6106#ifndef IEM_WITH_CODE_TLB
6107 /* Flush the prefetch buffer. */
6108 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6109#endif
6110
6111 return VINF_SUCCESS;
6112}
6113
6114
6115/**
6116 * Performs a near jump to the specified address.
6117 *
6118 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6119 * segment limit.
6120 *
6121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6122 * @param uNewRip The new RIP value.
6123 */
6124IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6125{
6126 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6127 switch (pVCpu->iem.s.enmEffOpSize)
6128 {
6129 case IEMMODE_16BIT:
6130 {
6131 Assert(uNewRip <= UINT16_MAX);
6132 if ( uNewRip > pCtx->cs.u32Limit
6133 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6134 return iemRaiseGeneralProtectionFault0(pVCpu);
6135 /** @todo Test 16-bit jump in 64-bit mode. */
6136 pCtx->rip = uNewRip;
6137 break;
6138 }
6139
6140 case IEMMODE_32BIT:
6141 {
6142 Assert(uNewRip <= UINT32_MAX);
6143 Assert(pCtx->rip <= UINT32_MAX);
6144 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6145
6146 if (uNewRip > pCtx->cs.u32Limit)
6147 return iemRaiseGeneralProtectionFault0(pVCpu);
6148 pCtx->rip = uNewRip;
6149 break;
6150 }
6151
6152 case IEMMODE_64BIT:
6153 {
6154 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6155
6156 if (!IEM_IS_CANONICAL(uNewRip))
6157 return iemRaiseGeneralProtectionFault0(pVCpu);
6158 pCtx->rip = uNewRip;
6159 break;
6160 }
6161
6162 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6163 }
6164
6165 pCtx->eflags.Bits.u1RF = 0;
6166
6167#ifndef IEM_WITH_CODE_TLB
6168 /* Flush the prefetch buffer. */
6169 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6170#endif
6171
6172 return VINF_SUCCESS;
6173}
6174
6175
6176/**
6177 * Get the address of the top of the stack.
6178 *
6179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6180 * @param pCtx The CPU context from which SP/ESP/RSP should be
6181 * read.
6182 */
6183DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6184{
6185 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6186 return pCtx->rsp;
6187 if (pCtx->ss.Attr.n.u1DefBig)
6188 return pCtx->esp;
6189 return pCtx->sp;
6190}
6191
6192
6193/**
6194 * Updates the RIP/EIP/IP to point to the next instruction.
6195 *
6196 * This function leaves the EFLAGS.RF flag alone.
6197 *
6198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6199 * @param cbInstr The number of bytes to add.
6200 */
6201IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6202{
6203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6204 switch (pVCpu->iem.s.enmCpuMode)
6205 {
6206 case IEMMODE_16BIT:
6207 Assert(pCtx->rip <= UINT16_MAX);
6208 pCtx->eip += cbInstr;
6209 pCtx->eip &= UINT32_C(0xffff);
6210 break;
6211
6212 case IEMMODE_32BIT:
6213 pCtx->eip += cbInstr;
6214 Assert(pCtx->rip <= UINT32_MAX);
6215 break;
6216
6217 case IEMMODE_64BIT:
6218 pCtx->rip += cbInstr;
6219 break;
6220 default: AssertFailed();
6221 }
6222}
6223
6224
6225#if 0
6226/**
6227 * Updates the RIP/EIP/IP to point to the next instruction.
6228 *
6229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6230 */
6231IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6232{
6233 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6234}
6235#endif
6236
6237
6238
6239/**
6240 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6241 *
6242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6243 * @param cbInstr The number of bytes to add.
6244 */
6245IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6246{
6247 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6248
6249 pCtx->eflags.Bits.u1RF = 0;
6250
6251 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6252#if ARCH_BITS >= 64
6253 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6254 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6255 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6256#else
6257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6258 pCtx->rip += cbInstr;
6259 else
6260 {
6261 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6262 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6263 }
6264#endif
6265}
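

/*
 * Editor's note: illustrative sketch only (guarded by "#if 0", not part of
 * IEM; the helper name is made up).  It restates the mask-per-mode trick
 * used above: IP wraps at 64 KB in 16-bit mode, EIP stays within 4 GB in
 * 32-bit mode, and RIP uses the full 64 bits.
 */
#if 0
static uint64_t AdvanceRipMasked(uint64_t uRip, uint8_t cbInstr, IEMMODE enmCpuMode)
{
    static uint64_t const s_aMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_aMasks[(unsigned)enmCpuMode];
}
#endif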
6266
6267
6268/**
6269 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6270 *
6271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6272 */
6273IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6274{
6275 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6276}
6277
6278
6279/**
6280 * Adds to the stack pointer.
6281 *
6282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6283 * @param pCtx The CPU context in which SP/ESP/RSP should be
6284 * updated.
6285 * @param cbToAdd The number of bytes to add (8-bit!).
6286 */
6287DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6288{
6289 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6290 pCtx->rsp += cbToAdd;
6291 else if (pCtx->ss.Attr.n.u1DefBig)
6292 pCtx->esp += cbToAdd;
6293 else
6294 pCtx->sp += cbToAdd;
6295}
6296
6297
6298/**
6299 * Subtracts from the stack pointer.
6300 *
6301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6302 * @param pCtx The CPU context in which SP/ESP/RSP should be
6303 * updated.
6304 * @param cbToSub The number of bytes to subtract (8-bit!).
6305 */
6306DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6307{
6308 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6309 pCtx->rsp -= cbToSub;
6310 else if (pCtx->ss.Attr.n.u1DefBig)
6311 pCtx->esp -= cbToSub;
6312 else
6313 pCtx->sp -= cbToSub;
6314}
6315
6316
6317/**
6318 * Adds to the temporary stack pointer.
6319 *
6320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6321 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6322 * @param cbToAdd The number of bytes to add (16-bit).
6323 * @param pCtx Where to get the current stack mode.
6324 */
6325DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6326{
6327 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6328 pTmpRsp->u += cbToAdd;
6329 else if (pCtx->ss.Attr.n.u1DefBig)
6330 pTmpRsp->DWords.dw0 += cbToAdd;
6331 else
6332 pTmpRsp->Words.w0 += cbToAdd;
6333}
6334
6335
6336/**
6337 * Subtracts from the temporary stack pointer.
6338 *
6339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6340 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6341 * @param cbToSub The number of bytes to subtract.
6342 * @param pCtx Where to get the current stack mode.
6343 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6344 * expecting that.
6345 */
6346DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6347{
6348 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6349 pTmpRsp->u -= cbToSub;
6350 else if (pCtx->ss.Attr.n.u1DefBig)
6351 pTmpRsp->DWords.dw0 -= cbToSub;
6352 else
6353 pTmpRsp->Words.w0 -= cbToSub;
6354}
6355
6356
6357/**
6358 * Calculates the effective stack address for a push of the specified size as
6359 * well as the new RSP value (upper bits may be masked).
6360 *
6361 * @returns Effective stack address for the push.
6362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6363 * @param pCtx Where to get the current stack mode.
6364 * @param cbItem The size of the stack item to push.
6365 * @param puNewRsp Where to return the new RSP value.
6366 */
6367DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6368{
6369 RTUINT64U uTmpRsp;
6370 RTGCPTR GCPtrTop;
6371 uTmpRsp.u = pCtx->rsp;
6372
6373 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6374 GCPtrTop = uTmpRsp.u -= cbItem;
6375 else if (pCtx->ss.Attr.n.u1DefBig)
6376 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6377 else
6378 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6379 *puNewRsp = uTmpRsp.u;
6380 return GCPtrTop;
6381}
6382
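/*
 * Worked example for the push address calculation above: with a 32-bit stack
 * segment (SS.B=1) and RSP=0x1000, pushing a 4 byte item returns GCPtrTop
 * 0x0ffc and a new RSP of 0x0ffc; only the low 32 bits of the union are
 * decremented, any bits in the upper half are left untouched. A minimal
 * sketch (assumes a little-endian host, like the RTUINT64U dw0 member on x86):
 */
#if 0
static uint64_t sketchPushAddr32(uint64_t uRsp, uint8_t cbItem, uint64_t *puNewRsp)
{
    union { uint64_t u; uint32_t dw[2]; } Tmp;
    Tmp.u = uRsp;
    uint32_t const uTop = Tmp.dw[0] -= cbItem;  /* only ESP is decremented */
    *puNewRsp = Tmp.u;                          /* upper 32 bits preserved */
    return uTop;
}
#endif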
6383
6384/**
6385 * Gets the current stack pointer and calculates the value after a pop of the
6386 * specified size.
6387 *
6388 * @returns Current stack pointer.
6389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6390 * @param pCtx Where to get the current stack mode.
6391 * @param cbItem The size of the stack item to pop.
6392 * @param puNewRsp Where to return the new RSP value.
6393 */
6394DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6395{
6396 RTUINT64U uTmpRsp;
6397 RTGCPTR GCPtrTop;
6398 uTmpRsp.u = pCtx->rsp;
6399
6400 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6401 {
6402 GCPtrTop = uTmpRsp.u;
6403 uTmpRsp.u += cbItem;
6404 }
6405 else if (pCtx->ss.Attr.n.u1DefBig)
6406 {
6407 GCPtrTop = uTmpRsp.DWords.dw0;
6408 uTmpRsp.DWords.dw0 += cbItem;
6409 }
6410 else
6411 {
6412 GCPtrTop = uTmpRsp.Words.w0;
6413 uTmpRsp.Words.w0 += cbItem;
6414 }
6415 *puNewRsp = uTmpRsp.u;
6416 return GCPtrTop;
6417}
6418
6419
6420/**
6421 * Calculates the effective stack address for a push of the specified size as
6422 * well as the new temporary RSP value (upper bits may be masked).
6423 *
6424 * @returns Effective stack address for the push.
6425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6426 * @param pCtx Where to get the current stack mode.
6427 * @param pTmpRsp The temporary stack pointer. This is updated.
6428 * @param cbItem The size of the stack item to push.
6429 */
6430DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6431{
6432 RTGCPTR GCPtrTop;
6433
6434 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6435 GCPtrTop = pTmpRsp->u -= cbItem;
6436 else if (pCtx->ss.Attr.n.u1DefBig)
6437 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6438 else
6439 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6440 return GCPtrTop;
6441}
6442
6443
6444/**
6445 * Gets the effective stack address for a pop of the specified size and
6446 * calculates and updates the temporary RSP.
6447 *
6448 * @returns Current stack pointer.
6449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6450 * @param pCtx Where to get the current stack mode.
6451 * @param pTmpRsp The temporary stack pointer. This is updated.
6452 * @param cbItem The size of the stack item to pop.
6453 */
6454DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6455{
6456 RTGCPTR GCPtrTop;
6457 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6458 {
6459 GCPtrTop = pTmpRsp->u;
6460 pTmpRsp->u += cbItem;
6461 }
6462 else if (pCtx->ss.Attr.n.u1DefBig)
6463 {
6464 GCPtrTop = pTmpRsp->DWords.dw0;
6465 pTmpRsp->DWords.dw0 += cbItem;
6466 }
6467 else
6468 {
6469 GCPtrTop = pTmpRsp->Words.w0;
6470 pTmpRsp->Words.w0 += cbItem;
6471 }
6472 return GCPtrTop;
6473}
6474
6475/** @} */
6476
6477
6478/** @name FPU access and helpers.
6479 *
6480 * @{
6481 */
6482
6483
6484/**
6485 * Hook for preparing to use the host FPU.
6486 *
6487 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6488 *
6489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6490 */
6491DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6492{
6493#ifdef IN_RING3
6494 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6495#else
6496 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6497#endif
6498}
6499
6500
6501/**
6502 * Hook for preparing to use the host FPU for SSE
6503 *
6504 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6505 *
6506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6507 */
6508DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6509{
6510 iemFpuPrepareUsage(pVCpu);
6511}
6512
6513
6514/**
6515 * Hook for actualizing the guest FPU state before the interpreter reads it.
6516 *
6517 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6518 *
6519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6520 */
6521DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6522{
6523#ifdef IN_RING3
6524 NOREF(pVCpu);
6525#else
6526 CPUMRZFpuStateActualizeForRead(pVCpu);
6527#endif
6528}
6529
6530
6531/**
6532 * Hook for actualizing the guest FPU state before the interpreter changes it.
6533 *
6534 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6535 *
6536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6537 */
6538DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6539{
6540#ifdef IN_RING3
6541 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6542#else
6543 CPUMRZFpuStateActualizeForChange(pVCpu);
6544#endif
6545}
6546
6547
6548/**
6549 * Hook for actualizing the guest XMM0..15 register state for read only.
6550 *
6551 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6552 *
6553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6554 */
6555DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6556{
6557#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6558 NOREF(pVCpu);
6559#else
6560 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6561#endif
6562}
6563
6564
6565/**
6566 * Hook for actualizing the guest XMM0..15 register state for read+write.
6567 *
6568 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6569 *
6570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6571 */
6572DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6573{
6574#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6575 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6576#else
6577 CPUMRZFpuStateActualizeForChange(pVCpu);
6578#endif
6579}
6580
6581
6582/**
6583 * Stores a QNaN value into a FPU register.
6584 *
6585 * @param pReg Pointer to the register.
6586 */
6587DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6588{
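    /*
     * This assembles the x87 "QNaN floating-point indefinite": au32[0] and
     * au32[1] form the 64-bit mantissa 0xc000000000000000 (integer bit and
     * top fraction bit set), while au16[4]=0xffff sets the sign bit and the
     * all-ones (0x7fff) exponent.
     */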
6589 pReg->au32[0] = UINT32_C(0x00000000);
6590 pReg->au32[1] = UINT32_C(0xc0000000);
6591 pReg->au16[4] = UINT16_C(0xffff);
6592}
6593
6594
6595/**
6596 * Updates the FOP, FPU.CS and FPUIP registers.
6597 *
6598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6599 * @param pCtx The CPU context.
6600 * @param pFpuCtx The FPU context.
6601 */
6602DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6603{
6604 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6605 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6606 /** @todo x87.CS and FPUIP need to be kept separately. */
6607 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6608 {
6609 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
6610 * handled in real mode, based on the fnsave and fnstenv images. */
6611 pFpuCtx->CS = 0;
6612 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6613 }
6614 else
6615 {
6616 pFpuCtx->CS = pCtx->cs.Sel;
6617 pFpuCtx->FPUIP = pCtx->rip;
6618 }
6619}
6620
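/*
 * Real-mode example for the worker above: with CS=0x1234 and EIP=0x0010 the
 * stored FPUIP becomes 0x0010 | (0x1234 << 4) = 0x12350, i.e. the linear
 * address of the instruction, while the x87 CS selector image is left zero.
 */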
6621
6622/**
6623 * Updates the x87.DS and FPUDP registers.
6624 *
6625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6626 * @param pCtx The CPU context.
6627 * @param pFpuCtx The FPU context.
6628 * @param iEffSeg The effective segment register.
6629 * @param GCPtrEff The effective address relative to @a iEffSeg.
6630 */
6631DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6632{
6633 RTSEL sel;
6634 switch (iEffSeg)
6635 {
6636 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6637 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6638 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6639 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6640 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6641 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6642 default:
6643 AssertMsgFailed(("%d\n", iEffSeg));
6644 sel = pCtx->ds.Sel;
6645 }
6646 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6647 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6648 {
6649 pFpuCtx->DS = 0;
6650 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6651 }
6652 else
6653 {
6654 pFpuCtx->DS = sel;
6655 pFpuCtx->FPUDP = GCPtrEff;
6656 }
6657}
6658
6659
6660/**
6661 * Rotates the stack registers in the push direction.
6662 *
6663 * @param pFpuCtx The FPU context.
6664 * @remarks This is a complete waste of time, but fxsave stores the registers in
6665 * stack order.
6666 */
6667DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6668{
6669 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6670 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6671 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6672 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6673 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6674 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6675 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6676 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6677 pFpuCtx->aRegs[0].r80 = r80Tmp;
6678}
6679
6680
6681/**
6682 * Rotates the stack registers in the pop direction.
6683 *
6684 * @param pFpuCtx The FPU context.
6685 * @remarks This is a complete waste of time, but fxsave stores the registers in
6686 * stack order.
6687 */
6688DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6689{
6690 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6691 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6692 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6693 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6694 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6695 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6696 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6697 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6698 pFpuCtx->aRegs[7].r80 = r80Tmp;
6699}
6700
6701
6702/**
6703 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6704 * exception prevents it.
6705 *
6706 * @param pResult The FPU operation result to push.
6707 * @param pFpuCtx The FPU context.
6708 */
6709IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6710{
6711 /* Update FSW and bail if there are pending exceptions afterwards. */
6712 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6713 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6714 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6715 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6716 {
6717 pFpuCtx->FSW = fFsw;
6718 return;
6719 }
6720
6721 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6722 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6723 {
6724 /* All is fine, push the actual value. */
6725 pFpuCtx->FTW |= RT_BIT(iNewTop);
6726 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6727 }
6728 else if (pFpuCtx->FCW & X86_FCW_IM)
6729 {
6730 /* Masked stack overflow, push QNaN. */
6731 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6732 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6733 }
6734 else
6735 {
6736 /* Raise stack overflow, don't push anything. */
6737 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6738 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6739 return;
6740 }
6741
6742 fFsw &= ~X86_FSW_TOP_MASK;
6743 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6744 pFpuCtx->FSW = fFsw;
6745
6746 iemFpuRotateStackPush(pFpuCtx);
6747}
6748
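/*
 * Worked example for the TOP arithmetic above: iNewTop = (TOP + 7) & 7 is
 * TOP - 1 modulo 8, i.e. the physical register that becomes ST(0) after the
 * push. With TOP=0 the candidate is register 7; if its FTW bit is already
 * set the slot is in use and the push is a stack overflow (#IS with C1=1).
 */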
6749
6750/**
6751 * Stores a result in a FPU register and updates the FSW and FTW.
6752 *
6753 * @param pFpuCtx The FPU context.
6754 * @param pResult The result to store.
6755 * @param iStReg Which FPU register to store it in.
6756 */
6757IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6758{
6759 Assert(iStReg < 8);
6760 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6761 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6762 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6763 pFpuCtx->FTW |= RT_BIT(iReg);
6764 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6765}
6766
6767
6768/**
6769 * Only updates the FPU status word (FSW) with the result of the current
6770 * instruction.
6771 *
6772 * @param pFpuCtx The FPU context.
6773 * @param u16FSW The FSW output of the current instruction.
6774 */
6775IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6776{
6777 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6778 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6779}
6780
6781
6782/**
6783 * Pops one item off the FPU stack if no pending exception prevents it.
6784 *
6785 * @param pFpuCtx The FPU context.
6786 */
6787IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6788{
6789 /* Check pending exceptions. */
6790 uint16_t uFSW = pFpuCtx->FSW;
6791 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6792 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6793 return;
6794
6795 /* Pop the item: TOP = (TOP + 1) & 7. */
6796 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6797 uFSW &= ~X86_FSW_TOP_MASK;
6798 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6799 pFpuCtx->FSW = uFSW;
6800
6801 /* Mark the previous ST0 as empty. */
6802 iOldTop >>= X86_FSW_TOP_SHIFT;
6803 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6804
6805 /* Rotate the registers. */
6806 iemFpuRotateStackPop(pFpuCtx);
6807}
6808
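/*
 * Worked example for the pop above: the TOP field is only 3 bits wide, so
 * adding 9 << X86_FSW_TOP_SHIFT and masking is the same as TOP = (TOP + 1) & 7.
 * E.g. TOP=7 wraps around to 0, and register 7 (the old ST(0)) gets its FTW
 * bit cleared before the register array is rotated.
 */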
6809
6810/**
6811 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6812 *
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param pResult The FPU operation result to push.
6815 */
6816IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6817{
6818 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6819 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6820 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6821 iemFpuMaybePushResult(pResult, pFpuCtx);
6822}
6823
6824
6825/**
6826 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6827 * and sets FPUDP and FPUDS.
6828 *
6829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6830 * @param pResult The FPU operation result to push.
6831 * @param iEffSeg The effective segment register.
6832 * @param GCPtrEff The effective address relative to @a iEffSeg.
6833 */
6834IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6835{
6836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6837 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6838 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6839 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6840 iemFpuMaybePushResult(pResult, pFpuCtx);
6841}
6842
6843
6844/**
6845 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
6846 * unless a pending exception prevents it.
6847 *
6848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6849 * @param pResult The FPU operation result to store and push.
6850 */
6851IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6852{
6853 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6854 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6855 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6856
6857 /* Update FSW and bail if there are pending exceptions afterwards. */
6858 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6859 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6860 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6861 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6862 {
6863 pFpuCtx->FSW = fFsw;
6864 return;
6865 }
6866
6867 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6868 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6869 {
6870 /* All is fine, push the actual value. */
6871 pFpuCtx->FTW |= RT_BIT(iNewTop);
6872 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6873 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6874 }
6875 else if (pFpuCtx->FCW & X86_FCW_IM)
6876 {
6877 /* Masked stack overflow, push QNaN. */
6878 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6879 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6880 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6881 }
6882 else
6883 {
6884 /* Raise stack overflow, don't push anything. */
6885 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6886 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6887 return;
6888 }
6889
6890 fFsw &= ~X86_FSW_TOP_MASK;
6891 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6892 pFpuCtx->FSW = fFsw;
6893
6894 iemFpuRotateStackPush(pFpuCtx);
6895}
6896
6897
6898/**
6899 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6900 * FOP.
6901 *
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 * @param pResult The result to store.
6904 * @param iStReg Which FPU register to store it in.
6905 */
6906IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6907{
6908 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6909 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6910 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6911 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6912}
6913
6914
6915/**
6916 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6917 * FOP, and then pops the stack.
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 * @param pResult The result to store.
6921 * @param iStReg Which FPU register to store it in.
6922 */
6923IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6924{
6925 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6926 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6927 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6928 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6929 iemFpuMaybePopOne(pFpuCtx);
6930}
6931
6932
6933/**
6934 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6935 * FPUDP, and FPUDS.
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 * @param pResult The result to store.
6939 * @param iStReg Which FPU register to store it in.
6940 * @param iEffSeg The effective memory operand selector register.
6941 * @param GCPtrEff The effective memory operand offset.
6942 */
6943IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6944 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6945{
6946 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6947 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6948 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6949 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6950 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6951}
6952
6953
6954/**
6955 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6956 * FPUDP, and FPUDS, and then pops the stack.
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 * @param pResult The result to store.
6960 * @param iStReg Which FPU register to store it in.
6961 * @param iEffSeg The effective memory operand selector register.
6962 * @param GCPtrEff The effective memory operand offset.
6963 */
6964IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6965 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6966{
6967 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6968 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6969 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6970 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6971 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6972 iemFpuMaybePopOne(pFpuCtx);
6973}
6974
6975
6976/**
6977 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6978 *
6979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6980 */
6981IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6982{
6983 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6984 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6985 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6986}
6987
6988
6989/**
6990 * Marks the specified stack register as free (for FFREE).
6991 *
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 * @param iStReg The register to free.
6994 */
6995IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6996{
6997 Assert(iStReg < 8);
6998 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6999 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7000 pFpuCtx->FTW &= ~RT_BIT(iReg);
7001}
7002
7003
7004/**
7005 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7006 *
7007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7008 */
7009IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7010{
7011 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7012 uint16_t uFsw = pFpuCtx->FSW;
7013 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7014 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7015 uFsw &= ~X86_FSW_TOP_MASK;
7016 uFsw |= uTop;
7017 pFpuCtx->FSW = uFsw;
7018}
7019
7020
7021/**
7022 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7023 *
7024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7025 */
7026IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7027{
7028 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7029 uint16_t uFsw = pFpuCtx->FSW;
7030 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7031 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7032 uFsw &= ~X86_FSW_TOP_MASK;
7033 uFsw |= uTop;
7034 pFpuCtx->FSW = uFsw;
7035}
7036
7037
7038/**
7039 * Updates the FSW, FOP, FPUIP, and FPUCS.
7040 *
7041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7042 * @param u16FSW The FSW from the current instruction.
7043 */
7044IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7045{
7046 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7047 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7048 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7049 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7050}
7051
7052
7053/**
7054 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7055 *
7056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7057 * @param u16FSW The FSW from the current instruction.
7058 */
7059IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7060{
7061 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7062 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7063 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7064 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7065 iemFpuMaybePopOne(pFpuCtx);
7066}
7067
7068
7069/**
7070 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7071 *
7072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7073 * @param u16FSW The FSW from the current instruction.
7074 * @param iEffSeg The effective memory operand selector register.
7075 * @param GCPtrEff The effective memory operand offset.
7076 */
7077IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7078{
7079 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7080 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7081 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7082 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7083 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7084}
7085
7086
7087/**
7088 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7089 *
7090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7091 * @param u16FSW The FSW from the current instruction.
7092 */
7093IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7094{
7095 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7096 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7097 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7098 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7099 iemFpuMaybePopOne(pFpuCtx);
7100 iemFpuMaybePopOne(pFpuCtx);
7101}
7102
7103
7104/**
7105 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7106 *
7107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7108 * @param u16FSW The FSW from the current instruction.
7109 * @param iEffSeg The effective memory operand selector register.
7110 * @param GCPtrEff The effective memory operand offset.
7111 */
7112IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7113{
7114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7115 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7116 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7117 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7118 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7119 iemFpuMaybePopOne(pFpuCtx);
7120}
7121
7122
7123/**
7124 * Worker routine for raising an FPU stack underflow exception.
7125 *
7126 * @param pFpuCtx The FPU context.
7127 * @param iStReg The stack register being accessed.
7128 */
7129IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7130{
7131 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7132 if (pFpuCtx->FCW & X86_FCW_IM)
7133 {
7134 /* Masked underflow. */
7135 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7136 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7137 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7138 if (iStReg != UINT8_MAX)
7139 {
7140 pFpuCtx->FTW |= RT_BIT(iReg);
7141 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7142 }
7143 }
7144 else
7145 {
7146 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7147 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7148 }
7149}
7150
7151
7152/**
7153 * Raises a FPU stack underflow exception.
7154 *
7155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7156 * @param iStReg The destination register that should be loaded
7157 * with QNaN if \#IS is not masked. Specify
7158 * UINT8_MAX if none (like for fcom).
7159 */
7160DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7161{
7162 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7163 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7164 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7165 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7166}
7167
7168
7169DECL_NO_INLINE(IEM_STATIC, void)
7170iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7171{
7172 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7173 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7174 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7175 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7176 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7177}
7178
7179
7180DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7181{
7182 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7183 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7184 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7185 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7186 iemFpuMaybePopOne(pFpuCtx);
7187}
7188
7189
7190DECL_NO_INLINE(IEM_STATIC, void)
7191iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7192{
7193 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7194 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7195 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7196 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7197 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7198 iemFpuMaybePopOne(pFpuCtx);
7199}
7200
7201
7202DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7203{
7204 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7205 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7206 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7207 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7208 iemFpuMaybePopOne(pFpuCtx);
7209 iemFpuMaybePopOne(pFpuCtx);
7210}
7211
7212
7213DECL_NO_INLINE(IEM_STATIC, void)
7214iemFpuStackPushUnderflow(PVMCPU pVCpu)
7215{
7216 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7217 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7218 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7219
7220 if (pFpuCtx->FCW & X86_FCW_IM)
7221 {
7222 /* Masked underflow - Push QNaN. */
7223 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7224 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7225 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7226 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7227 pFpuCtx->FTW |= RT_BIT(iNewTop);
7228 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7229 iemFpuRotateStackPush(pFpuCtx);
7230 }
7231 else
7232 {
7233 /* Exception pending - don't change TOP or the register stack. */
7234 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7235 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7236 }
7237}
7238
7239
7240DECL_NO_INLINE(IEM_STATIC, void)
7241iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7242{
7243 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7244 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7246
7247 if (pFpuCtx->FCW & X86_FCW_IM)
7248 {
7249 /* Masked underflow - Push QNaN. */
7250 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7251 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7252 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7253 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7254 pFpuCtx->FTW |= RT_BIT(iNewTop);
7255 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7256 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7257 iemFpuRotateStackPush(pFpuCtx);
7258 }
7259 else
7260 {
7261 /* Exception pending - don't change TOP or the register stack. */
7262 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7263 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7264 }
7265}
7266
7267
7268/**
7269 * Worker routine for raising an FPU stack overflow exception on a push.
7270 *
7271 * @param pFpuCtx The FPU context.
7272 */
7273IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7274{
7275 if (pFpuCtx->FCW & X86_FCW_IM)
7276 {
7277 /* Masked overflow. */
7278 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7279 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7280 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7281 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7282 pFpuCtx->FTW |= RT_BIT(iNewTop);
7283 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7284 iemFpuRotateStackPush(pFpuCtx);
7285 }
7286 else
7287 {
7288 /* Exception pending - don't change TOP or the register stack. */
7289 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7290 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7291 }
7292}
7293
7294
7295/**
7296 * Raises a FPU stack overflow exception on a push.
7297 *
7298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7299 */
7300DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7301{
7302 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7303 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7304 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7305 iemFpuStackPushOverflowOnly(pFpuCtx);
7306}
7307
7308
7309/**
7310 * Raises a FPU stack overflow exception on a push with a memory operand.
7311 *
7312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7313 * @param iEffSeg The effective memory operand selector register.
7314 * @param GCPtrEff The effective memory operand offset.
7315 */
7316DECL_NO_INLINE(IEM_STATIC, void)
7317iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7318{
7319 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7320 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7321 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7322 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7323 iemFpuStackPushOverflowOnly(pFpuCtx);
7324}
7325
7326
7327IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7328{
7329 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7330 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7331 if (pFpuCtx->FTW & RT_BIT(iReg))
7332 return VINF_SUCCESS;
7333 return VERR_NOT_FOUND;
7334}
7335
7336
7337IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7338{
7339 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7340 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7341 if (pFpuCtx->FTW & RT_BIT(iReg))
7342 {
7343 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7344 return VINF_SUCCESS;
7345 }
7346 return VERR_NOT_FOUND;
7347}
7348
7349
7350IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7351 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7352{
7353 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7354 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7355 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7356 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7357 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7358 {
7359 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7360 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7361 return VINF_SUCCESS;
7362 }
7363 return VERR_NOT_FOUND;
7364}
7365
7366
7367IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7368{
7369 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7370 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7371 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7372 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7373 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7374 {
7375 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7376 return VINF_SUCCESS;
7377 }
7378 return VERR_NOT_FOUND;
7379}
7380
7381
7382/**
7383 * Updates the FPU exception status after FCW is changed.
7384 *
7385 * @param pFpuCtx The FPU context.
7386 */
7387IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7388{
7389 uint16_t u16Fsw = pFpuCtx->FSW;
7390 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7391 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7392 else
7393 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7394 pFpuCtx->FSW = u16Fsw;
7395}
7396
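/*
 * Example for the recalculation above: if FSW has X86_FSW_PE pending and the
 * new FCW unmasks it (X86_FCW_PM clear), ES and B become set; once every
 * pending exception bit is masked again, ES and B are cleared.
 */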
7397
7398/**
7399 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7400 *
7401 * @returns The full FTW.
7402 * @param pFpuCtx The FPU context.
7403 */
7404IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7405{
7406 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7407 uint16_t u16Ftw = 0;
7408 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7409 for (unsigned iSt = 0; iSt < 8; iSt++)
7410 {
7411 unsigned const iReg = (iSt + iTop) & 7;
7412 if (!(u8Ftw & RT_BIT(iReg)))
7413 u16Ftw |= 3 << (iReg * 2); /* empty */
7414 else
7415 {
7416 uint16_t uTag;
7417 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7418 if (pr80Reg->s.uExponent == 0x7fff)
7419 uTag = 2; /* Exponent is all 1's => Special. */
7420 else if (pr80Reg->s.uExponent == 0x0000)
7421 {
7422 if (pr80Reg->s.u64Mantissa == 0x0000)
7423 uTag = 1; /* All bits are zero => Zero. */
7424 else
7425 uTag = 2; /* Must be special. */
7426 }
7427 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7428 uTag = 0; /* Valid. */
7429 else
7430 uTag = 2; /* Must be special. */
7431
7432 u16Ftw |= uTag << (iReg * 2); /* valid (0), zero (1) or special (2) */
7433 }
7434 }
7435
7436 return u16Ftw;
7437}
7438
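/*
 * Tag encoding used above: 00 = valid, 01 = zero, 10 = special (NaN,
 * infinity, denormal, unsupported) and 11 = empty. The full tag word is
 * indexed by physical register number, which is why iReg = (iSt + iTop) & 7
 * is used rather than the ST(i) index.
 */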
7439
7440/**
7441 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7442 *
7443 * @returns The compressed FTW.
7444 * @param u16FullFtw The full FTW to convert.
7445 */
7446IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7447{
7448 uint8_t u8Ftw = 0;
7449 for (unsigned i = 0; i < 8; i++)
7450 {
7451 if ((u16FullFtw & 3) != 3 /*empty*/)
7452 u8Ftw |= RT_BIT(i);
7453 u16FullFtw >>= 2;
7454 }
7455
7456 return u8Ftw;
7457}
7458
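/*
 * Worked example for the compression above: a full FTW of 0xf000 (registers
 * 0 thru 5 tagged 00=valid, registers 6 and 7 tagged 11=empty) compresses to
 * 0x3f, i.e. the one-bit-per-register form kept in pFpuCtx->FTW where a set
 * bit means the register is in use.
 */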
7459/** @} */
7460
7461
7462/** @name Memory access.
7463 *
7464 * @{
7465 */
7466
7467
7468/**
7469 * Updates the IEMCPU::cbWritten counter if applicable.
7470 *
7471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7472 * @param fAccess The access being accounted for.
7473 * @param cbMem The access size.
7474 */
7475DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7476{
7477 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7478 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7479 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7480}
7481
7482
7483/**
7484 * Checks if the given segment can be written to, raising the appropriate
7485 * exception if not.
7486 *
7487 * @returns VBox strict status code.
7488 *
7489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7490 * @param pHid Pointer to the hidden register.
7491 * @param iSegReg The register number.
7492 * @param pu64BaseAddr Where to return the base address to use for the
7493 * segment. (In 64-bit code it may differ from the
7494 * base in the hidden segment.)
7495 */
7496IEM_STATIC VBOXSTRICTRC
7497iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7498{
7499 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7500 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7501 else
7502 {
7503 if (!pHid->Attr.n.u1Present)
7504 {
7505 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7506 AssertRelease(uSel == 0);
7507 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7508 return iemRaiseGeneralProtectionFault0(pVCpu);
7509 }
7510
7511 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7512 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7513 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7514 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7515 *pu64BaseAddr = pHid->u64Base;
7516 }
7517 return VINF_SUCCESS;
7518}
7519
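/*
 * Note on the 64-bit path above: only FS and GS contribute a segment base
 * (iSegReg < X86_SREG_FS covers ES, CS, SS and DS, which all get base 0);
 * the present/type attribute checks only apply to the legacy modes handled
 * in the else branch.
 */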
7520
7521/**
7522 * Checks if the given segment can be read from, raising the appropriate
7523 * exception if not.
7524 *
7525 * @returns VBox strict status code.
7526 *
7527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7528 * @param pHid Pointer to the hidden register.
7529 * @param iSegReg The register number.
7530 * @param pu64BaseAddr Where to return the base address to use for the
7531 * segment. (In 64-bit code it may differ from the
7532 * base in the hidden segment.)
7533 */
7534IEM_STATIC VBOXSTRICTRC
7535iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7536{
7537 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7538 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7539 else
7540 {
7541 if (!pHid->Attr.n.u1Present)
7542 {
7543 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7544 AssertRelease(uSel == 0);
7545 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7546 return iemRaiseGeneralProtectionFault0(pVCpu);
7547 }
7548
7549 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7550 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7551 *pu64BaseAddr = pHid->u64Base;
7552 }
7553 return VINF_SUCCESS;
7554}
7555
7556
7557/**
7558 * Applies the segment limit, base and attributes.
7559 *
7560 * This may raise a \#GP or \#SS.
7561 *
7562 * @returns VBox strict status code.
7563 *
7564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7565 * @param fAccess The kind of access which is being performed.
7566 * @param iSegReg The index of the segment register to apply.
7567 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7568 * TSS, ++).
7569 * @param cbMem The access size.
7570 * @param pGCPtrMem Pointer to the guest memory address to apply
7571 * segmentation to. Input and output parameter.
7572 */
7573IEM_STATIC VBOXSTRICTRC
7574iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7575{
7576 if (iSegReg == UINT8_MAX)
7577 return VINF_SUCCESS;
7578
7579 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7580 switch (pVCpu->iem.s.enmCpuMode)
7581 {
7582 case IEMMODE_16BIT:
7583 case IEMMODE_32BIT:
7584 {
7585 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7586 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7587
7588 if ( pSel->Attr.n.u1Present
7589 && !pSel->Attr.n.u1Unusable)
7590 {
7591 Assert(pSel->Attr.n.u1DescType);
7592 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7593 {
7594 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7595 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7596 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7597
7598 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7599 {
7600 /** @todo CPL check. */
7601 }
7602
7603 /*
7604 * There are two kinds of data selectors, normal and expand down.
7605 */
7606 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7607 {
7608 if ( GCPtrFirst32 > pSel->u32Limit
7609 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7610 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7611 }
7612 else
7613 {
7614 /*
7615 * The upper boundary is defined by the B bit, not the G bit!
7616 */
7617 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7618 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7619 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7620 }
7621 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7622 }
7623 else
7624 {
7625
7626 /*
7627 * Code selectors can usually be used to read through; writing is
7628 * only permitted in real and V8086 mode.
7629 */
7630 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7631 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7632 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7633 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7634 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7635
7636 if ( GCPtrFirst32 > pSel->u32Limit
7637 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7638 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7639
7640 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7641 {
7642 /** @todo CPL check. */
7643 }
7644
7645 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7646 }
7647 }
7648 else
7649 return iemRaiseGeneralProtectionFault0(pVCpu);
7650 return VINF_SUCCESS;
7651 }
7652
7653 case IEMMODE_64BIT:
7654 {
7655 RTGCPTR GCPtrMem = *pGCPtrMem;
7656 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7657 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7658
7659 Assert(cbMem >= 1);
7660 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7661 return VINF_SUCCESS;
7662 return iemRaiseGeneralProtectionFault0(pVCpu);
7663 }
7664
7665 default:
7666 AssertFailedReturn(VERR_IEM_IPE_7);
7667 }
7668}
7669
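/*
 * Worked example for the expand-down data segment case above: with
 * u32Limit=0x0fff and SS.B=1 the valid offsets are 0x1000 thru 0xffffffff,
 * so a 4 byte access at 0x0800 fails the GCPtrFirst32 < u32Limit + 1 test
 * and raises #GP/#SS, while the same access at 0x2000 passes and has the
 * segment base added to it.
 */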
7670
7671/**
7672 * Translates a virtual address to a physical address and checks if we
7673 * can access the page as specified.
7674 *
7675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7676 * @param GCPtrMem The virtual address.
7677 * @param fAccess The intended access.
7678 * @param pGCPhysMem Where to return the physical address.
7679 */
7680IEM_STATIC VBOXSTRICTRC
7681iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7682{
7683 /** @todo Need a different PGM interface here. We're currently using
7684 * generic / REM interfaces. This won't cut it for R0 & RC. */
7685 RTGCPHYS GCPhys;
7686 uint64_t fFlags;
7687 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7688 if (RT_FAILURE(rc))
7689 {
7690 /** @todo Check unassigned memory in unpaged mode. */
7691 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7692 *pGCPhysMem = NIL_RTGCPHYS;
7693 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7694 }
7695
7696 /* If the page is writable and does not have the no-exec bit set, all
7697 access is allowed. Otherwise we'll have to check more carefully... */
7698 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7699 {
7700 /* Write to read only memory? */
7701 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7702 && !(fFlags & X86_PTE_RW)
7703 && ( (pVCpu->iem.s.uCpl == 3
7704 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7705 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7706 {
7707 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7708 *pGCPhysMem = NIL_RTGCPHYS;
7709 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7710 }
7711
7712 /* Kernel memory accessed by userland? */
7713 if ( !(fFlags & X86_PTE_US)
7714 && pVCpu->iem.s.uCpl == 3
7715 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7716 {
7717 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7718 *pGCPhysMem = NIL_RTGCPHYS;
7719 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7720 }
7721
7722 /* Executing non-executable memory? */
7723 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7724 && (fFlags & X86_PTE_PAE_NX)
7725 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7726 {
7727 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7728 *pGCPhysMem = NIL_RTGCPHYS;
7729 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7730 VERR_ACCESS_DENIED);
7731 }
7732 }
7733
7734 /*
7735 * Set the dirty / access flags.
7736 * ASSUMES this is set when the address is translated rather than on commit...
7737 */
7738 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7739 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7740 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7741 {
7742 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7743 AssertRC(rc2);
7744 }
7745
7746 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7747 *pGCPhysMem = GCPhys;
7748 return VINF_SUCCESS;
7749}
7750
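/*
 * The fast path above: a page with X86_PTE_RW and X86_PTE_US set and the NX
 * bit clear satisfies (fFlags & (RW | US | NX)) == (RW | US), so every access
 * type is allowed and the detailed write/user/exec checks are skipped.
 */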
7751
7752
7753/**
7754 * Maps a physical page.
7755 *
7756 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7758 * @param GCPhysMem The physical address.
7759 * @param fAccess The intended access.
7760 * @param ppvMem Where to return the mapping address.
7761 * @param pLock The PGM lock.
7762 */
7763IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7764{
7765#ifdef IEM_VERIFICATION_MODE_FULL
7766 /* Force the alternative path so we can ignore writes. */
7767 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7768 {
7769 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7770 {
7771 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7772 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7773 if (RT_FAILURE(rc2))
7774 pVCpu->iem.s.fProblematicMemory = true;
7775 }
7776 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7777 }
7778#endif
7779#ifdef IEM_LOG_MEMORY_WRITES
7780 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7781 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7782#endif
7783#ifdef IEM_VERIFICATION_MODE_MINIMAL
7784 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7785#endif
7786
7787 /** @todo This API may require some improving later. A private deal with PGM
7788 * regarding locking and unlocking needs to be struck. A couple of TLBs
7789 * living in PGM, but with publicly accessible inlined access methods
7790 * could perhaps be an even better solution. */
7791 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7792 GCPhysMem,
7793 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7794 pVCpu->iem.s.fBypassHandlers,
7795 ppvMem,
7796 pLock);
7797 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7798 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7799
7800#ifdef IEM_VERIFICATION_MODE_FULL
7801 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7802 pVCpu->iem.s.fProblematicMemory = true;
7803#endif
7804 return rc;
7805}
7806
7807
7808/**
7809 * Unmap a page previously mapped by iemMemPageMap.
7810 *
7811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7812 * @param GCPhysMem The physical address.
7813 * @param fAccess The intended access.
7814 * @param pvMem What iemMemPageMap returned.
7815 * @param pLock The PGM lock.
7816 */
7817DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7818{
7819 NOREF(pVCpu);
7820 NOREF(GCPhysMem);
7821 NOREF(fAccess);
7822 NOREF(pvMem);
7823 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7824}
7825
7826
7827/**
7828 * Looks up a memory mapping entry.
7829 *
7830 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7832 * @param pvMem The memory address.
7833 * @param fAccess The access type to match.
7834 */
7835DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7836{
7837 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7838 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7839 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7840 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7841 return 0;
7842 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7843 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7844 return 1;
7845 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7846 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7847 return 2;
7848 return VERR_NOT_FOUND;
7849}
7850
7851
7852/**
7853 * Finds a free memmap entry when using iNextMapping doesn't work.
7854 *
7855 * @returns Memory mapping index, 1024 on failure.
7856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7857 */
7858IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7859{
7860 /*
7861 * The easy case.
7862 */
7863 if (pVCpu->iem.s.cActiveMappings == 0)
7864 {
7865 pVCpu->iem.s.iNextMapping = 1;
7866 return 0;
7867 }
7868
7869 /* There should be enough mappings for all instructions. */
7870 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7871
7872 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7873 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7874 return i;
7875
7876 AssertFailedReturn(1024);
7877}
7878
7879
7880/**
7881 * Commits a bounce buffer that needs writing back and unmaps it.
7882 *
7883 * @returns Strict VBox status code.
7884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7885 * @param iMemMap The index of the buffer to commit.
7886 * @param fPostponeFail Whether we can postpone write failures to ring-3.
7887 * Always false in ring-3, obviously.
7888 */
7889IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7890{
7891 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7892 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7893#ifdef IN_RING3
7894 Assert(!fPostponeFail);
7895 RT_NOREF_PV(fPostponeFail);
7896#endif
7897
7898 /*
7899 * Do the writing.
7900 */
7901#ifndef IEM_VERIFICATION_MODE_MINIMAL
7902 PVM pVM = pVCpu->CTX_SUFF(pVM);
7903 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7904 && !IEM_VERIFICATION_ENABLED(pVCpu))
7905 {
7906 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7907 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7908 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7909 if (!pVCpu->iem.s.fBypassHandlers)
7910 {
7911 /*
7912 * Carefully and efficiently dealing with access handler return
7913 * codes makes this a little bloated.
7914 */
7915 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7916 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7917 pbBuf,
7918 cbFirst,
7919 PGMACCESSORIGIN_IEM);
7920 if (rcStrict == VINF_SUCCESS)
7921 {
7922 if (cbSecond)
7923 {
7924 rcStrict = PGMPhysWrite(pVM,
7925 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7926 pbBuf + cbFirst,
7927 cbSecond,
7928 PGMACCESSORIGIN_IEM);
7929 if (rcStrict == VINF_SUCCESS)
7930 { /* nothing */ }
7931 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7932 {
7933 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7934 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7935 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7936 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7937 }
7938# ifndef IN_RING3
7939 else if (fPostponeFail)
7940 {
7941 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7943 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7944 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7945 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7946 return iemSetPassUpStatus(pVCpu, rcStrict);
7947 }
7948# endif
7949 else
7950 {
7951 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7952 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7954 return rcStrict;
7955 }
7956 }
7957 }
7958 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7959 {
7960 if (!cbSecond)
7961 {
7962 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7963 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7964 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7965 }
7966 else
7967 {
7968 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7969 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7970 pbBuf + cbFirst,
7971 cbSecond,
7972 PGMACCESSORIGIN_IEM);
7973 if (rcStrict2 == VINF_SUCCESS)
7974 {
7975 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7976 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7977 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7978 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7979 }
7980 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7981 {
7982 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7983 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7984 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7985 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7986 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7987 }
7988# ifndef IN_RING3
7989 else if (fPostponeFail)
7990 {
7991 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7992 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7993 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7994 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7995 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7996 return iemSetPassUpStatus(pVCpu, rcStrict);
7997 }
7998# endif
7999 else
8000 {
8001 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8002 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8003 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8004 return rcStrict2;
8005 }
8006 }
8007 }
8008# ifndef IN_RING3
8009 else if (fPostponeFail)
8010 {
8011 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8012 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8013 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8014 if (!cbSecond)
8015 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8016 else
8017 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8018 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8019 return iemSetPassUpStatus(pVCpu, rcStrict);
8020 }
8021# endif
8022 else
8023 {
8024 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8025 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8026 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8027 return rcStrict;
8028 }
8029 }
8030 else
8031 {
8032 /*
8033 * No access handlers, much simpler.
8034 */
8035 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8036 if (RT_SUCCESS(rc))
8037 {
8038 if (cbSecond)
8039 {
8040 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8041 if (RT_SUCCESS(rc))
8042 { /* likely */ }
8043 else
8044 {
8045 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8046 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8047 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8048 return rc;
8049 }
8050 }
8051 }
8052 else
8053 {
8054 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8055 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8056 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8057 return rc;
8058 }
8059 }
8060 }
8061#endif
8062
8063#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8064 /*
8065 * Record the write(s).
8066 */
8067 if (!pVCpu->iem.s.fNoRem)
8068 {
8069 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8070 if (pEvtRec)
8071 {
8072 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8073 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8074 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8075 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8076 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8077 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8078 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8079 }
8080 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8081 {
8082 pEvtRec = iemVerifyAllocRecord(pVCpu);
8083 if (pEvtRec)
8084 {
8085 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8086 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8087 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8088 memcpy(pEvtRec->u.RamWrite.ab,
8089 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8090 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8091 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8092 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8093 }
8094 }
8095 }
8096#endif
8097#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8098 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8099 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8100 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8101 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8102 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8103 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8104
8105 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8106 g_cbIemWrote = cbWrote;
8107 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8108#endif
8109
8110 /*
8111 * Free the mapping entry.
8112 */
8113 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8114 Assert(pVCpu->iem.s.cActiveMappings != 0);
8115 pVCpu->iem.s.cActiveMappings--;
8116 return VINF_SUCCESS;
8117}
8118
8119
8120/**
8121 * iemMemMap worker that deals with a request crossing pages.
8122 */
8123IEM_STATIC VBOXSTRICTRC
8124iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8125{
8126 /*
8127 * Do the address translations.
8128 */
8129 RTGCPHYS GCPhysFirst;
8130 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8131 if (rcStrict != VINF_SUCCESS)
8132 return rcStrict;
8133
8134 RTGCPHYS GCPhysSecond;
8135 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8136 fAccess, &GCPhysSecond);
8137 if (rcStrict != VINF_SUCCESS)
8138 return rcStrict;
8139 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8140
8141 PVM pVM = pVCpu->CTX_SUFF(pVM);
8142#ifdef IEM_VERIFICATION_MODE_FULL
8143 /*
8144 * Detect problematic memory when verifying so we can select
8145 * the right execution engine. (TLB: Redo this.)
8146 */
8147 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8148 {
8149 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8150 if (RT_SUCCESS(rc2))
8151 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8152 if (RT_FAILURE(rc2))
8153 pVCpu->iem.s.fProblematicMemory = true;
8154 }
8155#endif
8156
8157
8158 /*
8159 * Read in the current memory content if it's a read, execute or partial
8160 * write access.
8161 */
8162 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8163 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8164 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8165
8166 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8167 {
8168 if (!pVCpu->iem.s.fBypassHandlers)
8169 {
8170 /*
8171 * Must carefully deal with access handler status codes here, which
8172 * makes the code a bit bloated.
8173 */
8174 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8175 if (rcStrict == VINF_SUCCESS)
8176 {
8177 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8178 if (rcStrict == VINF_SUCCESS)
8179 { /*likely */ }
8180 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8181 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8182 else
8183 {
8184 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8185 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8186 return rcStrict;
8187 }
8188 }
8189 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8190 {
8191 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8192 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8193 {
8194 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8195 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8196 }
8197 else
8198 {
8199 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8200 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8201 return rcStrict2;
8202 }
8203 }
8204 else
8205 {
8206 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8207 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8208 return rcStrict;
8209 }
8210 }
8211 else
8212 {
8213 /*
8214 * No informational status codes here, much more straightforward.
8215 */
8216 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8217 if (RT_SUCCESS(rc))
8218 {
8219 Assert(rc == VINF_SUCCESS);
8220 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8221 if (RT_SUCCESS(rc))
8222 Assert(rc == VINF_SUCCESS);
8223 else
8224 {
8225 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8226 return rc;
8227 }
8228 }
8229 else
8230 {
8231 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8232 return rc;
8233 }
8234 }
8235
8236#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8237 if ( !pVCpu->iem.s.fNoRem
8238 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8239 {
8240 /*
8241 * Record the reads.
8242 */
8243 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8244 if (pEvtRec)
8245 {
8246 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8247 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8248 pEvtRec->u.RamRead.cb = cbFirstPage;
8249 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8250 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8251 }
8252 pEvtRec = iemVerifyAllocRecord(pVCpu);
8253 if (pEvtRec)
8254 {
8255 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8256 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8257 pEvtRec->u.RamRead.cb = cbSecondPage;
8258 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8259 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8260 }
8261 }
8262#endif
8263 }
8264#ifdef VBOX_STRICT
8265 else
8266 memset(pbBuf, 0xcc, cbMem);
8267 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8268 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8269#endif
8270
8271 /*
8272 * Commit the bounce buffer entry.
8273 */
8274 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8275 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8276 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8277 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8278 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8279 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8280 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8281 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8282 pVCpu->iem.s.cActiveMappings++;
8283
8284 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8285 *ppvMem = pbBuf;
8286 return VINF_SUCCESS;
8287}
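/*
 * Illustrative note (not from the original source): the split arithmetic above in a
 * concrete case, assuming a 4KiB page size.  Since paging preserves the page offset,
 * a read of 8 bytes at a linear address whose page offset is 0xffa is carved up as:
 *
 *      GCPhysFirst & PAGE_OFFSET_MASK = 0xffa
 *      cbFirstPage  = PAGE_SIZE - 0xffa = 6;    // last 6 bytes of the 1st physical page
 *      cbSecondPage = cbMem - cbFirstPage = 2;  // first 2 bytes of the 2nd physical page
 *
 * The two physical pages are translated independently (they need not be adjacent), and
 * the pieces are placed back to back in aBounceBuffers[iMemMap].ab[] so the caller sees
 * one contiguous buffer.
 */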
8288
8289
8290/**
8291 * iemMemMap worker that deals with iemMemPageMap failures.
8292 */
8293IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8294 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8295{
8296 /*
8297 * Filter out the conditions we can't handle here and the ones which shouldn't happen.
8298 */
8299 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8300 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8301 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8302 {
8303 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8304 return rcMap;
8305 }
8306 pVCpu->iem.s.cPotentialExits++;
8307
8308 /*
8309 * Read in the current memory content if it's a read, execute or partial
8310 * write access.
8311 */
8312 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8313 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8314 {
8315 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8316 memset(pbBuf, 0xff, cbMem);
8317 else
8318 {
8319 int rc;
8320 if (!pVCpu->iem.s.fBypassHandlers)
8321 {
8322 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8323 if (rcStrict == VINF_SUCCESS)
8324 { /* nothing */ }
8325 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8326 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8327 else
8328 {
8329 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8330 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8331 return rcStrict;
8332 }
8333 }
8334 else
8335 {
8336 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8337 if (RT_SUCCESS(rc))
8338 { /* likely */ }
8339 else
8340 {
8341 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8342 GCPhysFirst, rc));
8343 return rc;
8344 }
8345 }
8346 }
8347
8348#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8349 if ( !pVCpu->iem.s.fNoRem
8350 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8351 {
8352 /*
8353 * Record the read.
8354 */
8355 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8356 if (pEvtRec)
8357 {
8358 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8359 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8360 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8361 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8362 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8363 }
8364 }
8365#endif
8366 }
8367#ifdef VBOX_STRICT
8368 else
8369 memset(pbBuf, 0xcc, cbMem);
8370#endif
8371#ifdef VBOX_STRICT
8372 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8373 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8374#endif
8375
8376 /*
8377 * Commit the bounce buffer entry.
8378 */
8379 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8382 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8383 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8384 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8385 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8386 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8387 pVCpu->iem.s.cActiveMappings++;
8388
8389 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8390 *ppvMem = pbBuf;
8391 return VINF_SUCCESS;
8392}
8393
8394
8395
8396/**
8397 * Maps the specified guest memory for the given kind of access.
8398 *
8399 * This may be using bounce buffering of the memory if it's crossing a page
8400 * boundary or if there is an access handler installed for any of it. Because
8401 * of lock prefix guarantees, we're in for some extra clutter when this
8402 * happens.
8403 *
8404 * This may raise a \#GP, \#SS, \#PF or \#AC.
8405 *
8406 * @returns VBox strict status code.
8407 *
8408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8409 * @param ppvMem Where to return the pointer to the mapped
8410 * memory.
8411 * @param cbMem The number of bytes to map. This is usually 1,
8412 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8413 * string operations it can be up to a page.
8414 * @param iSegReg The index of the segment register to use for
8415 * this access. The base and limits are checked.
8416 * Use UINT8_MAX to indicate that no segmentation
8417 * is required (for IDT, GDT and LDT accesses).
8418 * @param GCPtrMem The address of the guest memory.
8419 * @param fAccess How the memory is being accessed. The
8420 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8421 * how to map the memory, while the
8422 * IEM_ACCESS_WHAT_XXX bit is used when raising
8423 * exceptions.
8424 */
8425IEM_STATIC VBOXSTRICTRC
8426iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8427{
8428 /*
8429 * Check the input and figure out which mapping entry to use.
8430 */
8431 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8432 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8433 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8434
8435 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8436 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8437 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8438 {
8439 iMemMap = iemMemMapFindFree(pVCpu);
8440 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8441 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8442 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8443 pVCpu->iem.s.aMemMappings[2].fAccess),
8444 VERR_IEM_IPE_9);
8445 }
8446
8447 /*
8448 * Map the memory, checking that we can actually access it. If something
8449 * slightly complicated happens, fall back on bounce buffering.
8450 */
8451 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8452 if (rcStrict != VINF_SUCCESS)
8453 return rcStrict;
8454
8455 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8456 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8457
8458 RTGCPHYS GCPhysFirst;
8459 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8460 if (rcStrict != VINF_SUCCESS)
8461 return rcStrict;
8462
8463 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8464 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8465 if (fAccess & IEM_ACCESS_TYPE_READ)
8466 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8467
8468 void *pvMem;
8469 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8470 if (rcStrict != VINF_SUCCESS)
8471 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8472
8473 /*
8474 * Fill in the mapping table entry.
8475 */
8476 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8477 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8478 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8479 pVCpu->iem.s.cActiveMappings++;
8480
8481 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8482 *ppvMem = pvMem;
8483 return VINF_SUCCESS;
8484}
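/*
 * Illustrative sketch (not from the original source): the typical map / modify / commit
 * pattern an instruction helper doing a read-modify-write access is expected to follow.
 * The helper name iemExampleOrU16 and the IEM_ACCESS_DATA_RW flag combination are
 * assumptions made for this example only.
 *
 *      IEM_STATIC VBOXSTRICTRC iemExampleOrU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t fOrMask)
 *      {
 *          uint16_t *pu16Dst;
 *          VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
 *                                            IEM_ACCESS_DATA_RW);
 *          if (rcStrict == VINF_SUCCESS)
 *          {
 *              *pu16Dst |= fOrMask;               // operate on the mapped page (or on the bounce buffer)
 *              rcStrict  = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);  // writes back + releases lock
 *          }
 *          return rcStrict;
 *      }
 */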
8485
8486
8487/**
8488 * Commits the guest memory if bounce buffered and unmaps it.
8489 *
8490 * @returns Strict VBox status code.
8491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8492 * @param pvMem The mapping.
8493 * @param fAccess The kind of access.
8494 */
8495IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8496{
8497 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8498 AssertReturn(iMemMap >= 0, iMemMap);
8499
8500 /* If it's bounce buffered, we may need to write back the buffer. */
8501 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8502 {
8503 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8504 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8505 }
8506 /* Otherwise unlock it. */
8507 else
8508 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8509
8510 /* Free the entry. */
8511 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8512 Assert(pVCpu->iem.s.cActiveMappings != 0);
8513 pVCpu->iem.s.cActiveMappings--;
8514 return VINF_SUCCESS;
8515}
8516
8517#ifdef IEM_WITH_SETJMP
8518
8519/**
8520 * Maps the specified guest memory for the given kind of access, longjmp on
8521 * error.
8522 *
8523 * This may be using bounce buffering of the memory if it's crossing a page
8524 * boundary or if there is an access handler installed for any of it. Because
8525 * of lock prefix guarantees, we're in for some extra clutter when this
8526 * happens.
8527 *
8528 * This may raise a \#GP, \#SS, \#PF or \#AC.
8529 *
8530 * @returns Pointer to the mapped memory.
8531 *
8532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8533 * @param cbMem The number of bytes to map. This is usually 1,
8534 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8535 * string operations it can be up to a page.
8536 * @param iSegReg The index of the segment register to use for
8537 * this access. The base and limits are checked.
8538 * Use UINT8_MAX to indicate that no segmentation
8539 * is required (for IDT, GDT and LDT accesses).
8540 * @param GCPtrMem The address of the guest memory.
8541 * @param fAccess How the memory is being accessed. The
8542 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8543 * how to map the memory, while the
8544 * IEM_ACCESS_WHAT_XXX bit is used when raising
8545 * exceptions.
8546 */
8547IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8548{
8549 /*
8550 * Check the input and figure out which mapping entry to use.
8551 */
8552 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8553 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8554 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8555
8556 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8557 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8558 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8559 {
8560 iMemMap = iemMemMapFindFree(pVCpu);
8561 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8562 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8563 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8564 pVCpu->iem.s.aMemMappings[2].fAccess),
8565 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8566 }
8567
8568 /*
8569 * Map the memory, checking that we can actually access it. If something
8570 * slightly complicated happens, fall back on bounce buffering.
8571 */
8572 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8573 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8574 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8575
8576 /* Crossing a page boundary? */
8577 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8578 { /* No (likely). */ }
8579 else
8580 {
8581 void *pvMem;
8582 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8583 if (rcStrict == VINF_SUCCESS)
8584 return pvMem;
8585 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8586 }
8587
8588 RTGCPHYS GCPhysFirst;
8589 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8590 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8591 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8592
8593 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8594 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8595 if (fAccess & IEM_ACCESS_TYPE_READ)
8596 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8597
8598 void *pvMem;
8599 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8600 if (rcStrict == VINF_SUCCESS)
8601 { /* likely */ }
8602 else
8603 {
8604 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8605 if (rcStrict == VINF_SUCCESS)
8606 return pvMem;
8607 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8608 }
8609
8610 /*
8611 * Fill in the mapping table entry.
8612 */
8613 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8615 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8616 pVCpu->iem.s.cActiveMappings++;
8617
8618 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8619 return pvMem;
8620}
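/*
 * Rough sketch (not from the original source) of the setjmp harness that makes the *Jmp
 * accessors above usable.  The exact way the real callers save and restore pJmpBuf may
 * differ; this only illustrates the control flow and is an assumption for the example.
 *
 *      jmp_buf         JmpBuf;
 *      jmp_buf * const pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);   // assumed save/restore
 *      pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
 *      VBOXSTRICTRC rcStrict;
 *      int rcJmp = setjmp(JmpBuf);
 *      if (rcJmp == 0)
 *      {
 *          uint16_t u16 = iemMemFetchDataU16Jmp(pVCpu, X86_SREG_DS, GCPtrMem);  // no status code to check
 *          // ... use u16 ...
 *          rcStrict = VINF_SUCCESS;
 *      }
 *      else
 *          rcStrict = rcJmp;       // the VBOXSTRICTRC_VAL() passed to longjmp by the accessor
 *      pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
 */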
8621
8622
8623/**
8624 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8625 *
8626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8627 * @param pvMem The mapping.
8628 * @param fAccess The kind of access.
8629 */
8630IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8631{
8632 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8633 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8634
8635 /* If it's bounce buffered, we may need to write back the buffer. */
8636 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8637 {
8638 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8639 {
8640 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8641 if (rcStrict == VINF_SUCCESS)
8642 return;
8643 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8644 }
8645 }
8646 /* Otherwise unlock it. */
8647 else
8648 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8649
8650 /* Free the entry. */
8651 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8652 Assert(pVCpu->iem.s.cActiveMappings != 0);
8653 pVCpu->iem.s.cActiveMappings--;
8654}
8655
8656#endif
8657
8658#ifndef IN_RING3
8659/**
8660 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8661 * buffer part runs into trouble, the write-back is postponed to ring-3 (VMCPU_FF_IEM is set).
8662 *
8663 * Allows the instruction to be completed and retired, while the IEM user will
8664 * return to ring-3 immediately afterwards and do the postponed writes there.
8665 *
8666 * @returns VBox status code (no strict statuses). Caller must check
8667 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8669 * @param pvMem The mapping.
8670 * @param fAccess The kind of access.
8671 */
8672IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8673{
8674 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8675 AssertReturn(iMemMap >= 0, iMemMap);
8676
8677 /* If it's bounce buffered, we may need to write back the buffer. */
8678 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8679 {
8680 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8681 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8682 }
8683 /* Otherwise unlock it. */
8684 else
8685 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8686
8687 /* Free the entry. */
8688 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8689 Assert(pVCpu->iem.s.cActiveMappings != 0);
8690 pVCpu->iem.s.cActiveMappings--;
8691 return VINF_SUCCESS;
8692}
8693#endif
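/*
 * Illustrative sketch (not from the original source): how a ring-0 / raw-mode string
 * instruction loop is expected to use the postponing variant above.  The
 * VMCPU_FF_IS_PENDING check is an assumption for this example; the key point is that the
 * current iteration retires, but no further iterations are executed until ring-3 has
 * flushed the postponed write.
 *
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvDst, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS;        // stop iterating; ring-3 will commit the pending bytes
 *      // ... otherwise continue with the next chunk of the string operation ...
 */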
8694
8695
8696/**
8697 * Rolls back mappings, releasing page locks and such.
8698 *
8699 * The caller shall only call this after checking cActiveMappings.
8700 *
8702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8703 */
8704IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8705{
8706 Assert(pVCpu->iem.s.cActiveMappings > 0);
8707
8708 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8709 while (iMemMap-- > 0)
8710 {
8711 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8712 if (fAccess != IEM_ACCESS_INVALID)
8713 {
8714 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8715 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8716 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8717 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8718 Assert(pVCpu->iem.s.cActiveMappings > 0);
8719 pVCpu->iem.s.cActiveMappings--;
8720 }
8721 }
8722}
8723
8724
8725/**
8726 * Fetches a data byte.
8727 *
8728 * @returns Strict VBox status code.
8729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8730 * @param pu8Dst Where to return the byte.
8731 * @param iSegReg The index of the segment register to use for
8732 * this access. The base and limits are checked.
8733 * @param GCPtrMem The address of the guest memory.
8734 */
8735IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8736{
8737 /* The lazy approach for now... */
8738 uint8_t const *pu8Src;
8739 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8740 if (rc == VINF_SUCCESS)
8741 {
8742 *pu8Dst = *pu8Src;
8743 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8744 }
8745 return rc;
8746}
8747
8748
8749#ifdef IEM_WITH_SETJMP
8750/**
8751 * Fetches a data byte, longjmp on error.
8752 *
8753 * @returns The byte.
8754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8755 * @param iSegReg The index of the segment register to use for
8756 * this access. The base and limits are checked.
8757 * @param GCPtrMem The address of the guest memory.
8758 */
8759DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8760{
8761 /* The lazy approach for now... */
8762 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8763 uint8_t const bRet = *pu8Src;
8764 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8765 return bRet;
8766}
8767#endif /* IEM_WITH_SETJMP */
8768
8769
8770/**
8771 * Fetches a data word.
8772 *
8773 * @returns Strict VBox status code.
8774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8775 * @param pu16Dst Where to return the word.
8776 * @param iSegReg The index of the segment register to use for
8777 * this access. The base and limits are checked.
8778 * @param GCPtrMem The address of the guest memory.
8779 */
8780IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8781{
8782 /* The lazy approach for now... */
8783 uint16_t const *pu16Src;
8784 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8785 if (rc == VINF_SUCCESS)
8786 {
8787 *pu16Dst = *pu16Src;
8788 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8789 }
8790 return rc;
8791}
8792
8793
8794#ifdef IEM_WITH_SETJMP
8795/**
8796 * Fetches a data word, longjmp on error.
8797 *
8798 * @returns The word
8799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8800 * @param iSegReg The index of the segment register to use for
8801 * this access. The base and limits are checked.
8802 * @param GCPtrMem The address of the guest memory.
8803 */
8804DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8805{
8806 /* The lazy approach for now... */
8807 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8808 uint16_t const u16Ret = *pu16Src;
8809 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8810 return u16Ret;
8811}
8812#endif
8813
8814
8815/**
8816 * Fetches a data dword.
8817 *
8818 * @returns Strict VBox status code.
8819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8820 * @param pu32Dst Where to return the dword.
8821 * @param iSegReg The index of the segment register to use for
8822 * this access. The base and limits are checked.
8823 * @param GCPtrMem The address of the guest memory.
8824 */
8825IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8826{
8827 /* The lazy approach for now... */
8828 uint32_t const *pu32Src;
8829 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8830 if (rc == VINF_SUCCESS)
8831 {
8832 *pu32Dst = *pu32Src;
8833 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8834 }
8835 return rc;
8836}
8837
8838
8839#ifdef IEM_WITH_SETJMP
8840
8841IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8842{
8843 Assert(cbMem >= 1);
8844 Assert(iSegReg < X86_SREG_COUNT);
8845
8846 /*
8847 * 64-bit mode is simpler.
8848 */
8849 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8850 {
8851 if (iSegReg >= X86_SREG_FS)
8852 {
8853 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8854 GCPtrMem += pSel->u64Base;
8855 }
8856
8857 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8858 return GCPtrMem;
8859 }
8860 /*
8861 * 16-bit and 32-bit segmentation.
8862 */
8863 else
8864 {
8865 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8866 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8867 == X86DESCATTR_P /* data, expand up */
8868 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8869 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8870 {
8871 /* expand up */
8872 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8873 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8874 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8875 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8876 }
8877 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8878 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8879 {
8880 /* expand down */
8881 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8882 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8883 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8884 && GCPtrLast32 > (uint32_t)GCPtrMem))
8885 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8886 }
8887 else
8888 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8889 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8890 }
8891 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8892}
8893
8894
8895IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8896{
8897 Assert(cbMem >= 1);
8898 Assert(iSegReg < X86_SREG_COUNT);
8899
8900 /*
8901 * 64-bit mode is simpler.
8902 */
8903 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8904 {
8905 if (iSegReg >= X86_SREG_FS)
8906 {
8907 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8908 GCPtrMem += pSel->u64Base;
8909 }
8910
8911 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8912 return GCPtrMem;
8913 }
8914 /*
8915 * 16-bit and 32-bit segmentation.
8916 */
8917 else
8918 {
8919 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8920 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8921 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8922 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8923 {
8924 /* expand up */
8925 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8926 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8927 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8928 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8929 }
8930 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8931 {
8932 /* expand down */
8933 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8934 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8935 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8936 && GCPtrLast32 > (uint32_t)GCPtrMem))
8937 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8938 }
8939 else
8940 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8941 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8942 }
8943 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8944}
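/*
 * Worked example (not from the original source) of the limit checks above, using the
 * architectural rules for data segments:
 *
 *      expand-up,   u32Limit = 0x2fff:        valid offsets are 0x0000..0x2fff, so a
 *                                             4 byte access at 0x2ffc passes and one at 0x2ffe faults.
 *      expand-down, u32Limit = 0x2fff, D/B=0: valid offsets are 0x3000..0xffff, so a
 *                                             4 byte access at 0x3000 passes and one at 0x2ffe faults.
 *
 * In both cases the linear address returned is the offset plus u64Base.
 */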
8945
8946
8947/**
8948 * Fetches a data dword, longjmp on error, fallback/safe version.
8949 *
8950 * @returns The dword
8951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8952 * @param iSegReg The index of the segment register to use for
8953 * this access. The base and limits are checked.
8954 * @param GCPtrMem The address of the guest memory.
8955 */
8956IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8957{
8958 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8959 uint32_t const u32Ret = *pu32Src;
8960 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8961 return u32Ret;
8962}
8963
8964
8965/**
8966 * Fetches a data dword, longjmp on error.
8967 *
8968 * @returns The dword
8969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8970 * @param iSegReg The index of the segment register to use for
8971 * this access. The base and limits are checked.
8972 * @param GCPtrMem The address of the guest memory.
8973 */
8974DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8975{
8976# ifdef IEM_WITH_DATA_TLB
8977 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8978 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8979 {
8980 /// @todo more later.
8981 }
8982
8983 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8984# else
8985 /* The lazy approach. */
8986 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8987 uint32_t const u32Ret = *pu32Src;
8988 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8989 return u32Ret;
8990# endif
8991}
8992#endif
8993
8994
8995#ifdef SOME_UNUSED_FUNCTION
8996/**
8997 * Fetches a data dword and sign extends it to a qword.
8998 *
8999 * @returns Strict VBox status code.
9000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9001 * @param pu64Dst Where to return the sign extended value.
9002 * @param iSegReg The index of the segment register to use for
9003 * this access. The base and limits are checked.
9004 * @param GCPtrMem The address of the guest memory.
9005 */
9006IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9007{
9008 /* The lazy approach for now... */
9009 int32_t const *pi32Src;
9010 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9011 if (rc == VINF_SUCCESS)
9012 {
9013 *pu64Dst = *pi32Src;
9014 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9015 }
9016#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9017 else
9018 *pu64Dst = 0;
9019#endif
9020 return rc;
9021}
9022#endif
9023
9024
9025/**
9026 * Fetches a data qword.
9027 *
9028 * @returns Strict VBox status code.
9029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9030 * @param pu64Dst Where to return the qword.
9031 * @param iSegReg The index of the segment register to use for
9032 * this access. The base and limits are checked.
9033 * @param GCPtrMem The address of the guest memory.
9034 */
9035IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9036{
9037 /* The lazy approach for now... */
9038 uint64_t const *pu64Src;
9039 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9040 if (rc == VINF_SUCCESS)
9041 {
9042 *pu64Dst = *pu64Src;
9043 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9044 }
9045 return rc;
9046}
9047
9048
9049#ifdef IEM_WITH_SETJMP
9050/**
9051 * Fetches a data qword, longjmp on error.
9052 *
9053 * @returns The qword.
9054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9055 * @param iSegReg The index of the segment register to use for
9056 * this access. The base and limits are checked.
9057 * @param GCPtrMem The address of the guest memory.
9058 */
9059DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9060{
9061 /* The lazy approach for now... */
9062 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9063 uint64_t const u64Ret = *pu64Src;
9064 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9065 return u64Ret;
9066}
9067#endif
9068
9069
9070/**
9071 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9072 *
9073 * @returns Strict VBox status code.
9074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9075 * @param pu64Dst Where to return the qword.
9076 * @param iSegReg The index of the segment register to use for
9077 * this access. The base and limits are checked.
9078 * @param GCPtrMem The address of the guest memory.
9079 */
9080IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9081{
9082 /* The lazy approach for now... */
9083 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9084 if (RT_UNLIKELY(GCPtrMem & 15))
9085 return iemRaiseGeneralProtectionFault0(pVCpu);
9086
9087 uint64_t const *pu64Src;
9088 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9089 if (rc == VINF_SUCCESS)
9090 {
9091 *pu64Dst = *pu64Src;
9092 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9093 }
9094 return rc;
9095}
9096
9097
9098#ifdef IEM_WITH_SETJMP
9099/**
9100 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9101 *
9102 * @returns The qword.
9103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9104 * @param iSegReg The index of the segment register to use for
9105 * this access. The base and limits are checked.
9106 * @param GCPtrMem The address of the guest memory.
9107 */
9108DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9109{
9110 /* The lazy approach for now... */
9111 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9112 if (RT_LIKELY(!(GCPtrMem & 15)))
9113 {
9114 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9115 uint64_t const u64Ret = *pu64Src;
9116 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9117 return u64Ret;
9118 }
9119
9120 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9121 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9122}
9123#endif
9124
9125
9126/**
9127 * Fetches a data tword.
9128 *
9129 * @returns Strict VBox status code.
9130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9131 * @param pr80Dst Where to return the tword.
9132 * @param iSegReg The index of the segment register to use for
9133 * this access. The base and limits are checked.
9134 * @param GCPtrMem The address of the guest memory.
9135 */
9136IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9137{
9138 /* The lazy approach for now... */
9139 PCRTFLOAT80U pr80Src;
9140 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9141 if (rc == VINF_SUCCESS)
9142 {
9143 *pr80Dst = *pr80Src;
9144 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9145 }
9146 return rc;
9147}
9148
9149
9150#ifdef IEM_WITH_SETJMP
9151/**
9152 * Fetches a data tword, longjmp on error.
9153 *
9154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9155 * @param pr80Dst Where to return the tword.
9156 * @param iSegReg The index of the segment register to use for
9157 * this access. The base and limits are checked.
9158 * @param GCPtrMem The address of the guest memory.
9159 */
9160DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9161{
9162 /* The lazy approach for now... */
9163 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9164 *pr80Dst = *pr80Src;
9165 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9166}
9167#endif
9168
9169
9170/**
9171 * Fetches a data dqword (double qword), generally SSE related.
9172 *
9173 * @returns Strict VBox status code.
9174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9175 * @param pu128Dst Where to return the dqword.
9176 * @param iSegReg The index of the segment register to use for
9177 * this access. The base and limits are checked.
9178 * @param GCPtrMem The address of the guest memory.
9179 */
9180IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9181{
9182 /* The lazy approach for now... */
9183 uint128_t const *pu128Src;
9184 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9185 if (rc == VINF_SUCCESS)
9186 {
9187 *pu128Dst = *pu128Src;
9188 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9189 }
9190 return rc;
9191}
9192
9193
9194#ifdef IEM_WITH_SETJMP
9195/**
9196 * Fetches a data dqword (double qword), generally SSE related.
9197 *
9198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9199 * @param pu128Dst Where to return the dqword.
9200 * @param iSegReg The index of the segment register to use for
9201 * this access. The base and limits are checked.
9202 * @param GCPtrMem The address of the guest memory.
9203 */
9204IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9205{
9206 /* The lazy approach for now... */
9207 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9208 *pu128Dst = *pu128Src;
9209 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9210}
9211#endif
9212
9213
9214/**
9215 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9216 * related.
9217 *
9218 * Raises \#GP(0) if not aligned.
9219 *
9220 * @returns Strict VBox status code.
9221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9222 * @param pu128Dst Where to return the dqword.
9223 * @param iSegReg The index of the segment register to use for
9224 * this access. The base and limits are checked.
9225 * @param GCPtrMem The address of the guest memory.
9226 */
9227IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9228{
9229 /* The lazy approach for now... */
9230 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9231 if ( (GCPtrMem & 15)
9232 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9233 return iemRaiseGeneralProtectionFault0(pVCpu);
9234
9235 uint128_t const *pu128Src;
9236 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9237 if (rc == VINF_SUCCESS)
9238 {
9239 *pu128Dst = *pu128Src;
9240 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9241 }
9242 return rc;
9243}
9244
9245
9246#ifdef IEM_WITH_SETJMP
9247/**
9248 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9249 * related, longjmp on error.
9250 *
9251 * Raises \#GP(0) if not aligned.
9252 *
9253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9254 * @param pu128Dst Where to return the dqword.
9255 * @param iSegReg The index of the segment register to use for
9256 * this access. The base and limits are checked.
9257 * @param GCPtrMem The address of the guest memory.
9258 */
9259DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9260{
9261 /* The lazy approach for now... */
9262 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9263 if ( (GCPtrMem & 15) == 0
9264 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9265 {
9266 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9267 IEM_ACCESS_DATA_R);
9268 *pu128Dst = *pu128Src;
9269 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9270 return;
9271 }
9272
9273 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9274 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9275}
9276#endif
9277
9278
9279
9280/**
9281 * Fetches a descriptor register (lgdt, lidt).
9282 *
9283 * @returns Strict VBox status code.
9284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9285 * @param pcbLimit Where to return the limit.
9286 * @param pGCPtrBase Where to return the base.
9287 * @param iSegReg The index of the segment register to use for
9288 * this access. The base and limits are checked.
9289 * @param GCPtrMem The address of the guest memory.
9290 * @param enmOpSize The effective operand size.
9291 */
9292IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9293 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9294{
9295 /*
9296 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9297 * little special:
9298 * - The two reads are done separately.
9299 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9300 * - We suspect the 386 to actually commit the limit before the base in
9301 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9302 * don't try to emulate this eccentric behavior, because it's not well
9303 * enough understood and rather hard to trigger.
9304 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9305 */
9306 VBOXSTRICTRC rcStrict;
9307 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9308 {
9309 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9310 if (rcStrict == VINF_SUCCESS)
9311 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9312 }
9313 else
9314 {
9315 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
9316 if (enmOpSize == IEMMODE_32BIT)
9317 {
9318 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9319 {
9320 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9321 if (rcStrict == VINF_SUCCESS)
9322 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9323 }
9324 else
9325 {
9326 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9327 if (rcStrict == VINF_SUCCESS)
9328 {
9329 *pcbLimit = (uint16_t)uTmp;
9330 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9331 }
9332 }
9333 if (rcStrict == VINF_SUCCESS)
9334 *pGCPtrBase = uTmp;
9335 }
9336 else
9337 {
9338 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9339 if (rcStrict == VINF_SUCCESS)
9340 {
9341 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9342 if (rcStrict == VINF_SUCCESS)
9343 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9344 }
9345 }
9346 }
9347 return rcStrict;
9348}
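/*
 * Illustrative note (not from the original source): the pseudo-descriptor layout read
 * above.  The memory operand always starts with a 16-bit limit, followed by the base,
 * whose usable width depends on the mode / operand size:
 *
 *      16-bit operand size:  2 byte limit + 4 byte base, of which only the low 24 bits
 *                            are used (hence the UINT32_C(0x00ffffff) mask).
 *      32-bit operand size:  2 byte limit + 4 byte base.
 *      64-bit mode:          2 byte limit + 8 byte base (operand size override ignored).
 */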
9349
9350
9351
9352/**
9353 * Stores a data byte.
9354 *
9355 * @returns Strict VBox status code.
9356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9357 * @param iSegReg The index of the segment register to use for
9358 * this access. The base and limits are checked.
9359 * @param GCPtrMem The address of the guest memory.
9360 * @param u8Value The value to store.
9361 */
9362IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9363{
9364 /* The lazy approach for now... */
9365 uint8_t *pu8Dst;
9366 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9367 if (rc == VINF_SUCCESS)
9368 {
9369 *pu8Dst = u8Value;
9370 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9371 }
9372 return rc;
9373}
9374
9375
9376#ifdef IEM_WITH_SETJMP
9377/**
9378 * Stores a data byte, longjmp on error.
9379 *
9380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9381 * @param iSegReg The index of the segment register to use for
9382 * this access. The base and limits are checked.
9383 * @param GCPtrMem The address of the guest memory.
9384 * @param u8Value The value to store.
9385 */
9386IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9387{
9388 /* The lazy approach for now... */
9389 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9390 *pu8Dst = u8Value;
9391 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9392}
9393#endif
9394
9395
9396/**
9397 * Stores a data word.
9398 *
9399 * @returns Strict VBox status code.
9400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9401 * @param iSegReg The index of the segment register to use for
9402 * this access. The base and limits are checked.
9403 * @param GCPtrMem The address of the guest memory.
9404 * @param u16Value The value to store.
9405 */
9406IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9407{
9408 /* The lazy approach for now... */
9409 uint16_t *pu16Dst;
9410 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9411 if (rc == VINF_SUCCESS)
9412 {
9413 *pu16Dst = u16Value;
9414 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9415 }
9416 return rc;
9417}
9418
9419
9420#ifdef IEM_WITH_SETJMP
9421/**
9422 * Stores a data word, longjmp on error.
9423 *
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param iSegReg The index of the segment register to use for
9426 * this access. The base and limits are checked.
9427 * @param GCPtrMem The address of the guest memory.
9428 * @param u16Value The value to store.
9429 */
9430IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9431{
9432 /* The lazy approach for now... */
9433 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9434 *pu16Dst = u16Value;
9435 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9436}
9437#endif
9438
9439
9440/**
9441 * Stores a data dword.
9442 *
9443 * @returns Strict VBox status code.
9444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9445 * @param iSegReg The index of the segment register to use for
9446 * this access. The base and limits are checked.
9447 * @param GCPtrMem The address of the guest memory.
9448 * @param u32Value The value to store.
9449 */
9450IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9451{
9452 /* The lazy approach for now... */
9453 uint32_t *pu32Dst;
9454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9455 if (rc == VINF_SUCCESS)
9456 {
9457 *pu32Dst = u32Value;
9458 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9459 }
9460 return rc;
9461}
9462
9463
9464#ifdef IEM_WITH_SETJMP
9465/**
9466 * Stores a data dword, longjmp on error.
9467 *
9469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9470 * @param iSegReg The index of the segment register to use for
9471 * this access. The base and limits are checked.
9472 * @param GCPtrMem The address of the guest memory.
9473 * @param u32Value The value to store.
9474 */
9475IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9476{
9477 /* The lazy approach for now... */
9478 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9479 *pu32Dst = u32Value;
9480 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9481}
9482#endif
9483
9484
9485/**
9486 * Stores a data qword.
9487 *
9488 * @returns Strict VBox status code.
9489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9490 * @param iSegReg The index of the segment register to use for
9491 * this access. The base and limits are checked.
9492 * @param GCPtrMem The address of the guest memory.
9493 * @param u64Value The value to store.
9494 */
9495IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9496{
9497 /* The lazy approach for now... */
9498 uint64_t *pu64Dst;
9499 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9500 if (rc == VINF_SUCCESS)
9501 {
9502 *pu64Dst = u64Value;
9503 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9504 }
9505 return rc;
9506}
9507
9508
9509#ifdef IEM_WITH_SETJMP
9510/**
9511 * Stores a data qword, longjmp on error.
9512 *
9513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9514 * @param iSegReg The index of the segment register to use for
9515 * this access. The base and limits are checked.
9516 * @param GCPtrMem The address of the guest memory.
9517 * @param u64Value The value to store.
9518 */
9519IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9520{
9521 /* The lazy approach for now... */
9522 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9523 *pu64Dst = u64Value;
9524 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9525}
9526#endif
9527
9528
9529/**
9530 * Stores a data dqword.
9531 *
9532 * @returns Strict VBox status code.
9533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 * @param u128Value The value to store.
9538 */
9539IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9540{
9541 /* The lazy approach for now... */
9542 uint128_t *pu128Dst;
9543 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9544 if (rc == VINF_SUCCESS)
9545 {
9546 *pu128Dst = u128Value;
9547 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9548 }
9549 return rc;
9550}
9551
9552
9553#ifdef IEM_WITH_SETJMP
9554/**
9555 * Stores a data dqword, longjmp on error.
9556 *
9557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9558 * @param iSegReg The index of the segment register to use for
9559 * this access. The base and limits are checked.
9560 * @param GCPtrMem The address of the guest memory.
9561 * @param u128Value The value to store.
9562 */
9563IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9564{
9565 /* The lazy approach for now... */
9566 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9567 *pu128Dst = u128Value;
9568 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9569}
9570#endif
9571
9572
9573/**
9574 * Stores a data dqword, SSE aligned.
9575 *
9576 * @returns Strict VBox status code.
9577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9578 * @param iSegReg The index of the segment register to use for
9579 * this access. The base and limits are checked.
9580 * @param GCPtrMem The address of the guest memory.
9581 * @param u128Value The value to store.
9582 */
9583IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9584{
9585 /* The lazy approach for now... */
9586 if ( (GCPtrMem & 15)
9587 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9588 return iemRaiseGeneralProtectionFault0(pVCpu);
9589
9590 uint128_t *pu128Dst;
9591 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9592 if (rc == VINF_SUCCESS)
9593 {
9594 *pu128Dst = u128Value;
9595 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9596 }
9597 return rc;
9598}
9599
9600
9601#ifdef IEM_WITH_SETJMP
9602/**
9603 * Stores a data dqword, SSE aligned, longjmp on error.
9604 *
9606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9607 * @param iSegReg The index of the segment register to use for
9608 * this access. The base and limits are checked.
9609 * @param GCPtrMem The address of the guest memory.
9610 * @param u128Value The value to store.
9611 */
9612DECL_NO_INLINE(IEM_STATIC, void)
9613iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9614{
9615 /* The lazy approach for now... */
9616 if ( (GCPtrMem & 15) == 0
9617 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9618 {
9619 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9620 *pu128Dst = u128Value;
9621 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9622 return;
9623 }
9624
9625 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9626 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9627}
9628#endif
9629
9630
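/*
 * Sketch (illustrative, compiled out) of the alignment rule applied by the two
 * aligned-store variants above: a 16-byte SSE store is only accepted when the
 * address is 16-byte aligned or the (AMD) MXCSR.MM bit masks misaligned accesses.
 * The function name is hypothetical; the predicate mirrors the checks above.
 */
#if 0
IEM_STATIC bool iemExampleIsSseStoreAcceptable(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    return (GCPtrMem & 15) == 0
        || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM);
}
#endif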
9631/**
9632 * Stores a descriptor register (sgdt, sidt).
9633 *
9634 * @returns Strict VBox status code.
9635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9636 * @param cbLimit The limit.
9637 * @param GCPtrBase The base address.
9638 * @param iSegReg The index of the segment register to use for
9639 * this access. The base and limits are checked.
9640 * @param GCPtrMem The address of the guest memory.
9641 */
9642IEM_STATIC VBOXSTRICTRC
9643iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9644{
9645 /*
9646 * The SIDT and SGDT instructions actually store the data using two
9647 * independent writes. The instructions do not respond to operand-size prefixes.
9648 */
9649 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9650 if (rcStrict == VINF_SUCCESS)
9651 {
9652 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9653 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9654 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9655 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9656 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9657 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9658 else
9659 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9660 }
9661 return rcStrict;
9662}
9663
9664
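/*
 * Layout sketch (illustrative, compiled out) of the image the helper above leaves
 * in guest memory.  The packed struct and its name are hypothetical; the offsets
 * follow the two writes performed above.
 */
#if 0
#pragma pack(1)
typedef struct IEMEXAMPLEXDTR32
{
    uint16_t cbLimit;   /* offset 0: the 16-bit limit, always stored as a word.   */
    uint32_t uBase;     /* offset 2: the base; top byte forced to 0xff on <=286
                           targets, replaced by a 64-bit field in 64-bit mode.    */
} IEMEXAMPLEXDTR32;
#pragma pack()
#endif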
9665/**
9666 * Pushes a word onto the stack.
9667 *
9668 * @returns Strict VBox status code.
9669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9670 * @param u16Value The value to push.
9671 */
9672IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9673{
9674 /* Decrement the stack pointer. */
9675 uint64_t uNewRsp;
9676 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9677 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9678
9679 /* Write the word the lazy way. */
9680 uint16_t *pu16Dst;
9681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9682 if (rc == VINF_SUCCESS)
9683 {
9684 *pu16Dst = u16Value;
9685 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9686 }
9687
9688 /* Commit the new RSP value unless an access handler made trouble. */
9689 if (rc == VINF_SUCCESS)
9690 pCtx->rsp = uNewRsp;
9691
9692 return rc;
9693}
9694
9695
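/*
 * Usage sketch (illustrative, compiled out): a PUSH-style caller of the helper
 * above.  RSP is only updated by the helper when the write succeeded, so RIP is
 * likewise only advanced on success here.  The function name is hypothetical.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePushWord(PVMCPU pVCpu, uint16_t u16Value)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Value);
    if (rcStrict == VINF_SUCCESS)
        iemRegUpdateRipAndClearRF(pVCpu);
    return rcStrict;
}
#endif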
9696/**
9697 * Pushes a dword onto the stack.
9698 *
9699 * @returns Strict VBox status code.
9700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9701 * @param u32Value The value to push.
9702 */
9703IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9704{
9705 /* Decrement the stack pointer. */
9706 uint64_t uNewRsp;
9707 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9708 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9709
9710 /* Write the dword the lazy way. */
9711 uint32_t *pu32Dst;
9712 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9713 if (rc == VINF_SUCCESS)
9714 {
9715 *pu32Dst = u32Value;
9716 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9717 }
9718
9719 /* Commit the new RSP value unless an access handler made trouble. */
9720 if (rc == VINF_SUCCESS)
9721 pCtx->rsp = uNewRsp;
9722
9723 return rc;
9724}
9725
9726
9727/**
9728 * Pushes a dword segment register value onto the stack.
9729 *
9730 * @returns Strict VBox status code.
9731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9732 * @param u32Value The value to push.
9733 */
9734IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9735{
9736 /* Decrement the stack pointer. */
9737 uint64_t uNewRsp;
9738 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9739 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9740
9741 VBOXSTRICTRC rc;
9742 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9743 {
9744 /* The recompiler writes a full dword. */
9745 uint32_t *pu32Dst;
9746 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9747 if (rc == VINF_SUCCESS)
9748 {
9749 *pu32Dst = u32Value;
9750 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9751 }
9752 }
9753 else
9754 {
9755 /* The Intel docs talk about zero extending the selector register
9756 value. My actual Intel CPU here might be zero extending the value,
9757 but it still only writes the lower word... */
9758 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9759 * happens when crossing an electric page boundary, is the high word checked
9760 * for write accessibility or not? Probably it is. What about segment limits?
9761 * It appears this behavior is also shared with trap error codes.
9762 *
9763 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
9764 * on ancient hardware to find out when it actually did change. */
9765 uint16_t *pu16Dst;
9766 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9767 if (rc == VINF_SUCCESS)
9768 {
9769 *pu16Dst = (uint16_t)u32Value;
9770 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9771 }
9772 }
9773
9774 /* Commit the new RSP value unless an access handler made trouble. */
9775 if (rc == VINF_SUCCESS)
9776 pCtx->rsp = uNewRsp;
9777
9778 return rc;
9779}
9780
9781
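/*
 * Behaviour sketch (illustrative, compiled out) of the non-verification path
 * above: a 32-bit segment register push moves the stack pointer by four bytes,
 * but only the low word of the slot is written and the upper two bytes keep
 * their previous contents.  Buffer, name and values here are hypothetical.
 */
#if 0
IEM_STATIC void iemExampleSRegPushSlot(uint8_t *pbSlot, uint16_t uSel)
{
    pbSlot[0] = (uint8_t)uSel;
    pbSlot[1] = (uint8_t)(uSel >> 8);
    /* pbSlot[2] and pbSlot[3] are deliberately left untouched. */
}
#endif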
9782/**
9783 * Pushes a qword onto the stack.
9784 *
9785 * @returns Strict VBox status code.
9786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9787 * @param u64Value The value to push.
9788 */
9789IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9790{
9791 /* Decrement the stack pointer. */
9792 uint64_t uNewRsp;
9793 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9794 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9795
9796 /* Write the qword the lazy way. */
9797 uint64_t *pu64Dst;
9798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9799 if (rc == VINF_SUCCESS)
9800 {
9801 *pu64Dst = u64Value;
9802 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9803 }
9804
9805 /* Commit the new RSP value unless an access handler made trouble. */
9806 if (rc == VINF_SUCCESS)
9807 pCtx->rsp = uNewRsp;
9808
9809 return rc;
9810}
9811
9812
9813/**
9814 * Pops a word from the stack.
9815 *
9816 * @returns Strict VBox status code.
9817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9818 * @param pu16Value Where to store the popped value.
9819 */
9820IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9821{
9822 /* Increment the stack pointer. */
9823 uint64_t uNewRsp;
9824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9825 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9826
9827 /* Fetch the word the lazy way. */
9828 uint16_t const *pu16Src;
9829 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9830 if (rc == VINF_SUCCESS)
9831 {
9832 *pu16Value = *pu16Src;
9833 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9834
9835 /* Commit the new RSP value. */
9836 if (rc == VINF_SUCCESS)
9837 pCtx->rsp = uNewRsp;
9838 }
9839
9840 return rc;
9841}
9842
9843
9844/**
9845 * Pops a dword from the stack.
9846 *
9847 * @returns Strict VBox status code.
9848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9849 * @param pu32Value Where to store the popped value.
9850 */
9851IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9852{
9853 /* Increment the stack pointer. */
9854 uint64_t uNewRsp;
9855 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9856 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9857
9858 /* Fetch the dword the lazy way. */
9859 uint32_t const *pu32Src;
9860 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9861 if (rc == VINF_SUCCESS)
9862 {
9863 *pu32Value = *pu32Src;
9864 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9865
9866 /* Commit the new RSP value. */
9867 if (rc == VINF_SUCCESS)
9868 pCtx->rsp = uNewRsp;
9869 }
9870
9871 return rc;
9872}
9873
9874
9875/**
9876 * Pops a qword from the stack.
9877 *
9878 * @returns Strict VBox status code.
9879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9880 * @param pu64Value Where to store the popped value.
9881 */
9882IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9883{
9884 /* Increment the stack pointer. */
9885 uint64_t uNewRsp;
9886 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9887 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9888
9889 /* Fetch the qword the lazy way. */
9890 uint64_t const *pu64Src;
9891 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9892 if (rc == VINF_SUCCESS)
9893 {
9894 *pu64Value = *pu64Src;
9895 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9896
9897 /* Commit the new RSP value. */
9898 if (rc == VINF_SUCCESS)
9899 pCtx->rsp = uNewRsp;
9900 }
9901
9902 return rc;
9903}
9904
9905
9906/**
9907 * Pushes a word onto the stack, using a temporary stack pointer.
9908 *
9909 * @returns Strict VBox status code.
9910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9911 * @param u16Value The value to push.
9912 * @param pTmpRsp Pointer to the temporary stack pointer.
9913 */
9914IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9915{
9916 /* Decrement the stack pointer. */
9917 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9918 RTUINT64U NewRsp = *pTmpRsp;
9919 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9920
9921 /* Write the word the lazy way. */
9922 uint16_t *pu16Dst;
9923 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9924 if (rc == VINF_SUCCESS)
9925 {
9926 *pu16Dst = u16Value;
9927 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9928 }
9929
9930 /* Commit the new RSP value unless an access handler made trouble. */
9931 if (rc == VINF_SUCCESS)
9932 *pTmpRsp = NewRsp;
9933
9934 return rc;
9935}
9936
9937
9938/**
9939 * Pushes a dword onto the stack, using a temporary stack pointer.
9940 *
9941 * @returns Strict VBox status code.
9942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9943 * @param u32Value The value to push.
9944 * @param pTmpRsp Pointer to the temporary stack pointer.
9945 */
9946IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9947{
9948 /* Decrement the stack pointer. */
9949 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9950 RTUINT64U NewRsp = *pTmpRsp;
9951 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9952
9953 /* Write the dword the lazy way. */
9954 uint32_t *pu32Dst;
9955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9956 if (rc == VINF_SUCCESS)
9957 {
9958 *pu32Dst = u32Value;
9959 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9960 }
9961
9962 /* Commit the new RSP value unless an access handler made trouble. */
9963 if (rc == VINF_SUCCESS)
9964 *pTmpRsp = NewRsp;
9965
9966 return rc;
9967}
9968
9969
9970/**
9971 * Pushes a qword onto the stack, using a temporary stack pointer.
9972 *
9973 * @returns Strict VBox status code.
9974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9975 * @param u64Value The value to push.
9976 * @param pTmpRsp Pointer to the temporary stack pointer.
9977 */
9978IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9979{
9980 /* Decrement the stack pointer. */
9981 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9982 RTUINT64U NewRsp = *pTmpRsp;
9983 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9984
9985 /* Write the qword the lazy way. */
9986 uint64_t *pu64Dst;
9987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9988 if (rc == VINF_SUCCESS)
9989 {
9990 *pu64Dst = u64Value;
9991 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9992 }
9993
9994 /* Commit the new RSP value unless an access handler made trouble. */
9995 if (rc == VINF_SUCCESS)
9996 *pTmpRsp = NewRsp;
9997
9998 return rc;
9999}
10000
10001
10002/**
10003 * Pops a word from the stack, using a temporary stack pointer.
10004 *
10005 * @returns Strict VBox status code.
10006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10007 * @param pu16Value Where to store the popped value.
10008 * @param pTmpRsp Pointer to the temporary stack pointer.
10009 */
10010IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10011{
10012 /* Increment the stack pointer. */
10013 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10014 RTUINT64U NewRsp = *pTmpRsp;
10015 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10016
10017 /* Fetch the word the lazy way. */
10018 uint16_t const *pu16Src;
10019 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10020 if (rc == VINF_SUCCESS)
10021 {
10022 *pu16Value = *pu16Src;
10023 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10024
10025 /* Commit the new RSP value. */
10026 if (rc == VINF_SUCCESS)
10027 *pTmpRsp = NewRsp;
10028 }
10029
10030 return rc;
10031}
10032
10033
10034/**
10035 * Pops a dword from the stack, using a temporary stack pointer.
10036 *
10037 * @returns Strict VBox status code.
10038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10039 * @param pu32Value Where to store the popped value.
10040 * @param pTmpRsp Pointer to the temporary stack pointer.
10041 */
10042IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10043{
10044 /* Increment the stack pointer. */
10045 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10046 RTUINT64U NewRsp = *pTmpRsp;
10047 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10048
10049 /* Fetch the dword the lazy way. */
10050 uint32_t const *pu32Src;
10051 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10052 if (rc == VINF_SUCCESS)
10053 {
10054 *pu32Value = *pu32Src;
10055 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10056
10057 /* Commit the new RSP value. */
10058 if (rc == VINF_SUCCESS)
10059 *pTmpRsp = NewRsp;
10060 }
10061
10062 return rc;
10063}
10064
10065
10066/**
10067 * Pops a qword from the stack, using a temporary stack pointer.
10068 *
10069 * @returns Strict VBox status code.
10070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10071 * @param pu64Value Where to store the popped value.
10072 * @param pTmpRsp Pointer to the temporary stack pointer.
10073 */
10074IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10075{
10076 /* Increment the stack pointer. */
10077 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10078 RTUINT64U NewRsp = *pTmpRsp;
10079 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10080
10081 /* Fetch the qword the lazy way. */
10082 uint64_t const *pu64Src;
10083 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10084 if (rcStrict == VINF_SUCCESS)
10085 {
10086 *pu64Value = *pu64Src;
10087 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10088
10089 /* Commit the new RSP value. */
10090 if (rcStrict == VINF_SUCCESS)
10091 *pTmpRsp = NewRsp;
10092 }
10093
10094 return rcStrict;
10095}
10096
10097
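/*
 * Usage sketch (illustrative, compiled out): the *Ex variants above operate on a
 * caller-owned RSP copy, so a multi-step operation commits the real RSP only once
 * everything has succeeded.  The function name and parameters are hypothetical.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePopTwoWords(PVMCPU pVCpu, uint16_t *pu16First, uint16_t *pu16Second)
{
    PCPUMCTX  pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, pu16First, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, pu16Second, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u; /* commit only after both pops succeeded */
    return rcStrict;
}
#endif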
10098/**
10099 * Begin a special stack push (used by interrupts, exceptions and such).
10100 *
10101 * This will raise \#SS or \#PF if appropriate.
10102 *
10103 * @returns Strict VBox status code.
10104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10105 * @param cbMem The number of bytes to push onto the stack.
10106 * @param ppvMem Where to return the pointer to the stack memory.
10107 * As with the other memory functions this could be
10108 * direct access or bounce buffered access, so
10109 * don't commit the register state until the commit call
10110 * succeeds.
10111 * @param puNewRsp Where to return the new RSP value. This must be
10112 * passed unchanged to
10113 * iemMemStackPushCommitSpecial().
10114 */
10115IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10116{
10117 Assert(cbMem < UINT8_MAX);
10118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10119 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10120 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10121}
10122
10123
10124/**
10125 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10126 *
10127 * This will update the rSP.
10128 *
10129 * @returns Strict VBox status code.
10130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10131 * @param pvMem The pointer returned by
10132 * iemMemStackPushBeginSpecial().
10133 * @param uNewRsp The new RSP value returned by
10134 * iemMemStackPushBeginSpecial().
10135 */
10136IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10137{
10138 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10139 if (rcStrict == VINF_SUCCESS)
10140 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10141 return rcStrict;
10142}
10143
10144
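/*
 * Usage sketch (illustrative, compiled out): how an exception dispatcher might use
 * the begin/commit pair above to build a three-word real-mode frame in one mapped
 * block.  The function name and the frame contents are hypothetical.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePushRealModeXcptFrame(PVMCPU pVCpu, uint16_t uFlags, uint16_t uCs, uint16_t uIp)
{
    uint16_t    *pau16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[2] = uFlags;
    pau16Frame[1] = uCs;
    pau16Frame[0] = uIp;
    return iemMemStackPushCommitSpecial(pVCpu, pau16Frame, uNewRsp); /* commits RSP on success */
}
#endif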
10145/**
10146 * Begin a special stack pop (used by iret, retf and such).
10147 *
10148 * This will raise \#SS or \#PF if appropriate.
10149 *
10150 * @returns Strict VBox status code.
10151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10152 * @param cbMem The number of bytes to pop from the stack.
10153 * @param ppvMem Where to return the pointer to the stack memory.
10154 * @param puNewRsp Where to return the new RSP value. This must be
10155 * assigned to CPUMCTX::rsp manually some time
10156 * after iemMemStackPopDoneSpecial() has been
10157 * called.
10158 */
10159IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10160{
10161 Assert(cbMem < UINT8_MAX);
10162 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10163 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10164 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10165}
10166
10167
10168/**
10169 * Continue a special stack pop (used by iret and retf).
10170 *
10171 * This will raise \#SS or \#PF if appropriate.
10172 *
10173 * @returns Strict VBox status code.
10174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10175 * @param cbMem The number of bytes to pop from the stack.
10176 * @param ppvMem Where to return the pointer to the stack memory.
10177 * @param puNewRsp Where to return the new RSP value. This must be
10178 * assigned to CPUMCTX::rsp manually some time
10179 * after iemMemStackPopDoneSpecial() has been
10180 * called.
10181 */
10182IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10183{
10184 Assert(cbMem < UINT8_MAX);
10185 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10186 RTUINT64U NewRsp;
10187 NewRsp.u = *puNewRsp;
10188 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10189 *puNewRsp = NewRsp.u;
10190 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10191}
10192
10193
10194/**
10195 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10196 * iemMemStackPopContinueSpecial).
10197 *
10198 * The caller will manually commit the rSP.
10199 *
10200 * @returns Strict VBox status code.
10201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10202 * @param pvMem The pointer returned by
10203 * iemMemStackPopBeginSpecial() or
10204 * iemMemStackPopContinueSpecial().
10205 */
10206IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10207{
10208 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10209}
10210
10211
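/*
 * Usage sketch (illustrative, compiled out): a simple 64-bit return-address pop
 * using the special begin/done pair above; as documented, the caller commits RSP
 * manually after validating the popped data.  Names are hypothetical.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePopReturnAddr64(PVMCPU pVCpu, uint64_t *puNewRip)
{
    uint64_t const *pu64Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puNewRip = *pu64Frame;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
    return rcStrict;
}
#endif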
10212/**
10213 * Fetches a system table byte.
10214 *
10215 * @returns Strict VBox status code.
10216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10217 * @param pbDst Where to return the byte.
10218 * @param iSegReg The index of the segment register to use for
10219 * this access. The base and limits are checked.
10220 * @param GCPtrMem The address of the guest memory.
10221 */
10222IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10223{
10224 /* The lazy approach for now... */
10225 uint8_t const *pbSrc;
10226 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10227 if (rc == VINF_SUCCESS)
10228 {
10229 *pbDst = *pbSrc;
10230 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10231 }
10232 return rc;
10233}
10234
10235
10236/**
10237 * Fetches a system table word.
10238 *
10239 * @returns Strict VBox status code.
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param pu16Dst Where to return the word.
10242 * @param iSegReg The index of the segment register to use for
10243 * this access. The base and limits are checked.
10244 * @param GCPtrMem The address of the guest memory.
10245 */
10246IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10247{
10248 /* The lazy approach for now... */
10249 uint16_t const *pu16Src;
10250 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10251 if (rc == VINF_SUCCESS)
10252 {
10253 *pu16Dst = *pu16Src;
10254 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10255 }
10256 return rc;
10257}
10258
10259
10260/**
10261 * Fetches a system table dword.
10262 *
10263 * @returns Strict VBox status code.
10264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10265 * @param pu32Dst Where to return the dword.
10266 * @param iSegReg The index of the segment register to use for
10267 * this access. The base and limits are checked.
10268 * @param GCPtrMem The address of the guest memory.
10269 */
10270IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10271{
10272 /* The lazy approach for now... */
10273 uint32_t const *pu32Src;
10274 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10275 if (rc == VINF_SUCCESS)
10276 {
10277 *pu32Dst = *pu32Src;
10278 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10279 }
10280 return rc;
10281}
10282
10283
10284/**
10285 * Fetches a system table qword.
10286 *
10287 * @returns Strict VBox status code.
10288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10289 * @param pu64Dst Where to return the qword.
10290 * @param iSegReg The index of the segment register to use for
10291 * this access. The base and limits are checked.
10292 * @param GCPtrMem The address of the guest memory.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10295{
10296 /* The lazy approach for now... */
10297 uint64_t const *pu64Src;
10298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10299 if (rc == VINF_SUCCESS)
10300 {
10301 *pu64Dst = *pu64Src;
10302 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10303 }
10304 return rc;
10305}
10306
10307
10308/**
10309 * Fetches a descriptor table entry with caller specified error code.
10310 *
10311 * @returns Strict VBox status code.
10312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10313 * @param pDesc Where to return the descriptor table entry.
10314 * @param uSel The selector which table entry to fetch.
10315 * @param uXcpt The exception to raise on table lookup error.
10316 * @param uErrorCode The error code associated with the exception.
10317 */
10318IEM_STATIC VBOXSTRICTRC
10319iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10320{
10321 AssertPtr(pDesc);
10322 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10323
10324 /** @todo did the 286 require all 8 bytes to be accessible? */
10325 /*
10326 * Get the selector table base and check bounds.
10327 */
10328 RTGCPTR GCPtrBase;
10329 if (uSel & X86_SEL_LDT)
10330 {
10331 if ( !pCtx->ldtr.Attr.n.u1Present
10332 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10333 {
10334 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10335 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10336 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10337 uErrorCode, 0);
10338 }
10339
10340 Assert(pCtx->ldtr.Attr.n.u1Present);
10341 GCPtrBase = pCtx->ldtr.u64Base;
10342 }
10343 else
10344 {
10345 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10346 {
10347 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10348 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10349 uErrorCode, 0);
10350 }
10351 GCPtrBase = pCtx->gdtr.pGdt;
10352 }
10353
10354 /*
10355 * Read the legacy descriptor and maybe the long mode extensions if
10356 * required.
10357 */
10358 VBOXSTRICTRC rcStrict;
10359 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10360 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10361 else
10362 {
10363 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10364 if (rcStrict == VINF_SUCCESS)
10365 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10366 if (rcStrict == VINF_SUCCESS)
10367 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10368 if (rcStrict == VINF_SUCCESS)
10369 pDesc->Legacy.au16[3] = 0;
10370 else
10371 return rcStrict;
10372 }
10373
10374 if (rcStrict == VINF_SUCCESS)
10375 {
10376 if ( !IEM_IS_LONG_MODE(pVCpu)
10377 || pDesc->Legacy.Gen.u1DescType)
10378 pDesc->Long.au64[1] = 0;
10379 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10380 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10381 else
10382 {
10383 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10384 /** @todo is this the right exception? */
10385 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10386 }
10387 }
10388 return rcStrict;
10389}
10390
10391
10392/**
10393 * Fetches a descriptor table entry.
10394 *
10395 * @returns Strict VBox status code.
10396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10397 * @param pDesc Where to return the descriptor table entry.
10398 * @param uSel The selector which table entry to fetch.
10399 * @param uXcpt The exception to raise on table lookup error.
10400 */
10401IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10402{
10403 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10404}
10405
10406
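/*
 * Usage sketch (illustrative, compiled out): fetching a descriptor with the helper
 * above and rejecting system descriptors.  The function name is hypothetical, and
 * raising #GP(0) instead of #GP(sel) is a simplification for brevity.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleFetchDataDesc(PVMCPU pVCpu, uint16_t uSel, PIEMSELDESC pDesc)
{
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!pDesc->Legacy.Gen.u1DescType) /* system descriptors need dedicated handling */
        return iemRaiseGeneralProtectionFault0(pVCpu);
    return VINF_SUCCESS;
}
#endif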
10407/**
10408 * Fakes a long mode stack selector for SS = 0.
10409 *
10410 * @param pDescSs Where to return the fake stack descriptor.
10411 * @param uDpl The DPL we want.
10412 */
10413IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10414{
10415 pDescSs->Long.au64[0] = 0;
10416 pDescSs->Long.au64[1] = 0;
10417 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10418 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10419 pDescSs->Long.Gen.u2Dpl = uDpl;
10420 pDescSs->Long.Gen.u1Present = 1;
10421 pDescSs->Long.Gen.u1Long = 1;
10422}
10423
10424
10425/**
10426 * Marks the selector descriptor as accessed (only non-system descriptors).
10427 *
10428 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10429 * will therefore skip the limit checks.
10430 *
10431 * @returns Strict VBox status code.
10432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10433 * @param uSel The selector.
10434 */
10435IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10436{
10437 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10438
10439 /*
10440 * Get the selector table base and calculate the entry address.
10441 */
10442 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10443 ? pCtx->ldtr.u64Base
10444 : pCtx->gdtr.pGdt;
10445 GCPtr += uSel & X86_SEL_MASK;
10446
10447 /*
10448 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10449 * ugly stuff to avoid this. This will make sure it's an atomic access
10450 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10451 */
10452 VBOXSTRICTRC rcStrict;
10453 uint32_t volatile *pu32;
10454 if ((GCPtr & 3) == 0)
10455 {
10456 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
10457 GCPtr += 2 + 2;
10458 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10459 if (rcStrict != VINF_SUCCESS)
10460 return rcStrict;
10461 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10462 }
10463 else
10464 {
10465 /* The misaligned GDT/LDT case, map the whole thing. */
10466 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10467 if (rcStrict != VINF_SUCCESS)
10468 return rcStrict;
10469 switch ((uintptr_t)pu32 & 3)
10470 {
10471 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10472 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10473 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10474 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10475 }
10476 }
10477
10478 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10479}
10480
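/*
 * Sketch (illustrative, compiled out): the bit arithmetic used above.  The type
 * field lives in byte 5 of the 8-byte descriptor, so the accessed bit is
 * descriptor bit 40; mapping the dword at offset 4 turns that into bit 8.  The
 * function name is hypothetical.
 */
#if 0
IEM_STATIC unsigned iemExampleAccessedBitInHighDword(void)
{
    return 40 - 32; /* == 8, matching the aligned-case ASMAtomicBitSet(pu32, 8) above */
}
#endif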
10481/** @} */
10482
10483
10484/*
10485 * Include the C/C++ implementation of the instructions.
10486 */
10487#include "IEMAllCImpl.cpp.h"
10488
10489
10490
10491/** @name "Microcode" macros.
10492 *
10493 * The idea is that we should be able to use the same code to interpret
10494 * instructions as well as recompiler instructions. Thus this obfuscation.
10495 *
10496 * @{
10497 */
10498#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10499#define IEM_MC_END() }
10500#define IEM_MC_PAUSE() do {} while (0)
10501#define IEM_MC_CONTINUE() do {} while (0)
10502
10503/** Internal macro. */
10504#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10505 do \
10506 { \
10507 VBOXSTRICTRC rcStrict2 = a_Expr; \
10508 if (rcStrict2 != VINF_SUCCESS) \
10509 return rcStrict2; \
10510 } while (0)
10511
10512
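/*
 * Sketch (illustrative, compiled out): the kind of instruction body these macros
 * are written in.  Interpreted, the fragment below expands to plain C statements
 * using the register accessors defined further down; the choice of registers is
 * arbitrary.
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif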
10513#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10514#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10515#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10516#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10517#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10518#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10519#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10520#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10521#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10522 do { \
10523 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10524 return iemRaiseDeviceNotAvailable(pVCpu); \
10525 } while (0)
10526#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10527 do { \
10528 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10529 return iemRaiseDeviceNotAvailable(pVCpu); \
10530 } while (0)
10531#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10532 do { \
10533 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10534 return iemRaiseMathFault(pVCpu); \
10535 } while (0)
10536#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10537 do { \
10538 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10539 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10540 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10541 return iemRaiseUndefinedOpcode(pVCpu); \
10542 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10543 return iemRaiseDeviceNotAvailable(pVCpu); \
10544 } while (0)
10545#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10546 do { \
10547 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10548 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10549 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10550 return iemRaiseUndefinedOpcode(pVCpu); \
10551 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10552 return iemRaiseDeviceNotAvailable(pVCpu); \
10553 } while (0)
10554#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10555 do { \
10556 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10557 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10558 return iemRaiseUndefinedOpcode(pVCpu); \
10559 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10560 return iemRaiseDeviceNotAvailable(pVCpu); \
10561 } while (0)
10562#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10563 do { \
10564 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10565 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10566 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10567 return iemRaiseUndefinedOpcode(pVCpu); \
10568 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10569 return iemRaiseDeviceNotAvailable(pVCpu); \
10570 } while (0)
10571#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10572 do { \
10573 if (pVCpu->iem.s.uCpl != 0) \
10574 return iemRaiseGeneralProtectionFault0(pVCpu); \
10575 } while (0)
10576#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10577 do { \
10578 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10579 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10580 } while (0)
10581
10582
10583#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10584#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10585#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10586#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10587#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10588#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10589#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10590 uint32_t a_Name; \
10591 uint32_t *a_pName = &a_Name
10592#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10593 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10594
10595#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10596#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10597
10598#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10599#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10600#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10601#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10602#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10603#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10604#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10605#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10606#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10607#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10608#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10609#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10610#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10611#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10612#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10613#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10614#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10615#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10616#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10617#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10618#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10619#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10620#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10621#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10622#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10623#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10624#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10625#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10626#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10627/** @note Not for IOPL or IF testing or modification. */
10628#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10629#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10630#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10631#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10632
10633#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10634#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10635#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10636#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10637#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10638#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10639#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10640#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10641#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10642#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10643#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10644 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10645
10646#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10647#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10648/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10649 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10650#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10651#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10652/** @note Not for IOPL or IF testing or modification. */
10653#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10654
10655#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10656#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10657#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10658 do { \
10659 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10660 *pu32Reg += (a_u32Value); \
10661 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10662 } while (0)
10663#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10664
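/*
 * Sketch (illustrative, compiled out): why IEM_MC_ADD_GREG_U32 above and the other
 * 32-bit register updaters below clear pu32Reg[1].  In 64-bit mode a 32-bit
 * general register write zero-extends into bits 63:32, so the 64-bit backing
 * storage is treated as two dwords and the high one is wiped after the operation.
 * The function name is hypothetical.
 */
#if 0
IEM_STATIC void iemExampleWriteGReg32(uint64_t *pu64Reg, uint32_t u32Value)
{
    *pu64Reg = u32Value; /* same net effect: low dword written, high dword cleared */
}
#endif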
10665#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10666#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10667#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10668 do { \
10669 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10670 *pu32Reg -= (a_u32Value); \
10671 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10672 } while (0)
10673#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10674#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10675
10676#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10677#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10678#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10679#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10680#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10681#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10682#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10683
10684#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10685#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10686#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10687#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10688
10689#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10690#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10691#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10692
10693#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10694#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10695#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10696
10697#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10698#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10699#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10700
10701#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10702#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10703#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10704
10705#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10706
10707#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10708
10709#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10710#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10711#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10712 do { \
10713 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10714 *pu32Reg &= (a_u32Value); \
10715 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10716 } while (0)
10717#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10718
10719#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10720#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10721#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10722 do { \
10723 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10724 *pu32Reg |= (a_u32Value); \
10725 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
10726 } while (0)
10727#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10728
10729
10730/** @note Not for IOPL or IF modification. */
10731#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10732/** @note Not for IOPL or IF modification. */
10733#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10734/** @note Not for IOPL or IF modification. */
10735#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10736
10737#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10738
10739
10740#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10741 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10742#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10743 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10744#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10745 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10746#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10747 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10748#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10749 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10750#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10751 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10752#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10753 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10754
10755#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10756 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10757#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10758 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10759#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10760 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10761#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10762 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10763#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10764 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10765#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10766 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10767 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10768 } while (0)
10769#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10770 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10771 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10772 } while (0)
10773#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10774 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10775#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10776 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10777#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10778 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10779#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10780 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10781 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10782
10783#ifndef IEM_WITH_SETJMP
10784# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10786# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10788# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10789 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10790#else
10791# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10792 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10793# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10794 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10795# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10796 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10797#endif
10798
10799#ifndef IEM_WITH_SETJMP
10800# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10801 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10802# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10803 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10804# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10806#else
10807# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10808 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10809# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10810 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10811# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10812 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10813#endif
10814
10815#ifndef IEM_WITH_SETJMP
10816# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10818# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10819 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10820# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10821 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10822#else
10823# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10824 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10825# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10826 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10827# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10828 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10829#endif
10830
10831#ifdef SOME_UNUSED_FUNCTION
10832# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10833 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10834#endif
10835
10836#ifndef IEM_WITH_SETJMP
10837# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10839# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10841# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10842 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10843# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10844 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10845#else
10846# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10847 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10848# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10849 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10850# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10851 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10852# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10853 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10854#endif
10855
10856#ifndef IEM_WITH_SETJMP
10857# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10858 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10859# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10860 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10861# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10863#else
10864# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10865 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10866# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10867 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10868# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10869 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10870#endif
10871
10872#ifndef IEM_WITH_SETJMP
10873# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10874 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10875# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10876 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10877#else
10878# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10879 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10880# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10881 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10882#endif
10883
10884
10885
10886#ifndef IEM_WITH_SETJMP
10887# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10888 do { \
10889 uint8_t u8Tmp; \
10890 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10891 (a_u16Dst) = u8Tmp; \
10892 } while (0)
10893# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10894 do { \
10895 uint8_t u8Tmp; \
10896 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10897 (a_u32Dst) = u8Tmp; \
10898 } while (0)
10899# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10900 do { \
10901 uint8_t u8Tmp; \
10902 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10903 (a_u64Dst) = u8Tmp; \
10904 } while (0)
10905# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10906 do { \
10907 uint16_t u16Tmp; \
10908 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10909 (a_u32Dst) = u16Tmp; \
10910 } while (0)
10911# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10912 do { \
10913 uint16_t u16Tmp; \
10914 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10915 (a_u64Dst) = u16Tmp; \
10916 } while (0)
10917# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10918 do { \
10919 uint32_t u32Tmp; \
10920 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10921 (a_u64Dst) = u32Tmp; \
10922 } while (0)
10923#else /* IEM_WITH_SETJMP */
10924# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10925 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10926# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10927 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10928# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10929 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10930# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10931 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10932# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10933 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10934# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10935 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10936#endif /* IEM_WITH_SETJMP */
10937
10938#ifndef IEM_WITH_SETJMP
10939# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10940 do { \
10941 uint8_t u8Tmp; \
10942 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10943 (a_u16Dst) = (int8_t)u8Tmp; \
10944 } while (0)
10945# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10946 do { \
10947 uint8_t u8Tmp; \
10948 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10949 (a_u32Dst) = (int8_t)u8Tmp; \
10950 } while (0)
10951# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10952 do { \
10953 uint8_t u8Tmp; \
10954 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10955 (a_u64Dst) = (int8_t)u8Tmp; \
10956 } while (0)
10957# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10958 do { \
10959 uint16_t u16Tmp; \
10960 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10961 (a_u32Dst) = (int16_t)u16Tmp; \
10962 } while (0)
10963# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10964 do { \
10965 uint16_t u16Tmp; \
10966 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10967 (a_u64Dst) = (int16_t)u16Tmp; \
10968 } while (0)
10969# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10970 do { \
10971 uint32_t u32Tmp; \
10972 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10973 (a_u64Dst) = (int32_t)u32Tmp; \
10974 } while (0)
10975#else /* IEM_WITH_SETJMP */
10976# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10977 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10978# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10979 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10980# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10981 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10982# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10983 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10984# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10985 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10986# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10987 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10988#endif /* IEM_WITH_SETJMP */
10989
10990#ifndef IEM_WITH_SETJMP
10991# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10992 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10993# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10994 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10995# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10996 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10997# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10998 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10999#else
11000# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11001 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11002# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11003 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11004# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11005 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11006# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11007 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11008#endif
11009
11010#ifndef IEM_WITH_SETJMP
11011# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11012 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11013# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11014 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11015# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11016 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11017# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11018 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11019#else
11020# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11021 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11022# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11023 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11024# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11025 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11026# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11027 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11028#endif
11029
11030#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11031#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11032#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11033#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11034#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11035#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11036#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11037 do { \
11038 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11039 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11040 } while (0)
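/* For reference, the negative QNaN bit patterns above decompose as follows:
 *   R32 0xffc00000:                sign=1, exponent=0xff (all ones), fraction MSB (quiet bit) set.
 *   R64 0xfff8000000000000:        sign=1, exponent=0x7ff (all ones), fraction MSB (quiet bit) set.
 *   R80 au16[4]=0xffff:            sign=1 plus exponent=0x7fff (all ones);
 *       au64[0]=0xc000000000000000: explicit integer bit (63) and quiet bit (62) set, rest zero.
 */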
11041
11042#ifndef IEM_WITH_SETJMP
11043# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11044 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11045# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11046 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11047#else
11048# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11049 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11050# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11051 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11052#endif
11053
11054
11055#define IEM_MC_PUSH_U16(a_u16Value) \
11056 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11057#define IEM_MC_PUSH_U32(a_u32Value) \
11058 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11059#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11060 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11061#define IEM_MC_PUSH_U64(a_u64Value) \
11062 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11063
11064#define IEM_MC_POP_U16(a_pu16Value) \
11065 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11066#define IEM_MC_POP_U32(a_pu32Value) \
11067 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11068#define IEM_MC_POP_U64(a_pu64Value) \
11069 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11070
11071/** Maps guest memory for direct or bounce buffered access.
11072 * The purpose is to pass it to an operand implementation, hence the @a a_iArg parameter.
11073 * @remarks May return.
11074 */
11075#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11076 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11077
11078/** Maps guest memory for direct or bounce buffered access.
11079 * The purpose is to pass it to an operand implementation, hence the @a a_iArg parameter.
11080 * @remarks May return.
11081 */
11082#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11083 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11084
11085/** Commits the memory and unmaps the guest memory.
11086 * @remarks May return.
11087 */
11088#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11089 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11090
11091/** Commits the memory and unmaps the guest memory, unless the FPU status word
11092 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11093 * that would cause FLD not to store its result.
11094 *
11095 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11096 * store, while \#P will not.
11097 *
11098 * @remarks May in theory return - for now.
11099 */
11100#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11101 do { \
11102 if ( !(a_u16FSW & X86_FSW_ES) \
11103 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11104 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11105 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11106 } while (0)
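/* A minimal standalone sketch of the condition above, using only the X86_FSW_*
 * and X86_FCW_* constants the macro itself references: the store is committed
 * unless the exception summary bit (ES) is set together with a pending #U, #O
 * or #I that is unmasked in the control word.  The helper name is hypothetical
 * and the block is illustrative only, not part of the build. */
#if 0
DECLINLINE(bool) iemFpuSketchWouldFldStore(uint16_t u16FSW, uint16_t u16FCW)
{
    if (!(u16FSW & X86_FSW_ES))
        return true; /* No exception summary -> nothing blocks the store. */
    /* Pending exceptions that are masked in FCW do not block the store either. */
    return !((u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) & ~(u16FCW & X86_FCW_MASK_ALL));
}
#endif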
11107
11108/** Calculate efficient address from R/M. */
11109#ifndef IEM_WITH_SETJMP
11110# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11111 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11112#else
11113# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11114 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11115#endif
11116
11117#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11118#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11119#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11120#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11121#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11122#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11123#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11124
11125/**
11126 * Defers the rest of the instruction emulation to a C implementation routine
11127 * and returns, only taking the standard parameters.
11128 *
11129 * @param a_pfnCImpl The pointer to the C routine.
11130 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11131 */
11132#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11133
11134/**
11135 * Defers the rest of instruction emulation to a C implementation routine and
11136 * returns, taking one argument in addition to the standard ones.
11137 *
11138 * @param a_pfnCImpl The pointer to the C routine.
11139 * @param a0 The argument.
11140 */
11141#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11142
11143/**
11144 * Defers the rest of the instruction emulation to a C implementation routine
11145 * and returns, taking two arguments in addition to the standard ones.
11146 *
11147 * @param a_pfnCImpl The pointer to the C routine.
11148 * @param a0 The first extra argument.
11149 * @param a1 The second extra argument.
11150 */
11151#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11152
11153/**
11154 * Defers the rest of the instruction emulation to a C implementation routine
11155 * and returns, taking three arguments in addition to the standard ones.
11156 *
11157 * @param a_pfnCImpl The pointer to the C routine.
11158 * @param a0 The first extra argument.
11159 * @param a1 The second extra argument.
11160 * @param a2 The third extra argument.
11161 */
11162#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11163
11164/**
11165 * Defers the rest of the instruction emulation to a C implementation routine
11166 * and returns, taking four arguments in addition to the standard ones.
11167 *
11168 * @param a_pfnCImpl The pointer to the C routine.
11169 * @param a0 The first extra argument.
11170 * @param a1 The second extra argument.
11171 * @param a2 The third extra argument.
11172 * @param a3 The fourth extra argument.
11173 */
11174#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11175
11176/**
11177 * Defers the rest of the instruction emulation to a C implementation routine
11178 * and returns, taking five arguments in addition to the standard ones.
11179 *
11180 * @param a_pfnCImpl The pointer to the C routine.
11181 * @param a0 The first extra argument.
11182 * @param a1 The second extra argument.
11183 * @param a2 The third extra argument.
11184 * @param a3 The fourth extra argument.
11185 * @param a4 The fifth extra argument.
11186 */
11187#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11188
11189/**
11190 * Defers the entire instruction emulation to a C implementation routine and
11191 * returns, only taking the standard parameters.
11192 *
11193 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11194 *
11195 * @param a_pfnCImpl The pointer to the C routine.
11196 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11197 */
11198#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11199
11200/**
11201 * Defers the entire instruction emulation to a C implementation routine and
11202 * returns, taking one argument in addition to the standard ones.
11203 *
11204 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11205 *
11206 * @param a_pfnCImpl The pointer to the C routine.
11207 * @param a0 The argument.
11208 */
11209#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11210
11211/**
11212 * Defers the entire instruction emulation to a C implementation routine and
11213 * returns, taking two arguments in addition to the standard ones.
11214 *
11215 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11216 *
11217 * @param a_pfnCImpl The pointer to the C routine.
11218 * @param a0 The first extra argument.
11219 * @param a1 The second extra argument.
11220 */
11221#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11222
11223/**
11224 * Defers the entire instruction emulation to a C implementation routine and
11225 * returns, taking three arguments in addition to the standard ones.
11226 *
11227 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11228 *
11229 * @param a_pfnCImpl The pointer to the C routine.
11230 * @param a0 The first extra argument.
11231 * @param a1 The second extra argument.
11232 * @param a2 The third extra argument.
11233 */
11234#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
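/* A rough usage sketch: an FNIEMOP-style decoder that hands the whole
 * instruction over to a C implementation routine.  FNIEMOP_DEF and the
 * iemCImpl_SketchHypotheticalInsn routine are assumed/hypothetical names from
 * outside this excerpt; IEM_MC_CALL_CIMPL_N is used the same way, but from
 * inside an IEM_MC_BEGIN/IEM_MC_END block after operand decoding.
 * Illustrative only, not part of the build. */
#if 0
FNIEMOP_DEF(iemOp_SketchHypotheticalInsn)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();  /* raise #UD on a lock prefix */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SketchHypotheticalInsn);
}
#endif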
11235
11236/**
11237 * Calls a FPU assembly implementation taking one visible argument.
11238 *
11239 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11240 * @param a0 The first extra argument.
11241 */
11242#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11243 do { \
11244 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11245 } while (0)
11246
11247/**
11248 * Calls a FPU assembly implementation taking two visible arguments.
11249 *
11250 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11251 * @param a0 The first extra argument.
11252 * @param a1 The second extra argument.
11253 */
11254#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11255 do { \
11256 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11257 } while (0)
11258
11259/**
11260 * Calls a FPU assembly implementation taking three visible arguments.
11261 *
11262 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11263 * @param a0 The first extra argument.
11264 * @param a1 The second extra argument.
11265 * @param a2 The third extra argument.
11266 */
11267#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11268 do { \
11269 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11270 } while (0)
11271
11272#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11273 do { \
11274 (a_FpuData).FSW = (a_FSW); \
11275 (a_FpuData).r80Result = *(a_pr80Value); \
11276 } while (0)
11277
11278/** Pushes FPU result onto the stack. */
11279#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11280 iemFpuPushResult(pVCpu, &a_FpuData)
11281/** Pushes FPU result onto the stack and sets the FPUDP. */
11282#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11283 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11284
11285/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11286#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11287 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11288
11289/** Stores FPU result in a stack register. */
11290#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11291 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11292/** Stores FPU result in a stack register and pops the stack. */
11293#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11294 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11295/** Stores FPU result in a stack register and sets the FPUDP. */
11296#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11297 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11298/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11299 * stack. */
11300#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11301 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11302
11303/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11304#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11305 iemFpuUpdateOpcodeAndIp(pVCpu)
11306/** Free a stack register (for FFREE and FFREEP). */
11307#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11308 iemFpuStackFree(pVCpu, a_iStReg)
11309/** Increment the FPU stack pointer. */
11310#define IEM_MC_FPU_STACK_INC_TOP() \
11311 iemFpuStackIncTop(pVCpu)
11312/** Decrement the FPU stack pointer. */
11313#define IEM_MC_FPU_STACK_DEC_TOP() \
11314 iemFpuStackDecTop(pVCpu)
11315
11316/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11317#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11318 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11319/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11320#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11321 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11322/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11323#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11324 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11325/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11326#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11327 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11328/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11329 * stack. */
11330#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11331 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11332/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11333#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11334 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11335
11336/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11337#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11338 iemFpuStackUnderflow(pVCpu, a_iStDst)
11339/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11340 * stack. */
11341#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11342 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11343/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11344 * FPUDS. */
11345#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11346 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11347/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11348 * FPUDS. Pops stack. */
11349#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11350 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11351/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11352 * stack twice. */
11353#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11354 iemFpuStackUnderflowThenPopPop(pVCpu)
11355/** Raises a FPU stack underflow exception for an instruction pushing a result
11356 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11357#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11358 iemFpuStackPushUnderflow(pVCpu)
11359/** Raises a FPU stack underflow exception for an instruction pushing a result
11360 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11361#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11362 iemFpuStackPushUnderflowTwo(pVCpu)
11363
11364/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11365 * FPUIP, FPUCS and FOP. */
11366#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11367 iemFpuStackPushOverflow(pVCpu)
11368/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11369 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11370#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11371 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11372/** Prepares for using the FPU state.
11373 * Ensures that we can use the host FPU in the current context (RC+R0).
11374 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11375#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11376/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
11377#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11378/** Actualizes the guest FPU state so it can be accessed and modified. */
11379#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11380
11381/** Prepares for using the SSE state.
11382 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11383 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11384#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11385/** Actualizes the guest XMM0..15 register state for read-only access. */
11386#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11387/** Actualizes the guest XMM0..15 register state for read-write access. */
11388#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11389
11390/**
11391 * Calls a MMX assembly implementation taking two visible arguments.
11392 *
11393 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11394 * @param a0 The first extra argument.
11395 * @param a1 The second extra argument.
11396 */
11397#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11398 do { \
11399 IEM_MC_PREPARE_FPU_USAGE(); \
11400 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11401 } while (0)
11402
11403/**
11404 * Calls a MMX assembly implementation taking three visible arguments.
11405 *
11406 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11407 * @param a0 The first extra argument.
11408 * @param a1 The second extra argument.
11409 * @param a2 The third extra argument.
11410 */
11411#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11412 do { \
11413 IEM_MC_PREPARE_FPU_USAGE(); \
11414 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11415 } while (0)
11416
11417
11418/**
11419 * Calls a SSE assembly implementation taking two visible arguments.
11420 *
11421 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11422 * @param a0 The first extra argument.
11423 * @param a1 The second extra argument.
11424 */
11425#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11426 do { \
11427 IEM_MC_PREPARE_SSE_USAGE(); \
11428 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11429 } while (0)
11430
11431/**
11432 * Calls a SSE assembly implementation taking three visible arguments.
11433 *
11434 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11435 * @param a0 The first extra argument.
11436 * @param a1 The second extra argument.
11437 * @param a2 The third extra argument.
11438 */
11439#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11440 do { \
11441 IEM_MC_PREPARE_SSE_USAGE(); \
11442 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11443 } while (0)
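/* A rough sketch of how the SSE call helper is typically combined with the
 * XREG reference macros for a register-to-register form.  IEM_MC_BEGIN,
 * IEM_MC_ARG, IEM_MC_ADVANCE_RIP, IEM_MC_END and the iemAImpl_pxor_u128 worker
 * are assumed from elsewhere in IEM, and the register indices are literals for
 * brevity.  Illustrative only, not part of the build. */
#if 0
IEM_MC_BEGIN(2, 0);
IEM_MC_ARG(uint128_t *,       pDst, 0);
IEM_MC_ARG(uint128_t const *, pSrc, 1);
IEM_MC_PREPARE_SSE_USAGE();                  /* host SSE usable + guest state up to date */
IEM_MC_REF_XREG_U128(pDst, 0 /*iXRegDst*/);
IEM_MC_REF_XREG_U128_CONST(pSrc, 1 /*iXRegSrc*/);
IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif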
11444
11445/** @note Not for IOPL or IF testing. */
11446#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11447/** @note Not for IOPL or IF testing. */
11448#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11449/** @note Not for IOPL or IF testing. */
11450#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11451/** @note Not for IOPL or IF testing. */
11452#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11453/** @note Not for IOPL or IF testing. */
11454#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11455 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11456 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11457/** @note Not for IOPL or IF testing. */
11458#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11459 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11460 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11461/** @note Not for IOPL or IF testing. */
11462#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11463 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11464 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11465 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11466/** @note Not for IOPL or IF testing. */
11467#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11468 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11469 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11470 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11471#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11472#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11473#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11474/** @note Not for IOPL or IF testing. */
11475#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11476 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11477 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11478/** @note Not for IOPL or IF testing. */
11479#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11480 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11481 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11482/** @note Not for IOPL or IF testing. */
11483#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11484 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11485 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11486/** @note Not for IOPL or IF testing. */
11487#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11488 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11489 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11490/** @note Not for IOPL or IF testing. */
11491#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11492 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11493 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11494/** @note Not for IOPL or IF testing. */
11495#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11496 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11497 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11498#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11499#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11500
11501#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11502 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11503#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11504 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11505#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11506 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11507#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11508 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11509#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11510 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11511#define IEM_MC_IF_FCW_IM() \
11512 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11513
11514#define IEM_MC_ELSE() } else {
11515#define IEM_MC_ENDIF() } do {} while (0)
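/* The IEM_MC_IF_* macros open a brace that IEM_MC_ELSE and IEM_MC_ENDIF close,
 * so a conditional micro-op sequence reads like structured code.  A rough
 * sketch of an "ST0 = ST0 op ST1" flow tying them together with the FPU
 * call/result helpers above.  IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_ARG,
 * IEM_MC_ARG_LOCAL_REF, IEM_MC_MAYBE_RAISE_FPU_XCPT, IEM_MC_ADVANCE_RIP,
 * IEM_MC_END, the IEMFPURESULT/PCRTFLOAT80U types and the
 * iemAImpl_fadd_r80_by_r80 worker are assumed from elsewhere in IEM.
 * Illustrative only, not part of the build. */
#if 0
IEM_MC_BEGIN(3, 1);
IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_UNDERFLOW(0);
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif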
11516
11517/** @} */
11518
11519
11520/** @name Opcode Debug Helpers.
11521 * @{
11522 */
11523#ifdef VBOX_WITH_STATISTICS
11524# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11525#else
11526# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11527#endif
11528
11529#ifdef DEBUG
11530# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11531 do { \
11532 IEMOP_INC_STATS(a_Stats); \
11533 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11534 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11535 } while (0)
11536
11537# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11538 do { \
11539 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11540 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11541 (void)RT_CONCAT(OP_,a_Upper); \
11542 (void)(a_fDisHints); \
11543 (void)(a_fIemHints); \
11544 } while (0)
11545
11546# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11547 do { \
11548 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11549 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11550 (void)RT_CONCAT(OP_,a_Upper); \
11551 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11552 (void)(a_fDisHints); \
11553 (void)(a_fIemHints); \
11554 } while (0)
11555
11556# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11557 do { \
11558 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11559 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11560 (void)RT_CONCAT(OP_,a_Upper); \
11561 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11562 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11563 (void)(a_fDisHints); \
11564 (void)(a_fIemHints); \
11565 } while (0)
11566
11567# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11568 do { \
11569 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11570 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11571 (void)RT_CONCAT(OP_,a_Upper); \
11572 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11573 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11574 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11575 (void)(a_fDisHints); \
11576 (void)(a_fIemHints); \
11577 } while (0)
11578
11579# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11580 do { \
11581 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11582 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11583 (void)RT_CONCAT(OP_,a_Upper); \
11584 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11585 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11586 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11587 (void)RT_CONCAT(OP_PARM_,a_Op4); \
11588 (void)(a_fDisHints); \
11589 (void)(a_fIemHints); \
11590 } while (0)
11591
11592#else
11593# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
11594
11595# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11596 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11597# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11598 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11599# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11600 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11601# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11602 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11603# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11604 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
11605
11606#endif
11607
11608#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11609 IEMOP_MNEMONIC0EX(a_Lower, \
11610 #a_Lower, \
11611 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
11612#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11613 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
11614 #a_Lower " " #a_Op1, \
11615 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
11616#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11617 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
11618 #a_Lower " " #a_Op1 "," #a_Op2, \
11619 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
11620#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11621 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
11622 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
11623 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
11624#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11625 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
11626 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
11627 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
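/* A rough usage sketch of the mnemonic helpers: a decoder names the
 * instruction up front, which bumps a per-instruction statistics counter and,
 * in debug builds, emits the Log4 decode line; the IEMOP_MNEMONICn wrappers
 * additionally reference the IEMOPFORM_/OP_/OP_PARM_ constants so typos fail
 * at compile time.  The stats member and mnemonic below are hypothetical and
 * FNIEMOP_DEF is assumed from elsewhere in IEM.  Illustrative only, not part
 * of the build. */
#if 0
FNIEMOP_DEF(iemOp_SketchHypotheticalInsn)
{
    IEMOP_MNEMONIC(sketch_insn, "sketch");  /* hypothetical stats member + mnemonic text */
    return IEMOP_RAISE_INVALID_OPCODE();    /* body omitted; real decoding would go here */
}
#endif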
11628
11629/** @} */
11630
11631
11632/** @name Opcode Helpers.
11633 * @{
11634 */
11635
11636#ifdef IN_RING3
11637# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11638 do { \
11639 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11640 else \
11641 { \
11642 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11643 return IEMOP_RAISE_INVALID_OPCODE(); \
11644 } \
11645 } while (0)
11646#else
11647# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11648 do { \
11649 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11650 else return IEMOP_RAISE_INVALID_OPCODE(); \
11651 } while (0)
11652#endif
11653
11654/** The instruction requires a 186 or later. */
11655#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11656# define IEMOP_HLP_MIN_186() do { } while (0)
11657#else
11658# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11659#endif
11660
11661/** The instruction requires a 286 or later. */
11662#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11663# define IEMOP_HLP_MIN_286() do { } while (0)
11664#else
11665# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11666#endif
11667
11668/** The instruction requires a 386 or later. */
11669#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11670# define IEMOP_HLP_MIN_386() do { } while (0)
11671#else
11672# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11673#endif
11674
11675/** The instruction requires a 386 or later if the given expression is true. */
11676#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11677# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11678#else
11679# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11680#endif
11681
11682/** The instruction requires a 486 or later. */
11683#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11684# define IEMOP_HLP_MIN_486() do { } while (0)
11685#else
11686# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11687#endif
11688
11689/** The instruction requires a Pentium (586) or later. */
11690#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
11691# define IEMOP_HLP_MIN_586() do { } while (0)
11692#else
11693# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
11694#endif
11695
11696/** The instruction requires a PentiumPro (686) or later. */
11697#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
11698# define IEMOP_HLP_MIN_686() do { } while (0)
11699#else
11700# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
11701#endif
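/* A rough usage sketch: a decoder for an instruction introduced with the 386
 * drops one of the helpers above at the start of its body so the instruction
 * raises #UD when IEM is configured to emulate an older CPU (the mnemonic line
 * is a hypothetical example):
 *
 *     IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *     IEMOP_HLP_MIN_386();
 *
 * On builds with IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386 the check compiles
 * away entirely. */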
11702
11703
11704/** The instruction raises an \#UD in real and V8086 mode. */
11705#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11706 do \
11707 { \
11708 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11709 return IEMOP_RAISE_INVALID_OPCODE(); \
11710 } while (0)
11711
11712#if 0
11713#ifdef VBOX_WITH_NESTED_HWVIRT
11714/** The instruction raises an \#UD when SVM is not enabled. */
11715#define IEMOP_HLP_NEEDS_SVM_ENABLED() \
11716 do \
11717 { \
11718 if (!IEM_IS_SVM_ENABLED(pVCpu)) \
11719 return IEMOP_RAISE_INVALID_OPCODE(); \
11720 } while (0)
11721#endif
11722#endif
11723
11724/** The instruction is not available in 64-bit mode; throws \#UD if we're in
11725 * 64-bit mode. */
11726#define IEMOP_HLP_NO_64BIT() \
11727 do \
11728 { \
11729 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11730 return IEMOP_RAISE_INVALID_OPCODE(); \
11731 } while (0)
11732
11733/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
11734 * 64-bit mode. */
11735#define IEMOP_HLP_ONLY_64BIT() \
11736 do \
11737 { \
11738 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11739 return IEMOP_RAISE_INVALID_OPCODE(); \
11740 } while (0)
11741
11742/** The instruction defaults to 64-bit operand size in 64-bit mode. */
11743#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11744 do \
11745 { \
11746 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11747 iemRecalEffOpSize64Default(pVCpu); \
11748 } while (0)
11749
11750/** The instruction has 64-bit operand size in 64-bit mode. */
11751#define IEMOP_HLP_64BIT_OP_SIZE() \
11752 do \
11753 { \
11754 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11755 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11756 } while (0)
11757
11758/** Only a REX prefix immediately preceding the first opcode byte takes
11759 * effect. This macro helps ensure that, and also logs bad guest code. */
11760#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11761 do \
11762 { \
11763 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11764 { \
11765 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11766 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11767 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11768 pVCpu->iem.s.uRexB = 0; \
11769 pVCpu->iem.s.uRexIndex = 0; \
11770 pVCpu->iem.s.uRexReg = 0; \
11771 iemRecalEffOpSize(pVCpu); \
11772 } \
11773 } while (0)
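/* Worked example of the rule above: in the byte sequence 48 F3 A5, the REX.W
 * prefix (48) is followed by another prefix (F3, rep) before the opcode byte
 * (A5, movs), so the REX prefix no longer immediately precedes the opcode and
 * its effects must be dropped -- the operand size falls back to 32-bit and the
 * instruction executes as rep movsd rather than rep movsq.  This macro drops
 * those effects (and logs the oddity) when that situation is detected during
 * prefix decoding. */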
11774
11775/**
11776 * Done decoding.
11777 */
11778#define IEMOP_HLP_DONE_DECODING() \
11779 do \
11780 { \
11781 /*nothing for now, maybe later... */ \
11782 } while (0)
11783
11784/**
11785 * Done decoding, raise \#UD exception if lock prefix present.
11786 */
11787#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11788 do \
11789 { \
11790 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11791 { /* likely */ } \
11792 else \
11793 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11794 } while (0)
11795#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11796 do \
11797 { \
11798 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11799 { /* likely */ } \
11800 else \
11801 { \
11802 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11803 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11804 } \
11805 } while (0)
11806#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11807 do \
11808 { \
11809 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11810 { /* likely */ } \
11811 else \
11812 { \
11813 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11814 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11815 } \
11816 } while (0)
11817
11818/**
11819 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11820 * are present.
11821 */
11822#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11823 do \
11824 { \
11825 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11826 { /* likely */ } \
11827 else \
11828 return IEMOP_RAISE_INVALID_OPCODE(); \
11829 } while (0)
11830
11831
11832/**
11833 * Calculates the effective address of a ModR/M memory operand.
11834 *
11835 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11836 *
11837 * @return Strict VBox status code.
11838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11839 * @param bRm The ModRM byte.
11840 * @param cbImm The size of any immediate following the
11841 * effective address opcode bytes. Important for
11842 * RIP relative addressing.
11843 * @param pGCPtrEff Where to return the effective address.
11844 */
11845IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11846{
11847 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11848 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11849# define SET_SS_DEF() \
11850 do \
11851 { \
11852 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11853 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11854 } while (0)
11855
11856 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11857 {
11858/** @todo Check the effective address size crap! */
11859 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11860 {
11861 uint16_t u16EffAddr;
11862
11863 /* Handle the disp16 form with no registers first. */
11864 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11865 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11866 else
11867 {
11868 /* Get the displacement. */
11869 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11870 {
11871 case 0: u16EffAddr = 0; break;
11872 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11873 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11874 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11875 }
11876
11877 /* Add the base and index registers to the disp. */
11878 switch (bRm & X86_MODRM_RM_MASK)
11879 {
11880 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11881 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11882 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11883 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11884 case 4: u16EffAddr += pCtx->si; break;
11885 case 5: u16EffAddr += pCtx->di; break;
11886 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11887 case 7: u16EffAddr += pCtx->bx; break;
11888 }
11889 }
11890
11891 *pGCPtrEff = u16EffAddr;
11892 }
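/* Worked 16-bit example: bRm with mod=01, rm=010 and a disp8 of 0x10 decodes
 * as [bp+si+0x10]; the displacement switch sign-extends the 0x10, the register
 * switch adds bp+si (case 2) and calls SET_SS_DEF(), so SS becomes the default
 * segment unless a segment prefix overrides it. */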
11893 else
11894 {
11895 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11896 uint32_t u32EffAddr;
11897
11898 /* Handle the disp32 form with no registers first. */
11899 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11900 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11901 else
11902 {
11903 /* Get the register (or SIB) value. */
11904 switch ((bRm & X86_MODRM_RM_MASK))
11905 {
11906 case 0: u32EffAddr = pCtx->eax; break;
11907 case 1: u32EffAddr = pCtx->ecx; break;
11908 case 2: u32EffAddr = pCtx->edx; break;
11909 case 3: u32EffAddr = pCtx->ebx; break;
11910 case 4: /* SIB */
11911 {
11912 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11913
11914 /* Get the index and scale it. */
11915 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11916 {
11917 case 0: u32EffAddr = pCtx->eax; break;
11918 case 1: u32EffAddr = pCtx->ecx; break;
11919 case 2: u32EffAddr = pCtx->edx; break;
11920 case 3: u32EffAddr = pCtx->ebx; break;
11921 case 4: u32EffAddr = 0; /*none */ break;
11922 case 5: u32EffAddr = pCtx->ebp; break;
11923 case 6: u32EffAddr = pCtx->esi; break;
11924 case 7: u32EffAddr = pCtx->edi; break;
11925 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11926 }
11927 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11928
11929 /* add base */
11930 switch (bSib & X86_SIB_BASE_MASK)
11931 {
11932 case 0: u32EffAddr += pCtx->eax; break;
11933 case 1: u32EffAddr += pCtx->ecx; break;
11934 case 2: u32EffAddr += pCtx->edx; break;
11935 case 3: u32EffAddr += pCtx->ebx; break;
11936 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11937 case 5:
11938 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11939 {
11940 u32EffAddr += pCtx->ebp;
11941 SET_SS_DEF();
11942 }
11943 else
11944 {
11945 uint32_t u32Disp;
11946 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11947 u32EffAddr += u32Disp;
11948 }
11949 break;
11950 case 6: u32EffAddr += pCtx->esi; break;
11951 case 7: u32EffAddr += pCtx->edi; break;
11952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11953 }
11954 break;
11955 }
11956 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11957 case 6: u32EffAddr = pCtx->esi; break;
11958 case 7: u32EffAddr = pCtx->edi; break;
11959 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11960 }
11961
11962 /* Get and add the displacement. */
11963 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11964 {
11965 case 0:
11966 break;
11967 case 1:
11968 {
11969 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11970 u32EffAddr += i8Disp;
11971 break;
11972 }
11973 case 2:
11974 {
11975 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11976 u32EffAddr += u32Disp;
11977 break;
11978 }
11979 default:
11980 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11981 }
11982
11983 }
11984 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11985 *pGCPtrEff = u32EffAddr;
11986 else
11987 {
11988 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11989 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11990 }
11991 }
11992 }
11993 else
11994 {
11995 uint64_t u64EffAddr;
11996
11997 /* Handle the rip+disp32 form with no registers first. */
11998 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11999 {
12000 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12001 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12002 }
12003 else
12004 {
12005 /* Get the register (or SIB) value. */
12006 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12007 {
12008 case 0: u64EffAddr = pCtx->rax; break;
12009 case 1: u64EffAddr = pCtx->rcx; break;
12010 case 2: u64EffAddr = pCtx->rdx; break;
12011 case 3: u64EffAddr = pCtx->rbx; break;
12012 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12013 case 6: u64EffAddr = pCtx->rsi; break;
12014 case 7: u64EffAddr = pCtx->rdi; break;
12015 case 8: u64EffAddr = pCtx->r8; break;
12016 case 9: u64EffAddr = pCtx->r9; break;
12017 case 10: u64EffAddr = pCtx->r10; break;
12018 case 11: u64EffAddr = pCtx->r11; break;
12019 case 13: u64EffAddr = pCtx->r13; break;
12020 case 14: u64EffAddr = pCtx->r14; break;
12021 case 15: u64EffAddr = pCtx->r15; break;
12022 /* SIB */
12023 case 4:
12024 case 12:
12025 {
12026 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12027
12028 /* Get the index and scale it. */
12029 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12030 {
12031 case 0: u64EffAddr = pCtx->rax; break;
12032 case 1: u64EffAddr = pCtx->rcx; break;
12033 case 2: u64EffAddr = pCtx->rdx; break;
12034 case 3: u64EffAddr = pCtx->rbx; break;
12035 case 4: u64EffAddr = 0; /*none */ break;
12036 case 5: u64EffAddr = pCtx->rbp; break;
12037 case 6: u64EffAddr = pCtx->rsi; break;
12038 case 7: u64EffAddr = pCtx->rdi; break;
12039 case 8: u64EffAddr = pCtx->r8; break;
12040 case 9: u64EffAddr = pCtx->r9; break;
12041 case 10: u64EffAddr = pCtx->r10; break;
12042 case 11: u64EffAddr = pCtx->r11; break;
12043 case 12: u64EffAddr = pCtx->r12; break;
12044 case 13: u64EffAddr = pCtx->r13; break;
12045 case 14: u64EffAddr = pCtx->r14; break;
12046 case 15: u64EffAddr = pCtx->r15; break;
12047 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12048 }
12049 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12050
12051 /* add base */
12052 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12053 {
12054 case 0: u64EffAddr += pCtx->rax; break;
12055 case 1: u64EffAddr += pCtx->rcx; break;
12056 case 2: u64EffAddr += pCtx->rdx; break;
12057 case 3: u64EffAddr += pCtx->rbx; break;
12058 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12059 case 6: u64EffAddr += pCtx->rsi; break;
12060 case 7: u64EffAddr += pCtx->rdi; break;
12061 case 8: u64EffAddr += pCtx->r8; break;
12062 case 9: u64EffAddr += pCtx->r9; break;
12063 case 10: u64EffAddr += pCtx->r10; break;
12064 case 11: u64EffAddr += pCtx->r11; break;
12065 case 12: u64EffAddr += pCtx->r12; break;
12066 case 14: u64EffAddr += pCtx->r14; break;
12067 case 15: u64EffAddr += pCtx->r15; break;
12068 /* complicated encodings */
12069 case 5:
12070 case 13:
12071 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12072 {
12073 if (!pVCpu->iem.s.uRexB)
12074 {
12075 u64EffAddr += pCtx->rbp;
12076 SET_SS_DEF();
12077 }
12078 else
12079 u64EffAddr += pCtx->r13;
12080 }
12081 else
12082 {
12083 uint32_t u32Disp;
12084 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12085 u64EffAddr += (int32_t)u32Disp;
12086 }
12087 break;
12088 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12089 }
12090 break;
12091 }
12092 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12093 }
12094
12095 /* Get and add the displacement. */
12096 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12097 {
12098 case 0:
12099 break;
12100 case 1:
12101 {
12102 int8_t i8Disp;
12103 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12104 u64EffAddr += i8Disp;
12105 break;
12106 }
12107 case 2:
12108 {
12109 uint32_t u32Disp;
12110 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12111 u64EffAddr += (int32_t)u32Disp;
12112 break;
12113 }
12114 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12115 }
12116
12117 }
12118
12119 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12120 *pGCPtrEff = u64EffAddr;
12121 else
12122 {
12123 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12124 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12125 }
12126 }
12127
12128 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12129 return VINF_SUCCESS;
12130}
12131
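/*
 * A minimal standalone sketch (hypothetical names, not IEM code) of what the
 * 16-bit branch above computes: each r/m value selects one of eight base/index
 * register pairs which are added to the displacement, with 16-bit wrap-around.
 *
 *    #include <stdint.h>
 *
 *    // auRegs16 layout assumed here: [0]=BX [1]=BP [2]=SI [3]=DI.
 *    static uint16_t CalcEa16(uint8_t bRm, int16_t iDisp, uint16_t const auRegs16[4])
 *    {
 *        static uint8_t const s_aiBase[8]  = { 0, 0, 1, 1, 0xff, 0xff, 1,    0    };
 *        static uint8_t const s_aiIndex[8] = { 2, 3, 2, 3, 2,    3,    0xff, 0xff };
 *        uint16_t uEa = (uint16_t)iDisp;
 *        if (s_aiBase[bRm & 7]  != 0xff) uEa += auRegs16[s_aiBase[bRm & 7]];
 *        if (s_aiIndex[bRm & 7] != 0xff) uEa += auRegs16[s_aiIndex[bRm & 7]];
 *        return uEa; // truncation to uint16_t gives the 16-bit wrap-around
 *    }
 *
 * (The mod=0, r/m=6 bare disp16 form is handled separately above and adds no
 *  base register.)
 */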
12132
12133/**
12134 * Calculates the effective address of a ModR/M memory operand.
12135 *
12136 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12137 *
12138 * @return Strict VBox status code.
12139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12140 * @param bRm The ModRM byte.
12141 * @param cbImm The size of any immediate following the
12142 * effective address opcode bytes. Important for
12143 * RIP relative addressing.
12144 * @param pGCPtrEff Where to return the effective address.
12145 * @param pGCPtrEff Where to return the effective address.
12146 * @param offRsp The displacement to apply when ESP/RSP is used as the base register.
12146 */
12147IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12148{
12149 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12150 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12151# define SET_SS_DEF() \
12152 do \
12153 { \
12154 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12155 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12156 } while (0)
12157
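/*
 * The SET_SS_DEF() rule in a nutshell, as a standalone sketch (hypothetical
 * names, not IEM code): BP/EBP/RBP and ESP/RSP based addressing defaults to SS
 * instead of DS, but an explicit segment prefix always wins, which is why the
 * macro first checks for one.
 *
 *    static uint8_t CalcDefaultSeg(int fBpOrSpBase, int fHasSegPrefix, uint8_t iPrefixedSeg)
 *    {
 *        if (fHasSegPrefix)
 *            return iPrefixedSeg;       // explicit prefix overrides the default
 *        return fBpOrSpBase ? 2 : 3;    // 2 = SS, 3 = DS (standard sreg numbering)
 *    }
 */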
12158 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12159 {
12160/** @todo Check the effective address size crap! */
12161 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12162 {
12163 uint16_t u16EffAddr;
12164
12165 /* Handle the disp16 form with no registers first. */
12166 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12167 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12168 else
12169 {
12170 /* Get the displacement. */
12171 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12172 {
12173 case 0: u16EffAddr = 0; break;
12174 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12175 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12176 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12177 }
12178
12179 /* Add the base and index registers to the disp. */
12180 switch (bRm & X86_MODRM_RM_MASK)
12181 {
12182 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12183 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12184 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12185 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12186 case 4: u16EffAddr += pCtx->si; break;
12187 case 5: u16EffAddr += pCtx->di; break;
12188 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12189 case 7: u16EffAddr += pCtx->bx; break;
12190 }
12191 }
12192
12193 *pGCPtrEff = u16EffAddr;
12194 }
12195 else
12196 {
12197 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12198 uint32_t u32EffAddr;
12199
12200 /* Handle the disp32 form with no registers first. */
12201 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12202 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12203 else
12204 {
12205 /* Get the register (or SIB) value. */
12206 switch ((bRm & X86_MODRM_RM_MASK))
12207 {
12208 case 0: u32EffAddr = pCtx->eax; break;
12209 case 1: u32EffAddr = pCtx->ecx; break;
12210 case 2: u32EffAddr = pCtx->edx; break;
12211 case 3: u32EffAddr = pCtx->ebx; break;
12212 case 4: /* SIB */
12213 {
12214 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12215
12216 /* Get the index and scale it. */
12217 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12218 {
12219 case 0: u32EffAddr = pCtx->eax; break;
12220 case 1: u32EffAddr = pCtx->ecx; break;
12221 case 2: u32EffAddr = pCtx->edx; break;
12222 case 3: u32EffAddr = pCtx->ebx; break;
12223 case 4: u32EffAddr = 0; /*none */ break;
12224 case 5: u32EffAddr = pCtx->ebp; break;
12225 case 6: u32EffAddr = pCtx->esi; break;
12226 case 7: u32EffAddr = pCtx->edi; break;
12227 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12228 }
12229 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12230
12231 /* add base */
12232 switch (bSib & X86_SIB_BASE_MASK)
12233 {
12234 case 0: u32EffAddr += pCtx->eax; break;
12235 case 1: u32EffAddr += pCtx->ecx; break;
12236 case 2: u32EffAddr += pCtx->edx; break;
12237 case 3: u32EffAddr += pCtx->ebx; break;
12238 case 4:
12239 u32EffAddr += pCtx->esp + offRsp;
12240 SET_SS_DEF();
12241 break;
12242 case 5:
12243 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12244 {
12245 u32EffAddr += pCtx->ebp;
12246 SET_SS_DEF();
12247 }
12248 else
12249 {
12250 uint32_t u32Disp;
12251 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12252 u32EffAddr += u32Disp;
12253 }
12254 break;
12255 case 6: u32EffAddr += pCtx->esi; break;
12256 case 7: u32EffAddr += pCtx->edi; break;
12257 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12258 }
12259 break;
12260 }
12261 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12262 case 6: u32EffAddr = pCtx->esi; break;
12263 case 7: u32EffAddr = pCtx->edi; break;
12264 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12265 }
12266
12267 /* Get and add the displacement. */
12268 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12269 {
12270 case 0:
12271 break;
12272 case 1:
12273 {
12274 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12275 u32EffAddr += i8Disp;
12276 break;
12277 }
12278 case 2:
12279 {
12280 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12281 u32EffAddr += u32Disp;
12282 break;
12283 }
12284 default:
12285 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12286 }
12287
12288 }
12289 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12290 *pGCPtrEff = u32EffAddr;
12291 else
12292 {
12293 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12294 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12295 }
12296 }
12297 }
12298 else
12299 {
12300 uint64_t u64EffAddr;
12301
12302 /* Handle the rip+disp32 form with no registers first. */
12303 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12304 {
12305 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12306 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12307 }
12308 else
12309 {
12310 /* Get the register (or SIB) value. */
12311 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12312 {
12313 case 0: u64EffAddr = pCtx->rax; break;
12314 case 1: u64EffAddr = pCtx->rcx; break;
12315 case 2: u64EffAddr = pCtx->rdx; break;
12316 case 3: u64EffAddr = pCtx->rbx; break;
12317 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12318 case 6: u64EffAddr = pCtx->rsi; break;
12319 case 7: u64EffAddr = pCtx->rdi; break;
12320 case 8: u64EffAddr = pCtx->r8; break;
12321 case 9: u64EffAddr = pCtx->r9; break;
12322 case 10: u64EffAddr = pCtx->r10; break;
12323 case 11: u64EffAddr = pCtx->r11; break;
12324 case 13: u64EffAddr = pCtx->r13; break;
12325 case 14: u64EffAddr = pCtx->r14; break;
12326 case 15: u64EffAddr = pCtx->r15; break;
12327 /* SIB */
12328 case 4:
12329 case 12:
12330 {
12331 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12332
12333 /* Get the index and scale it. */
12334 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12335 {
12336 case 0: u64EffAddr = pCtx->rax; break;
12337 case 1: u64EffAddr = pCtx->rcx; break;
12338 case 2: u64EffAddr = pCtx->rdx; break;
12339 case 3: u64EffAddr = pCtx->rbx; break;
12340 case 4: u64EffAddr = 0; /*none */ break;
12341 case 5: u64EffAddr = pCtx->rbp; break;
12342 case 6: u64EffAddr = pCtx->rsi; break;
12343 case 7: u64EffAddr = pCtx->rdi; break;
12344 case 8: u64EffAddr = pCtx->r8; break;
12345 case 9: u64EffAddr = pCtx->r9; break;
12346 case 10: u64EffAddr = pCtx->r10; break;
12347 case 11: u64EffAddr = pCtx->r11; break;
12348 case 12: u64EffAddr = pCtx->r12; break;
12349 case 13: u64EffAddr = pCtx->r13; break;
12350 case 14: u64EffAddr = pCtx->r14; break;
12351 case 15: u64EffAddr = pCtx->r15; break;
12352 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12353 }
12354 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12355
12356 /* add base */
12357 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12358 {
12359 case 0: u64EffAddr += pCtx->rax; break;
12360 case 1: u64EffAddr += pCtx->rcx; break;
12361 case 2: u64EffAddr += pCtx->rdx; break;
12362 case 3: u64EffAddr += pCtx->rbx; break;
12363 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12364 case 6: u64EffAddr += pCtx->rsi; break;
12365 case 7: u64EffAddr += pCtx->rdi; break;
12366 case 8: u64EffAddr += pCtx->r8; break;
12367 case 9: u64EffAddr += pCtx->r9; break;
12368 case 10: u64EffAddr += pCtx->r10; break;
12369 case 11: u64EffAddr += pCtx->r11; break;
12370 case 12: u64EffAddr += pCtx->r12; break;
12371 case 14: u64EffAddr += pCtx->r14; break;
12372 case 15: u64EffAddr += pCtx->r15; break;
12373 /* complicated encodings */
12374 case 5:
12375 case 13:
12376 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12377 {
12378 if (!pVCpu->iem.s.uRexB)
12379 {
12380 u64EffAddr += pCtx->rbp;
12381 SET_SS_DEF();
12382 }
12383 else
12384 u64EffAddr += pCtx->r13;
12385 }
12386 else
12387 {
12388 uint32_t u32Disp;
12389 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12390 u64EffAddr += (int32_t)u32Disp;
12391 }
12392 break;
12393 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12394 }
12395 break;
12396 }
12397 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12398 }
12399
12400 /* Get and add the displacement. */
12401 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12402 {
12403 case 0:
12404 break;
12405 case 1:
12406 {
12407 int8_t i8Disp;
12408 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12409 u64EffAddr += i8Disp;
12410 break;
12411 }
12412 case 2:
12413 {
12414 uint32_t u32Disp;
12415 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12416 u64EffAddr += (int32_t)u32Disp;
12417 break;
12418 }
12419 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12420 }
12421
12422 }
12423
12424 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12425 *pGCPtrEff = u64EffAddr;
12426 else
12427 {
12428 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12429 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12430 }
12431 }
12432
12433 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12434 return VINF_SUCCESS;
12435}
12436
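/*
 * Standalone sketch (hypothetical names, not IEM code) of the RIP relative
 * form handled first in 64-bit mode above: the disp32 is relative to the end
 * of the instruction, and since any immediate operand has not been fetched yet
 * at this point, its size must be added explicitly - that is what the cbImm
 * parameter is for.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t CalcRipRelEa(uint64_t uRipInstrStart, uint8_t cbDecodedSoFar,
 *                                 uint8_t cbImm, int32_t i32Disp)
 *    {
 *        // effective address = next-instruction RIP + sign-extended disp32
 *        return uRipInstrStart + cbDecodedSoFar + cbImm + (int64_t)i32Disp;
 *    }
 */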
12437
12438#ifdef IEM_WITH_SETJMP
12439/**
12440 * Calculates the effective address of a ModR/M memory operand.
12441 *
12442 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12443 *
12444 * May longjmp on internal error.
12445 *
12446 * @return The effective address.
12447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12448 * @param bRm The ModRM byte.
12449 * @param cbImm The size of any immediate following the
12450 * effective address opcode bytes. Important for
12451 * RIP relative addressing.
12452 */
12453IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12454{
12455 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12456 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12457# define SET_SS_DEF() \
12458 do \
12459 { \
12460 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12461 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12462 } while (0)
12463
12464 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12465 {
12466/** @todo Check the effective address size crap! */
12467 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12468 {
12469 uint16_t u16EffAddr;
12470
12471 /* Handle the disp16 form with no registers first. */
12472 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12473 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12474 else
12475 {
12476 /* Get the displacement. */
12477 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12478 {
12479 case 0: u16EffAddr = 0; break;
12480 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12481 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12482 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12483 }
12484
12485 /* Add the base and index registers to the disp. */
12486 switch (bRm & X86_MODRM_RM_MASK)
12487 {
12488 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12489 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12490 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12491 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12492 case 4: u16EffAddr += pCtx->si; break;
12493 case 5: u16EffAddr += pCtx->di; break;
12494 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12495 case 7: u16EffAddr += pCtx->bx; break;
12496 }
12497 }
12498
12499 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12500 return u16EffAddr;
12501 }
12502
12503 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12504 uint32_t u32EffAddr;
12505
12506 /* Handle the disp32 form with no registers first. */
12507 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12508 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12509 else
12510 {
12511 /* Get the register (or SIB) value. */
12512 switch ((bRm & X86_MODRM_RM_MASK))
12513 {
12514 case 0: u32EffAddr = pCtx->eax; break;
12515 case 1: u32EffAddr = pCtx->ecx; break;
12516 case 2: u32EffAddr = pCtx->edx; break;
12517 case 3: u32EffAddr = pCtx->ebx; break;
12518 case 4: /* SIB */
12519 {
12520 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12521
12522 /* Get the index and scale it. */
12523 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12524 {
12525 case 0: u32EffAddr = pCtx->eax; break;
12526 case 1: u32EffAddr = pCtx->ecx; break;
12527 case 2: u32EffAddr = pCtx->edx; break;
12528 case 3: u32EffAddr = pCtx->ebx; break;
12529 case 4: u32EffAddr = 0; /*none */ break;
12530 case 5: u32EffAddr = pCtx->ebp; break;
12531 case 6: u32EffAddr = pCtx->esi; break;
12532 case 7: u32EffAddr = pCtx->edi; break;
12533 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12534 }
12535 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12536
12537 /* add base */
12538 switch (bSib & X86_SIB_BASE_MASK)
12539 {
12540 case 0: u32EffAddr += pCtx->eax; break;
12541 case 1: u32EffAddr += pCtx->ecx; break;
12542 case 2: u32EffAddr += pCtx->edx; break;
12543 case 3: u32EffAddr += pCtx->ebx; break;
12544 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12545 case 5:
12546 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12547 {
12548 u32EffAddr += pCtx->ebp;
12549 SET_SS_DEF();
12550 }
12551 else
12552 {
12553 uint32_t u32Disp;
12554 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12555 u32EffAddr += u32Disp;
12556 }
12557 break;
12558 case 6: u32EffAddr += pCtx->esi; break;
12559 case 7: u32EffAddr += pCtx->edi; break;
12560 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12561 }
12562 break;
12563 }
12564 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12565 case 6: u32EffAddr = pCtx->esi; break;
12566 case 7: u32EffAddr = pCtx->edi; break;
12567 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12568 }
12569
12570 /* Get and add the displacement. */
12571 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12572 {
12573 case 0:
12574 break;
12575 case 1:
12576 {
12577 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12578 u32EffAddr += i8Disp;
12579 break;
12580 }
12581 case 2:
12582 {
12583 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12584 u32EffAddr += u32Disp;
12585 break;
12586 }
12587 default:
12588 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
12589 }
12590 }
12591
12592 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12593 {
12594 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12595 return u32EffAddr;
12596 }
12597 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12598 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12599 return u32EffAddr & UINT16_MAX;
12600 }
12601
12602 uint64_t u64EffAddr;
12603
12604 /* Handle the rip+disp32 form with no registers first. */
12605 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12606 {
12607 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12608 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12609 }
12610 else
12611 {
12612 /* Get the register (or SIB) value. */
12613 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12614 {
12615 case 0: u64EffAddr = pCtx->rax; break;
12616 case 1: u64EffAddr = pCtx->rcx; break;
12617 case 2: u64EffAddr = pCtx->rdx; break;
12618 case 3: u64EffAddr = pCtx->rbx; break;
12619 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12620 case 6: u64EffAddr = pCtx->rsi; break;
12621 case 7: u64EffAddr = pCtx->rdi; break;
12622 case 8: u64EffAddr = pCtx->r8; break;
12623 case 9: u64EffAddr = pCtx->r9; break;
12624 case 10: u64EffAddr = pCtx->r10; break;
12625 case 11: u64EffAddr = pCtx->r11; break;
12626 case 13: u64EffAddr = pCtx->r13; break;
12627 case 14: u64EffAddr = pCtx->r14; break;
12628 case 15: u64EffAddr = pCtx->r15; break;
12629 /* SIB */
12630 case 4:
12631 case 12:
12632 {
12633 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12634
12635 /* Get the index and scale it. */
12636 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12637 {
12638 case 0: u64EffAddr = pCtx->rax; break;
12639 case 1: u64EffAddr = pCtx->rcx; break;
12640 case 2: u64EffAddr = pCtx->rdx; break;
12641 case 3: u64EffAddr = pCtx->rbx; break;
12642 case 4: u64EffAddr = 0; /*none */ break;
12643 case 5: u64EffAddr = pCtx->rbp; break;
12644 case 6: u64EffAddr = pCtx->rsi; break;
12645 case 7: u64EffAddr = pCtx->rdi; break;
12646 case 8: u64EffAddr = pCtx->r8; break;
12647 case 9: u64EffAddr = pCtx->r9; break;
12648 case 10: u64EffAddr = pCtx->r10; break;
12649 case 11: u64EffAddr = pCtx->r11; break;
12650 case 12: u64EffAddr = pCtx->r12; break;
12651 case 13: u64EffAddr = pCtx->r13; break;
12652 case 14: u64EffAddr = pCtx->r14; break;
12653 case 15: u64EffAddr = pCtx->r15; break;
12654 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12655 }
12656 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12657
12658 /* add base */
12659 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12660 {
12661 case 0: u64EffAddr += pCtx->rax; break;
12662 case 1: u64EffAddr += pCtx->rcx; break;
12663 case 2: u64EffAddr += pCtx->rdx; break;
12664 case 3: u64EffAddr += pCtx->rbx; break;
12665 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12666 case 6: u64EffAddr += pCtx->rsi; break;
12667 case 7: u64EffAddr += pCtx->rdi; break;
12668 case 8: u64EffAddr += pCtx->r8; break;
12669 case 9: u64EffAddr += pCtx->r9; break;
12670 case 10: u64EffAddr += pCtx->r10; break;
12671 case 11: u64EffAddr += pCtx->r11; break;
12672 case 12: u64EffAddr += pCtx->r12; break;
12673 case 14: u64EffAddr += pCtx->r14; break;
12674 case 15: u64EffAddr += pCtx->r15; break;
12675 /* complicated encodings */
12676 case 5:
12677 case 13:
12678 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12679 {
12680 if (!pVCpu->iem.s.uRexB)
12681 {
12682 u64EffAddr += pCtx->rbp;
12683 SET_SS_DEF();
12684 }
12685 else
12686 u64EffAddr += pCtx->r13;
12687 }
12688 else
12689 {
12690 uint32_t u32Disp;
12691 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12692 u64EffAddr += (int32_t)u32Disp;
12693 }
12694 break;
12695 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12696 }
12697 break;
12698 }
12699 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12700 }
12701
12702 /* Get and add the displacement. */
12703 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12704 {
12705 case 0:
12706 break;
12707 case 1:
12708 {
12709 int8_t i8Disp;
12710 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12711 u64EffAddr += i8Disp;
12712 break;
12713 }
12714 case 2:
12715 {
12716 uint32_t u32Disp;
12717 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12718 u64EffAddr += (int32_t)u32Disp;
12719 break;
12720 }
12721 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
12722 }
12723
12724 }
12725
12726 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12727 {
12728 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12729 return u64EffAddr;
12730 }
12731 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12732 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12733 return u64EffAddr & UINT32_MAX;
12734}
12735#endif /* IEM_WITH_SETJMP */
12736
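/*
 * Standalone sketch (hypothetical names, not IEM code) of the SIB decoding the
 * three helpers above share: scale*index + base, with REX.X/REX.B supplying
 * bit 3 of the index/base register numbers in 64-bit mode.
 *
 *    #include <stdint.h>
 *
 *    static uint64_t CalcSibEa(uint8_t bSib, int fRexX, int fRexB, uint64_t const auGprs[16])
 *    {
 *        uint8_t const iIndex = ((bSib >> 3) & 7) | (fRexX ? 8 : 0);
 *        uint8_t const iBase  = ( bSib       & 7) | (fRexB ? 8 : 0);
 *        uint8_t const cShift = (bSib >> 6) & 3;                     // scale 1/2/4/8
 *        uint64_t uEa = iIndex != 4 ? auGprs[iIndex] << cShift : 0;  // index 4 = none (12 = R12 is valid)
 *        // NB: base 5/13 with mod=00 is really a bare disp32 and adds no register;
 *        //     that special case is left out here, see the switches above.
 *        return uEa + auGprs[iBase];
 *    }
 */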
12737
12738/** @} */
12739
12740
12741
12742/*
12743 * Include the instructions
12744 */
12745#include "IEMAllInstructions.cpp.h"
12746
12747
12748
12749
12750#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12751
12752/**
12753 * Sets up execution verification mode.
12754 */
12755IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12756{
12758 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12759
12760 /*
12761 * Always note down the address of the current instruction.
12762 */
12763 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12764 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12765
12766 /*
12767 * Enable verification and/or logging.
12768 */
12769 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12770 if ( fNewNoRem
12771 && ( 0
12772#if 0 /* auto enable on first paged protected mode interrupt */
12773 || ( pOrgCtx->eflags.Bits.u1IF
12774 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12775 && TRPMHasTrap(pVCpu)
12776 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12777#endif
12778#if 0
12779 || ( pOrgCtx->cs.Sel == 0x10
12780 && ( pOrgCtx->rip == 0x90119e3e
12781 || pOrgCtx->rip == 0x901d9810))
12782#endif
12783#if 0 /* Auto enable DSL - FPU stuff. */
12784 || ( pOrgCtx->cs.Sel == 0x10
12785 && (// pOrgCtx->rip == 0xc02ec07f
12786 //|| pOrgCtx->rip == 0xc02ec082
12787 //|| pOrgCtx->rip == 0xc02ec0c9
12788 0
12789 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12790#endif
12791#if 0 /* Auto enable DSL - fstp st0 stuff. */
12792 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12793#endif
12794#if 0
12795 || pOrgCtx->rip == 0x9022bb3a
12796#endif
12797#if 0
12798 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12799#endif
12800#if 0
12801 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12802 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12803#endif
12804#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12805 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12806 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12807 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12808#endif
12809#if 0 /* NT4SP1 - xadd early boot. */
12810 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12811#endif
12812#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12813 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12814#endif
12815#if 0 /* NT4SP1 - cmpxchg (AMD). */
12816 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12817#endif
12818#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12819 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12820#endif
12821#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12822 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12824#endif
12825#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12826 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12828#endif
12829#if 0 /* NT4SP1 - frstor [ecx] */
12830 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12831#endif
12832#if 0 /* xxxxxx - All long mode code. */
12833 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12834#endif
12835#if 0 /* rep movsq linux 3.7 64-bit boot. */
12836 || (pOrgCtx->rip == 0x0000000000100241)
12837#endif
12838#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12839 || (pOrgCtx->rip == 0x000000000215e240)
12840#endif
12841#if 0 /* DOS's size-overridden iret to v8086. */
12842 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12843#endif
12844 )
12845 )
12846 {
12847 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12848 RTLogFlags(NULL, "enabled");
12849 fNewNoRem = false;
12850 }
12851 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12852 {
12853 pVCpu->iem.s.fNoRem = fNewNoRem;
12854 if (!fNewNoRem)
12855 {
12856 LogAlways(("Enabling verification mode!\n"));
12857 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12858 }
12859 else
12860 LogAlways(("Disabling verification mode!\n"));
12861 }
12862
12863 /*
12864 * Switch state.
12865 */
12866 if (IEM_VERIFICATION_ENABLED(pVCpu))
12867 {
12868 static CPUMCTX s_DebugCtx; /* Ugly! */
12869
12870 s_DebugCtx = *pOrgCtx;
12871 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12872 }
12873
12874 /*
12875 * See if there is an interrupt pending in TRPM and inject it if we can.
12876 */
12877 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12878 if ( pOrgCtx->eflags.Bits.u1IF
12879 && TRPMHasTrap(pVCpu)
12880 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12881 {
12882 uint8_t u8TrapNo;
12883 TRPMEVENT enmType;
12884 RTGCUINT uErrCode;
12885 RTGCPTR uCr2;
12886 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12887 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12888 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12889 TRPMResetTrap(pVCpu);
12890 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12891 }
12892
12893 /*
12894 * Reset the counters.
12895 */
12896 pVCpu->iem.s.cIOReads = 0;
12897 pVCpu->iem.s.cIOWrites = 0;
12898 pVCpu->iem.s.fIgnoreRaxRdx = false;
12899 pVCpu->iem.s.fOverlappingMovs = false;
12900 pVCpu->iem.s.fProblematicMemory = false;
12901 pVCpu->iem.s.fUndefinedEFlags = 0;
12902
12903 if (IEM_VERIFICATION_ENABLED(pVCpu))
12904 {
12905 /*
12906 * Free all verification records.
12907 */
12908 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12909 pVCpu->iem.s.pIemEvtRecHead = NULL;
12910 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12911 do
12912 {
12913 while (pEvtRec)
12914 {
12915 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12916 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12917 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12918 pEvtRec = pNext;
12919 }
12920 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12921 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12922 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12923 } while (pEvtRec);
12924 }
12925}
12926
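/*
 * The overall verification scheme set up above, as a standalone sketch
 * (hypothetical names, not IEM code): IEM runs the instruction on a scratch
 * copy of the guest context while the reference engine (REM or HM) runs it on
 * the real one, and the two results are diffed afterwards.
 *
 *    #include <stdint.h>
 *    #include <string.h>
 *
 *    typedef struct GUESTCTX { uint64_t au64Gprs[16], rip, rflags; } GUESTCTX;
 *
 *    static int RunAndCompare(GUESTCTX *pLive, GUESTCTX *pScratch,
 *                             void (*pfnIem)(GUESTCTX *), void (*pfnRef)(GUESTCTX *))
 *    {
 *        *pScratch = *pLive;            // IEM executes on the scratch copy ...
 *        pfnIem(pScratch);
 *        pfnRef(pLive);                 // ... the reference engine on the live state,
 *        return memcmp(pScratch, pLive, sizeof(*pLive)) == 0;   // then the two are diffed
 *    }
 *
 * (The real comparison further down is done field by field so that each
 *  individual difference can be reported.)
 */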
12927
12928/**
12929 * Allocate an event record.
12930 * @returns Pointer to a record.
12931 */
12932IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12933{
12934 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12935 return NULL;
12936
12937 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12938 if (pEvtRec)
12939 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12940 else
12941 {
12942 if (!pVCpu->iem.s.ppIemEvtRecNext)
12943 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12944
12945 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12946 if (!pEvtRec)
12947 return NULL;
12948 }
12949 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12950 pEvtRec->pNext = NULL;
12951 return pEvtRec;
12952}
12953
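/*
 * Standalone sketch (hypothetical names, not IEM code) of the record recycling
 * used above: a LIFO free list that is popped on allocation and refilled when
 * the records are released again, with the heap only as a fallback.
 *
 *    #include <stdlib.h>
 *
 *    typedef struct EVTREC { struct EVTREC *pNext; } EVTREC;
 *
 *    static EVTREC *AllocRec(EVTREC **ppFreeHead)
 *    {
 *        EVTREC *pRec = *ppFreeHead;
 *        if (pRec)
 *            *ppFreeHead = pRec->pNext;                  // fast path: pop from the free list
 *        else
 *            pRec = (EVTREC *)calloc(1, sizeof(*pRec));  // slow path: allocate a fresh record
 *        return pRec;                                    // NULL on out-of-memory
 *    }
 *
 *    static void RecycleRec(EVTREC **ppFreeHead, EVTREC *pRec)
 *    {
 *        pRec->pNext = *ppFreeHead;                      // push it back for reuse
 *        *ppFreeHead = pRec;
 *    }
 */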
12954
12955/**
12956 * IOMMMIORead notification.
12957 */
12958VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12959{
12960 PVMCPU pVCpu = VMMGetCpu(pVM);
12961 if (!pVCpu)
12962 return;
12963 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12964 if (!pEvtRec)
12965 return;
12966 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12967 pEvtRec->u.RamRead.GCPhys = GCPhys;
12968 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12969 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12970 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12971}
12972
12973
12974/**
12975 * IOMMMIOWrite notification.
12976 */
12977VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12978{
12979 PVMCPU pVCpu = VMMGetCpu(pVM);
12980 if (!pVCpu)
12981 return;
12982 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12983 if (!pEvtRec)
12984 return;
12985 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12986 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12987 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12988 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12989 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12990 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12991 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12992 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12993 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12994}
12995
12996
12997/**
12998 * IOMIOPortRead notification.
12999 */
13000VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13001{
13002 PVMCPU pVCpu = VMMGetCpu(pVM);
13003 if (!pVCpu)
13004 return;
13005 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13006 if (!pEvtRec)
13007 return;
13008 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13009 pEvtRec->u.IOPortRead.Port = Port;
13010 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13011 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13012 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13013}
13014
13015/**
13016 * IOMIOPortWrite notification.
13017 */
13018VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13019{
13020 PVMCPU pVCpu = VMMGetCpu(pVM);
13021 if (!pVCpu)
13022 return;
13023 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13024 if (!pEvtRec)
13025 return;
13026 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13027 pEvtRec->u.IOPortWrite.Port = Port;
13028 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13029 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13030 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13031 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13032}
13033
13034
13035VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13036{
13037 PVMCPU pVCpu = VMMGetCpu(pVM);
13038 if (!pVCpu)
13039 return;
13040 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13041 if (!pEvtRec)
13042 return;
13043 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13044 pEvtRec->u.IOPortStrRead.Port = Port;
13045 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13046 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13047 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13048 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13049}
13050
13051
13052VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13053{
13054 PVMCPU pVCpu = VMMGetCpu(pVM);
13055 if (!pVCpu)
13056 return;
13057 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13058 if (!pEvtRec)
13059 return;
13060 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13061 pEvtRec->u.IOPortStrWrite.Port = Port;
13062 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13063 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13064 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13065 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13066}
13067
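/*
 * The notification helpers above all link their records in through a
 * pointer-to-pointer "next slot" (ppOtherEvtRecNext), which avoids
 * special-casing the empty list. A standalone sketch of the idiom
 * (hypothetical names; where the slot points, and whether it is advanced
 * afterwards, is up to the caller):
 *
 *    typedef struct EVTREC { struct EVTREC *pNext; } EVTREC;
 *
 *    static void LinkRecAt(EVTREC **ppSlot, EVTREC *pRec)
 *    {
 *        pRec->pNext = *ppSlot;    // whatever currently follows the slot
 *        *ppSlot     = pRec;       // hook the new record in at that position
 *    }
 */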
13068
13069/**
13070 * Fakes and records an I/O port read.
13071 *
13072 * @returns VINF_SUCCESS.
13073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13074 * @param Port The I/O port.
13075 * @param pu32Value Where to store the fake value.
13076 * @param cbValue The size of the access.
13077 */
13078IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13079{
13080 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13081 if (pEvtRec)
13082 {
13083 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13084 pEvtRec->u.IOPortRead.Port = Port;
13085 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13086 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13087 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13088 }
13089 pVCpu->iem.s.cIOReads++;
13090 *pu32Value = 0xcccccccc;
13091 return VINF_SUCCESS;
13092}
13093
13094
13095/**
13096 * Fakes and records an I/O port write.
13097 *
13098 * @returns VINF_SUCCESS.
13099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13100 * @param Port The I/O port.
13101 * @param u32Value The value being written.
13102 * @param cbValue The size of the access.
13103 */
13104IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13105{
13106 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13107 if (pEvtRec)
13108 {
13109 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13110 pEvtRec->u.IOPortWrite.Port = Port;
13111 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13112 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13113 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13114 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13115 }
13116 pVCpu->iem.s.cIOWrites++;
13117 return VINF_SUCCESS;
13118}
13119
13120
13121/**
13122 * Used to add extra details about a stub case.
13123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13124 */
13125IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13126{
13127 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13128 PVM pVM = pVCpu->CTX_SUFF(pVM);
13130 char szRegs[4096];
13131 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13132 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13133 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13134 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13135 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13136 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13137 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13138 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13139 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13140 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13141 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13142 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13143 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13144 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13145 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13146 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13147 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13148 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13149 " efer=%016VR{efer}\n"
13150 " pat=%016VR{pat}\n"
13151 " sf_mask=%016VR{sf_mask}\n"
13152 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13153 " lstar=%016VR{lstar}\n"
13154 " star=%016VR{star} cstar=%016VR{cstar}\n"
13155 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13156 );
13157
13158 char szInstr1[256];
13159 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13160 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13161 szInstr1, sizeof(szInstr1), NULL);
13162 char szInstr2[256];
13163 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13164 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13165 szInstr2, sizeof(szInstr2), NULL);
13166
13167 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13168}
13169
13170
13171/**
13172 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13173 * dump to the assertion info.
13174 *
13175 * @param pEvtRec The record to dump.
13176 */
13177IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13178{
13179 switch (pEvtRec->enmEvent)
13180 {
13181 case IEMVERIFYEVENT_IOPORT_READ:
13182 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13183 pEvtRec->u.IOPortRead.Port,
13184 pEvtRec->u.IOPortRead.cbValue);
13185 break;
13186 case IEMVERIFYEVENT_IOPORT_WRITE:
13187 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13188 pEvtRec->u.IOPortWrite.Port,
13189 pEvtRec->u.IOPortWrite.cbValue,
13190 pEvtRec->u.IOPortWrite.u32Value);
13191 break;
13192 case IEMVERIFYEVENT_IOPORT_STR_READ:
13193 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13194 pEvtRec->u.IOPortStrRead.Port,
13195 pEvtRec->u.IOPortStrRead.cbValue,
13196 pEvtRec->u.IOPortStrRead.cTransfers);
13197 break;
13198 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13199 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13200 pEvtRec->u.IOPortStrWrite.Port,
13201 pEvtRec->u.IOPortStrWrite.cbValue,
13202 pEvtRec->u.IOPortStrWrite.cTransfers);
13203 break;
13204 case IEMVERIFYEVENT_RAM_READ:
13205 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13206 pEvtRec->u.RamRead.GCPhys,
13207 pEvtRec->u.RamRead.cb);
13208 break;
13209 case IEMVERIFYEVENT_RAM_WRITE:
13210 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13211 pEvtRec->u.RamWrite.GCPhys,
13212 pEvtRec->u.RamWrite.cb,
13213 (int)pEvtRec->u.RamWrite.cb,
13214 pEvtRec->u.RamWrite.ab);
13215 break;
13216 default:
13217 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13218 break;
13219 }
13220}
13221
13222
13223/**
13224 * Raises an assertion on the specified records, showing the given message with
13225 * their record dumps attached.
13226 *
13227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13228 * @param pEvtRec1 The first record.
13229 * @param pEvtRec2 The second record.
13230 * @param pszMsg The message explaining why we're asserting.
13231 */
13232IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13233{
13234 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13235 iemVerifyAssertAddRecordDump(pEvtRec1);
13236 iemVerifyAssertAddRecordDump(pEvtRec2);
13237 iemVerifyAssertMsg2(pVCpu);
13238 RTAssertPanic();
13239}
13240
13241
13242/**
13243 * Raises an assertion on the specified record, showing the given message with
13244 * a record dump attached.
13245 *
13246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13247 * @param pEvtRec The record to dump.
13248 * @param pszMsg The message explaining why we're asserting.
13249 */
13250IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13251{
13252 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13253 iemVerifyAssertAddRecordDump(pEvtRec);
13254 iemVerifyAssertMsg2(pVCpu);
13255 RTAssertPanic();
13256}
13257
13258
13259/**
13260 * Verifies a write record.
13261 *
13262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13263 * @param pEvtRec The write record.
13264 * @param fRem Set if REM did the other execution. If clear,
13265 * it was HM.
13266 */
13267IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13268{
13269 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13270 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13271 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13272 if ( RT_FAILURE(rc)
13273 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13274 {
13275 /* fend off ins */
13276 if ( !pVCpu->iem.s.cIOReads
13277 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13278 || ( pEvtRec->u.RamWrite.cb != 1
13279 && pEvtRec->u.RamWrite.cb != 2
13280 && pEvtRec->u.RamWrite.cb != 4) )
13281 {
13282 /* fend off ROMs and MMIO */
13283 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13284 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13285 {
13286 /* fend off fxsave */
13287 if (pEvtRec->u.RamWrite.cb != 512)
13288 {
13289 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13290 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13291 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13292 RTAssertMsg2Add("%s: %.*Rhxs\n"
13293 "iem: %.*Rhxs\n",
13294 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13295 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13296 iemVerifyAssertAddRecordDump(pEvtRec);
13297 iemVerifyAssertMsg2(pVCpu);
13298 RTAssertPanic();
13299 }
13300 }
13301 }
13302 }
13303
13304}
13305
13306/**
13307 * Performs the post-execution verification checks.
13308 */
13309IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13310{
13311 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13312 return rcStrictIem;
13313
13314 /*
13315 * Switch back the state.
13316 */
13317 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13318 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13319 Assert(pOrgCtx != pDebugCtx);
13320 IEM_GET_CTX(pVCpu) = pOrgCtx;
13321
13322 /*
13323 * Execute the instruction in REM.
13324 */
13325 bool fRem = false;
13326 PVM pVM = pVCpu->CTX_SUFF(pVM);
13328 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13329#ifdef IEM_VERIFICATION_MODE_FULL_HM
13330 if ( HMIsEnabled(pVM)
13331 && pVCpu->iem.s.cIOReads == 0
13332 && pVCpu->iem.s.cIOWrites == 0
13333 && !pVCpu->iem.s.fProblematicMemory)
13334 {
13335 uint64_t uStartRip = pOrgCtx->rip;
13336 unsigned iLoops = 0;
13337 do
13338 {
13339 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13340 iLoops++;
13341 } while ( rc == VINF_SUCCESS
13342 || ( rc == VINF_EM_DBG_STEPPED
13343 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13344 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13345 || ( pOrgCtx->rip != pDebugCtx->rip
13346 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13347 && iLoops < 8) );
13348 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13349 rc = VINF_SUCCESS;
13350 }
13351#endif
13352 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13353 || rc == VINF_IOM_R3_IOPORT_READ
13354 || rc == VINF_IOM_R3_IOPORT_WRITE
13355 || rc == VINF_IOM_R3_MMIO_READ
13356 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13357 || rc == VINF_IOM_R3_MMIO_WRITE
13358 || rc == VINF_CPUM_R3_MSR_READ
13359 || rc == VINF_CPUM_R3_MSR_WRITE
13360 || rc == VINF_EM_RESCHEDULE
13361 )
13362 {
13363 EMRemLock(pVM);
13364 rc = REMR3EmulateInstruction(pVM, pVCpu);
13365 AssertRC(rc);
13366 EMRemUnlock(pVM);
13367 fRem = true;
13368 }
13369
13370# if 1 /* Skip unimplemented instructions for now. */
13371 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13372 {
13373 IEM_GET_CTX(pVCpu) = pOrgCtx;
13374 if (rc == VINF_EM_DBG_STEPPED)
13375 return VINF_SUCCESS;
13376 return rc;
13377 }
13378# endif
13379
13380 /*
13381 * Compare the register states.
13382 */
13383 unsigned cDiffs = 0;
13384 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13385 {
13386 //Log(("REM and IEM ends up with different registers!\n"));
13387 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13388
13389# define CHECK_FIELD(a_Field) \
13390 do \
13391 { \
13392 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13393 { \
13394 switch (sizeof(pOrgCtx->a_Field)) \
13395 { \
13396 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13397 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13398 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13399 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13400 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13401 } \
13402 cDiffs++; \
13403 } \
13404 } while (0)
13405# define CHECK_XSTATE_FIELD(a_Field) \
13406 do \
13407 { \
13408 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13409 { \
13410 switch (sizeof(pOrgXState->a_Field)) \
13411 { \
13412 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13413 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13414 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13415 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13416 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13417 } \
13418 cDiffs++; \
13419 } \
13420 } while (0)
13421
13422# define CHECK_BIT_FIELD(a_Field) \
13423 do \
13424 { \
13425 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13426 { \
13427 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13428 cDiffs++; \
13429 } \
13430 } while (0)
13431
13432# define CHECK_SEL(a_Sel) \
13433 do \
13434 { \
13435 CHECK_FIELD(a_Sel.Sel); \
13436 CHECK_FIELD(a_Sel.Attr.u); \
13437 CHECK_FIELD(a_Sel.u64Base); \
13438 CHECK_FIELD(a_Sel.u32Limit); \
13439 CHECK_FIELD(a_Sel.fFlags); \
13440 } while (0)
13441
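/*
 * The CHECK_* macros above follow the usual "diff one field, report it with
 * the stringised field name, bump a counter" pattern. A standalone sketch
 * (hypothetical names, not IEM code):
 *
 *    #include <stdio.h>
 *    #include <string.h>
 *
 *    #define DIFF_FIELD(pIem, pRef, a_Field, pcDiffs) \
 *        do { \
 *            if (memcmp(&(pIem)->a_Field, &(pRef)->a_Field, sizeof((pIem)->a_Field)) != 0) \
 *            { \
 *                printf("  %s differs\n", #a_Field); \
 *                *(pcDiffs) += 1; \
 *            } \
 *        } while (0)
 */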
13442 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13443 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13444
13445#if 1 /* The recompiler doesn't update these the Intel way. */
13446 if (fRem)
13447 {
13448 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13449 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13450 pOrgXState->x87.CS = pDebugXState->x87.CS;
13451 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13452 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13453 pOrgXState->x87.DS = pDebugXState->x87.DS;
13454 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13455 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13456 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13457 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13458 }
13459#endif
13460 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13461 {
13462 RTAssertMsg2Weak(" the FPU state differs\n");
13463 cDiffs++;
13464 CHECK_XSTATE_FIELD(x87.FCW);
13465 CHECK_XSTATE_FIELD(x87.FSW);
13466 CHECK_XSTATE_FIELD(x87.FTW);
13467 CHECK_XSTATE_FIELD(x87.FOP);
13468 CHECK_XSTATE_FIELD(x87.FPUIP);
13469 CHECK_XSTATE_FIELD(x87.CS);
13470 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13471 CHECK_XSTATE_FIELD(x87.FPUDP);
13472 CHECK_XSTATE_FIELD(x87.DS);
13473 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13474 CHECK_XSTATE_FIELD(x87.MXCSR);
13475 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13476 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13477 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13478 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13479 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13480 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13481 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13482 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13483 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13484 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13485 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13486 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13487 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13488 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13489 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13490 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13491 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13492 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13493 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13494 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13495 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13496 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13497 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13498 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13499 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13500 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13501 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13502 }
13503 CHECK_FIELD(rip);
13504 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13505 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13506 {
13507 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13508 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13509 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13510 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13511 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13512 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13513 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13514 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13515 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13516 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13517 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13518 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13519 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13520 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13521 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13522 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13523 if (0 && !fRem) /** @todo debug the occasional clearing of RF when running against VT-x. */
13524 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13525 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13526 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13527 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13528 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13529 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13530 }
13531
13532 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13533 CHECK_FIELD(rax);
13534 CHECK_FIELD(rcx);
13535 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13536 CHECK_FIELD(rdx);
13537 CHECK_FIELD(rbx);
13538 CHECK_FIELD(rsp);
13539 CHECK_FIELD(rbp);
13540 CHECK_FIELD(rsi);
13541 CHECK_FIELD(rdi);
13542 CHECK_FIELD(r8);
13543 CHECK_FIELD(r9);
13544 CHECK_FIELD(r10);
13545 CHECK_FIELD(r11);
13546 CHECK_FIELD(r12);
13547 CHECK_FIELD(r13);
13548 CHECK_SEL(cs);
13549 CHECK_SEL(ss);
13550 CHECK_SEL(ds);
13551 CHECK_SEL(es);
13552 CHECK_SEL(fs);
13553 CHECK_SEL(gs);
13554 CHECK_FIELD(cr0);
13555
13556 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13557 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13558 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13559 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13560 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13561 {
13562 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13563 { /* ignore */ }
13564 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13565 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13566 && fRem)
13567 { /* ignore */ }
13568 else
13569 CHECK_FIELD(cr2);
13570 }
13571 CHECK_FIELD(cr3);
13572 CHECK_FIELD(cr4);
13573 CHECK_FIELD(dr[0]);
13574 CHECK_FIELD(dr[1]);
13575 CHECK_FIELD(dr[2]);
13576 CHECK_FIELD(dr[3]);
13577 CHECK_FIELD(dr[6]);
13578 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13579 CHECK_FIELD(dr[7]);
13580 CHECK_FIELD(gdtr.cbGdt);
13581 CHECK_FIELD(gdtr.pGdt);
13582 CHECK_FIELD(idtr.cbIdt);
13583 CHECK_FIELD(idtr.pIdt);
13584 CHECK_SEL(ldtr);
13585 CHECK_SEL(tr);
13586 CHECK_FIELD(SysEnter.cs);
13587 CHECK_FIELD(SysEnter.eip);
13588 CHECK_FIELD(SysEnter.esp);
13589 CHECK_FIELD(msrEFER);
13590 CHECK_FIELD(msrSTAR);
13591 CHECK_FIELD(msrPAT);
13592 CHECK_FIELD(msrLSTAR);
13593 CHECK_FIELD(msrCSTAR);
13594 CHECK_FIELD(msrSFMASK);
13595 CHECK_FIELD(msrKERNELGSBASE);
13596
13597 if (cDiffs != 0)
13598 {
13599 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13600 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13601 RTAssertPanic();
13602 static bool volatile s_fEnterDebugger = true;
13603 if (s_fEnterDebugger)
13604 DBGFSTOP(pVM);
13605
13606# if 1 /* Ignore unimplemented instructions for now. */
13607 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13608 rcStrictIem = VINF_SUCCESS;
13609# endif
13610 }
13611# undef CHECK_FIELD
13612# undef CHECK_BIT_FIELD
13613 }
13614
13615 /*
13616 * If the register state compared fine, check the verification event
13617 * records.
13618 */
13619 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13620 {
13621 /*
13622 * Compare verification event records.
13623 * - I/O port accesses should be a 1:1 match.
13624 */
13625 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13626 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13627 while (pIemRec && pOtherRec)
13628 {
13629            /* Since we might miss RAM writes and reads, skip the extra IEM RAM records
13630               here, verifying any write records directly against memory. */
13631 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13632 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13633 && pIemRec->pNext)
13634 {
13635 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13636 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13637 pIemRec = pIemRec->pNext;
13638 }
13639
13640 /* Do the compare. */
13641 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13642 {
13643 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13644 break;
13645 }
13646 bool fEquals;
13647 switch (pIemRec->enmEvent)
13648 {
13649 case IEMVERIFYEVENT_IOPORT_READ:
13650 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13651 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13652 break;
13653 case IEMVERIFYEVENT_IOPORT_WRITE:
13654 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13655 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13656 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13657 break;
13658 case IEMVERIFYEVENT_IOPORT_STR_READ:
13659 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13660 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13661 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13662 break;
13663 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13664 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13665 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13666 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13667 break;
13668 case IEMVERIFYEVENT_RAM_READ:
13669 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13670 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13671 break;
13672 case IEMVERIFYEVENT_RAM_WRITE:
13673 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13674 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13675 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13676 break;
13677 default:
13678 fEquals = false;
13679 break;
13680 }
13681 if (!fEquals)
13682 {
13683 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13684 break;
13685 }
13686
13687 /* advance */
13688 pIemRec = pIemRec->pNext;
13689 pOtherRec = pOtherRec->pNext;
13690 }
13691
13692 /* Ignore extra writes and reads. */
13693 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13694 {
13695 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13696 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13697 pIemRec = pIemRec->pNext;
13698 }
13699 if (pIemRec != NULL)
13700 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13701 else if (pOtherRec != NULL)
13702 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13703 }
13704 IEM_GET_CTX(pVCpu) = pOrgCtx;
13705
13706 return rcStrictIem;
13707}
13708
13709#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13710
13711/* stubs */
13712IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13713{
13714 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13715 return VERR_INTERNAL_ERROR;
13716}
13717
13718IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13719{
13720 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13721 return VERR_INTERNAL_ERROR;
13722}
13723
13724#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13725
13726
13727#ifdef LOG_ENABLED
13728/**
13729 * Logs the current instruction.
13730 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13731 * @param pCtx The current CPU context.
13732 * @param fSameCtx Set if we have the same context information as the VMM,
13733 * clear if we may have already executed an instruction in
13734 * our debug context. When clear, we assume IEMCPU holds
13735 * valid CPU mode info.
13736 */
13737IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13738{
13739# ifdef IN_RING3
13740 if (LogIs2Enabled())
13741 {
13742 char szInstr[256];
13743 uint32_t cbInstr = 0;
13744 if (fSameCtx)
13745 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13746 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13747 szInstr, sizeof(szInstr), &cbInstr);
13748 else
13749 {
13750 uint32_t fFlags = 0;
13751 switch (pVCpu->iem.s.enmCpuMode)
13752 {
13753 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13754 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13755 case IEMMODE_16BIT:
13756 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13757 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13758 else
13759 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13760 break;
13761 }
13762 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13763 szInstr, sizeof(szInstr), &cbInstr);
13764 }
13765
13766 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13767 Log2(("****\n"
13768 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13769 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13770 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13771 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13772 " %s\n"
13773 ,
13774 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13775 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13776 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13777 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13778 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13779 szInstr));
13780
13781 if (LogIs3Enabled())
13782 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13783 }
13784 else
13785# endif
13786 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13787 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13788 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13789}
13790#endif
13791
13792
13793/**
13794 * Makes status code adjustments (pass up from I/O and access handlers)
13795 * as well as maintaining statistics.
13796 *
13797 * @returns Strict VBox status code to pass up.
13798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13799 * @param rcStrict The status from executing an instruction.
13800 */
13801DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13802{
13803 if (rcStrict != VINF_SUCCESS)
13804 {
13805 if (RT_SUCCESS(rcStrict))
13806 {
13807 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13808 || rcStrict == VINF_IOM_R3_IOPORT_READ
13809 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13810 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13811 || rcStrict == VINF_IOM_R3_MMIO_READ
13812 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13813 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13814 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13815 || rcStrict == VINF_CPUM_R3_MSR_READ
13816 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13817 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13818 || rcStrict == VINF_EM_RAW_TO_R3
13819 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13820 /* raw-mode / virt handlers only: */
13821 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13822 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13823 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13824 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13825 || rcStrict == VINF_SELM_SYNC_GDT
13826 || rcStrict == VINF_CSAM_PENDING_ACTION
13827 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13828 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13829/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13830 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13831 if (rcPassUp == VINF_SUCCESS)
13832 pVCpu->iem.s.cRetInfStatuses++;
13833 else if ( rcPassUp < VINF_EM_FIRST
13834 || rcPassUp > VINF_EM_LAST
13835 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13836 {
13837 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13838 pVCpu->iem.s.cRetPassUpStatus++;
13839 rcStrict = rcPassUp;
13840 }
13841 else
13842 {
13843 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13844 pVCpu->iem.s.cRetInfStatuses++;
13845 }
13846 }
13847 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13848 pVCpu->iem.s.cRetAspectNotImplemented++;
13849 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13850 pVCpu->iem.s.cRetInstrNotImplemented++;
13851#ifdef IEM_VERIFICATION_MODE_FULL
13852 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13853 rcStrict = VINF_SUCCESS;
13854#endif
13855 else
13856 pVCpu->iem.s.cRetErrStatuses++;
13857 }
13858 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13859 {
13860 pVCpu->iem.s.cRetPassUpStatus++;
13861 rcStrict = pVCpu->iem.s.rcPassUp;
13862 }
13863
13864 return rcStrict;
13865}
13866
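/*
 * Illustrative sketch only (not part of the build): the pass-up rule in
 * iemExecStatusCodeFiddling boils down to "a pending pass-up status wins when
 * it lies outside the EM scheduling range or has a numerically lower, i.e.
 * higher priority, value".  The constants below are placeholders standing in
 * for VINF_SUCCESS / VINF_EM_FIRST / VINF_EM_LAST, not the real values.
 */
#if 0
# include <assert.h>

# define EX_VINF_SUCCESS    0   /* placeholder */
# define EX_VINF_EM_FIRST 100   /* placeholder */
# define EX_VINF_EM_LAST  199   /* placeholder */

static int ExampleMergePassUp(int rcStrict, int rcPassUp)
{
    if (rcPassUp == EX_VINF_SUCCESS)
        return rcStrict;                    /* nothing pending, keep the current status */
    if (   rcPassUp < EX_VINF_EM_FIRST
        || rcPassUp > EX_VINF_EM_LAST
        || rcPassUp < rcStrict)
        return rcPassUp;                    /* the pass-up status outranks the current one */
    return rcStrict;                        /* the current status has the higher priority */
}

int main(void)
{
    assert(ExampleMergePassUp(150,   0) == 150);    /* no pass-up pending */
    assert(ExampleMergePassUp(150, 120) == 120);    /* lower EM value wins */
    assert(ExampleMergePassUp(120, 150) == 120);    /* current status keeps priority */
    assert(ExampleMergePassUp(150,  42) ==  42);    /* outside the EM range always wins */
    return 0;
}
#endif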
13867
13868/**
13869 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13870 * IEMExecOneWithPrefetchedByPC.
13871 *
13872 * Similar code is found in IEMExecLots.
13873 *
13874 * @return Strict VBox status code.
13875 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
13877 * @param fExecuteInhibit If set, execute the instruction following CLI,
13878 * POP SS and MOV SS,GR.
13879 */
13880DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13881{
13882#ifdef IEM_WITH_SETJMP
13883 VBOXSTRICTRC rcStrict;
13884 jmp_buf JmpBuf;
13885 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13886 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13887 if ((rcStrict = setjmp(JmpBuf)) == 0)
13888 {
13889 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13890 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13891 }
13892 else
13893 pVCpu->iem.s.cLongJumps++;
13894 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13895#else
13896 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13897 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13898#endif
13899 if (rcStrict == VINF_SUCCESS)
13900 pVCpu->iem.s.cInstructions++;
13901 if (pVCpu->iem.s.cActiveMappings > 0)
13902 {
13903 Assert(rcStrict != VINF_SUCCESS);
13904 iemMemRollback(pVCpu);
13905 }
13906//#ifdef DEBUG
13907// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13908//#endif
13909
13910 /* Execute the next instruction as well if a cli, pop ss or
13911 mov ss, Gr has just completed successfully. */
13912 if ( fExecuteInhibit
13913 && rcStrict == VINF_SUCCESS
13914 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13915 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13916 {
13917 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13918 if (rcStrict == VINF_SUCCESS)
13919 {
13920#ifdef LOG_ENABLED
13921 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13922#endif
13923#ifdef IEM_WITH_SETJMP
13924 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13925 if ((rcStrict = setjmp(JmpBuf)) == 0)
13926 {
13927 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13928 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13929 }
13930 else
13931 pVCpu->iem.s.cLongJumps++;
13932 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13933#else
13934 IEM_OPCODE_GET_NEXT_U8(&b);
13935 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13936#endif
13937 if (rcStrict == VINF_SUCCESS)
13938 pVCpu->iem.s.cInstructions++;
13939 if (pVCpu->iem.s.cActiveMappings > 0)
13940 {
13941 Assert(rcStrict != VINF_SUCCESS);
13942 iemMemRollback(pVCpu);
13943 }
13944 }
13945 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13946 }
13947
13948 /*
13949 * Return value fiddling, statistics and sanity assertions.
13950 */
13951 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13952
13953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13955#if defined(IEM_VERIFICATION_MODE_FULL)
13956 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13957 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13958 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13959 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13960#endif
13961 return rcStrict;
13962}
13963
13964
13965#ifdef IN_RC
13966/**
13967 * Re-enters raw-mode or ensure we return to ring-3.
13968 *
13969 * @returns rcStrict, maybe modified.
13970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13971 * @param pCtx The current CPU context.
13972 * @param   rcStrict    The status code returned by the interpreter.
13973 */
13974DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13975{
13976 if ( !pVCpu->iem.s.fInPatchCode
13977 && ( rcStrict == VINF_SUCCESS
13978 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13979 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13980 {
13981 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13982 CPUMRawEnter(pVCpu);
13983 else
13984 {
13985 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13986 rcStrict = VINF_EM_RESCHEDULE;
13987 }
13988 }
13989 return rcStrict;
13990}
13991#endif
13992
13993
13994/**
13995 * Execute one instruction.
13996 *
13997 * @return Strict VBox status code.
13998 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13999 */
14000VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14001{
14002#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14003 if (++pVCpu->iem.s.cVerifyDepth == 1)
14004 iemExecVerificationModeSetup(pVCpu);
14005#endif
14006#ifdef LOG_ENABLED
14007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14008 iemLogCurInstr(pVCpu, pCtx, true);
14009#endif
14010
14011 /*
14012 * Do the decoding and emulation.
14013 */
14014 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14015 if (rcStrict == VINF_SUCCESS)
14016 rcStrict = iemExecOneInner(pVCpu, true);
14017
14018#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14019 /*
14020 * Assert some sanity.
14021 */
14022 if (pVCpu->iem.s.cVerifyDepth == 1)
14023 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14024 pVCpu->iem.s.cVerifyDepth--;
14025#endif
14026#ifdef IN_RC
14027 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14028#endif
14029 if (rcStrict != VINF_SUCCESS)
14030 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14031 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14032 return rcStrict;
14033}
14034
14035
14036VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14037{
14038 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14039 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14040
14041 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14042 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14043 if (rcStrict == VINF_SUCCESS)
14044 {
14045 rcStrict = iemExecOneInner(pVCpu, true);
14046 if (pcbWritten)
14047 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14048 }
14049
14050#ifdef IN_RC
14051 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14052#endif
14053 return rcStrict;
14054}
14055
14056
14057VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14058 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14059{
14060 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14061 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14062
14063 VBOXSTRICTRC rcStrict;
14064 if ( cbOpcodeBytes
14065 && pCtx->rip == OpcodeBytesPC)
14066 {
14067 iemInitDecoder(pVCpu, false);
14068#ifdef IEM_WITH_CODE_TLB
14069 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14070 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14071 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14072 pVCpu->iem.s.offCurInstrStart = 0;
14073 pVCpu->iem.s.offInstrNextByte = 0;
14074#else
14075 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14076 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14077#endif
14078 rcStrict = VINF_SUCCESS;
14079 }
14080 else
14081 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14082 if (rcStrict == VINF_SUCCESS)
14083 {
14084 rcStrict = iemExecOneInner(pVCpu, true);
14085 }
14086
14087#ifdef IN_RC
14088 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14089#endif
14090 return rcStrict;
14091}
14092
14093
14094VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14095{
14096 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14097 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14098
14099 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14100 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14101 if (rcStrict == VINF_SUCCESS)
14102 {
14103 rcStrict = iemExecOneInner(pVCpu, false);
14104 if (pcbWritten)
14105 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14106 }
14107
14108#ifdef IN_RC
14109 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14110#endif
14111 return rcStrict;
14112}
14113
14114
14115VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14116 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14117{
14118 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14119 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14120
14121 VBOXSTRICTRC rcStrict;
14122 if ( cbOpcodeBytes
14123 && pCtx->rip == OpcodeBytesPC)
14124 {
14125 iemInitDecoder(pVCpu, true);
14126#ifdef IEM_WITH_CODE_TLB
14127 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14128 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14129 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14130 pVCpu->iem.s.offCurInstrStart = 0;
14131 pVCpu->iem.s.offInstrNextByte = 0;
14132#else
14133 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14134 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14135#endif
14136 rcStrict = VINF_SUCCESS;
14137 }
14138 else
14139 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14140 if (rcStrict == VINF_SUCCESS)
14141 rcStrict = iemExecOneInner(pVCpu, false);
14142
14143#ifdef IN_RC
14144 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14145#endif
14146 return rcStrict;
14147}
14148
14149
14150/**
14151 * For debugging DISGetParamSize, may come in handy.
14152 *
14153 * @returns Strict VBox status code.
14154 * @param pVCpu The cross context virtual CPU structure of the
14155 * calling EMT.
14156 * @param pCtxCore The context core structure.
14157 * @param OpcodeBytesPC The PC of the opcode bytes.
14158 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14159 * @param cbOpcodeBytes Number of prefetched bytes.
14160 * @param pcbWritten Where to return the number of bytes written.
14161 * Optional.
14162 */
14163VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14164 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14165 uint32_t *pcbWritten)
14166{
14167 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14168 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14169
14170 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14171 VBOXSTRICTRC rcStrict;
14172 if ( cbOpcodeBytes
14173 && pCtx->rip == OpcodeBytesPC)
14174 {
14175 iemInitDecoder(pVCpu, true);
14176#ifdef IEM_WITH_CODE_TLB
14177 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14178 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14179 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14180 pVCpu->iem.s.offCurInstrStart = 0;
14181 pVCpu->iem.s.offInstrNextByte = 0;
14182#else
14183 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14184 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14185#endif
14186 rcStrict = VINF_SUCCESS;
14187 }
14188 else
14189 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14190 if (rcStrict == VINF_SUCCESS)
14191 {
14192 rcStrict = iemExecOneInner(pVCpu, false);
14193 if (pcbWritten)
14194 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14195 }
14196
14197#ifdef IN_RC
14198 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14199#endif
14200 return rcStrict;
14201}
14202
14203
14204VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14205{
14206 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14207
14208#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14209 /*
14210 * See if there is an interrupt pending in TRPM, inject it if we can.
14211 */
14212 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14213# ifdef IEM_VERIFICATION_MODE_FULL
14214 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14215# endif
14216 if ( pCtx->eflags.Bits.u1IF
14217 && TRPMHasTrap(pVCpu)
14218 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14219 {
14220 uint8_t u8TrapNo;
14221 TRPMEVENT enmType;
14222 RTGCUINT uErrCode;
14223 RTGCPTR uCr2;
14224 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14225 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14226 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14227 TRPMResetTrap(pVCpu);
14228 }
14229
14230 /*
14231 * Log the state.
14232 */
14233# ifdef LOG_ENABLED
14234 iemLogCurInstr(pVCpu, pCtx, true);
14235# endif
14236
14237 /*
14238 * Do the decoding and emulation.
14239 */
14240 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14241 if (rcStrict == VINF_SUCCESS)
14242 rcStrict = iemExecOneInner(pVCpu, true);
14243
14244 /*
14245 * Assert some sanity.
14246 */
14247 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14248
14249 /*
14250 * Log and return.
14251 */
14252 if (rcStrict != VINF_SUCCESS)
14253 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14254 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14255 if (pcInstructions)
14256 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14257 return rcStrict;
14258
14259#else /* Not verification mode */
14260
14261 /*
14262 * See if there is an interrupt pending in TRPM, inject it if we can.
14263 */
14264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14265# ifdef IEM_VERIFICATION_MODE_FULL
14266 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14267# endif
14268 if ( pCtx->eflags.Bits.u1IF
14269 && TRPMHasTrap(pVCpu)
14270 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14271 {
14272 uint8_t u8TrapNo;
14273 TRPMEVENT enmType;
14274 RTGCUINT uErrCode;
14275 RTGCPTR uCr2;
14276 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14277 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14278 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14279 TRPMResetTrap(pVCpu);
14280 }
14281
14282 /*
14283 * Initial decoder init w/ prefetch, then setup setjmp.
14284 */
14285 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14286 if (rcStrict == VINF_SUCCESS)
14287 {
14288# ifdef IEM_WITH_SETJMP
14289 jmp_buf JmpBuf;
14290 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14291 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14292 pVCpu->iem.s.cActiveMappings = 0;
14293 if ((rcStrict = setjmp(JmpBuf)) == 0)
14294# endif
14295 {
14296 /*
14297 * The run loop. We limit ourselves to 4096 instructions right now.
14298 */
14299 PVM pVM = pVCpu->CTX_SUFF(pVM);
14300 uint32_t cInstr = 4096;
14301 for (;;)
14302 {
14303 /*
14304 * Log the state.
14305 */
14306# ifdef LOG_ENABLED
14307 iemLogCurInstr(pVCpu, pCtx, true);
14308# endif
14309
14310 /*
14311 * Do the decoding and emulation.
14312 */
14313 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14314 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14315 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14316 {
14317 Assert(pVCpu->iem.s.cActiveMappings == 0);
14318 pVCpu->iem.s.cInstructions++;
14319 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14320 {
14321 uint32_t fCpu = pVCpu->fLocalForcedActions
14322 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14323 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14324 | VMCPU_FF_TLB_FLUSH
14325# ifdef VBOX_WITH_RAW_MODE
14326 | VMCPU_FF_TRPM_SYNC_IDT
14327 | VMCPU_FF_SELM_SYNC_TSS
14328 | VMCPU_FF_SELM_SYNC_GDT
14329 | VMCPU_FF_SELM_SYNC_LDT
14330# endif
14331 | VMCPU_FF_INHIBIT_INTERRUPTS
14332 | VMCPU_FF_BLOCK_NMIS
14333 | VMCPU_FF_UNHALT ));
14334
14335 if (RT_LIKELY( ( !fCpu
14336 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14337 && !pCtx->rflags.Bits.u1IF) )
14338 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14339 {
14340 if (cInstr-- > 0)
14341 {
14342 Assert(pVCpu->iem.s.cActiveMappings == 0);
14343 iemReInitDecoder(pVCpu);
14344 continue;
14345 }
14346 }
14347 }
14348 Assert(pVCpu->iem.s.cActiveMappings == 0);
14349 }
14350 else if (pVCpu->iem.s.cActiveMappings > 0)
14351 iemMemRollback(pVCpu);
14352 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14353 break;
14354 }
14355 }
14356# ifdef IEM_WITH_SETJMP
14357 else
14358 {
14359 if (pVCpu->iem.s.cActiveMappings > 0)
14360 iemMemRollback(pVCpu);
14361 pVCpu->iem.s.cLongJumps++;
14362 }
14363 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14364# endif
14365
14366 /*
14367 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14368 */
14369 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14370 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14371# if defined(IEM_VERIFICATION_MODE_FULL)
14372 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14373 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14374 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14375 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14376# endif
14377 }
14378
14379 /*
14380 * Maybe re-enter raw-mode and log.
14381 */
14382# ifdef IN_RC
14383 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14384# endif
14385 if (rcStrict != VINF_SUCCESS)
14386 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14387 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14388 if (pcInstructions)
14389 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14390 return rcStrict;
14391#endif /* Not verification mode */
14392}
14393
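/*
 * Illustrative sketch only (not part of the build): the inner run loop above
 * keeps iterating only while no relevant force-flags are pending, with the
 * one exception that pending guest interrupts may be ignored while IF is
 * clear.  The EX_FF_* values are placeholders, not the real VMCPU_FF_* bits.
 */
#if 0
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

# define EX_FF_INTERRUPT_APIC  UINT32_C(0x00000001)     /* placeholder */
# define EX_FF_INTERRUPT_PIC   UINT32_C(0x00000002)     /* placeholder */
# define EX_FF_TIMER           UINT32_C(0x00000010)     /* placeholder */

static bool ExampleMayContinueRunLoop(uint32_t fCpuForcedActions, bool fInterruptsEnabled)
{
    if (!fCpuForcedActions)
        return true;                                    /* nothing pending at all */
    /* Only guest interrupts pending and IF clear: nothing to deliver yet. */
    return !(fCpuForcedActions & ~(EX_FF_INTERRUPT_APIC | EX_FF_INTERRUPT_PIC))
        && !fInterruptsEnabled;
}

int main(void)
{
    assert( ExampleMayContinueRunLoop(0, true));                    /* idle */
    assert( ExampleMayContinueRunLoop(EX_FF_INTERRUPT_PIC, false)); /* masked interrupt */
    assert(!ExampleMayContinueRunLoop(EX_FF_INTERRUPT_PIC, true));  /* deliverable interrupt */
    assert(!ExampleMayContinueRunLoop(EX_FF_TIMER, false));         /* other pending work */
    return 0;
}
#endif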
14394
14395
14396/**
14397 * Injects a trap, fault, abort, software interrupt or external interrupt.
14398 *
14399 * The parameter list matches TRPMQueryTrapAll pretty closely.
14400 *
14401 * @returns Strict VBox status code.
14402 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14403 * @param u8TrapNo The trap number.
14404 * @param enmType What type is it (trap/fault/abort), software
14405 * interrupt or hardware interrupt.
14406 * @param uErrCode The error code if applicable.
14407 * @param uCr2 The CR2 value if applicable.
14408 * @param cbInstr The instruction length (only relevant for
14409 * software interrupts).
14410 */
14411VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14412 uint8_t cbInstr)
14413{
14414 iemInitDecoder(pVCpu, false);
14415#ifdef DBGFTRACE_ENABLED
14416 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14417 u8TrapNo, enmType, uErrCode, uCr2);
14418#endif
14419
14420 uint32_t fFlags;
14421 switch (enmType)
14422 {
14423 case TRPM_HARDWARE_INT:
14424 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14425 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14426 uErrCode = uCr2 = 0;
14427 break;
14428
14429 case TRPM_SOFTWARE_INT:
14430 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14431 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14432 uErrCode = uCr2 = 0;
14433 break;
14434
14435 case TRPM_TRAP:
14436 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14437 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14438 if (u8TrapNo == X86_XCPT_PF)
14439 fFlags |= IEM_XCPT_FLAGS_CR2;
14440 switch (u8TrapNo)
14441 {
14442 case X86_XCPT_DF:
14443 case X86_XCPT_TS:
14444 case X86_XCPT_NP:
14445 case X86_XCPT_SS:
14446 case X86_XCPT_PF:
14447 case X86_XCPT_AC:
14448 fFlags |= IEM_XCPT_FLAGS_ERR;
14449 break;
14450
14451 case X86_XCPT_NMI:
14452 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14453 break;
14454 }
14455 break;
14456
14457 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14458 }
14459
14460 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14461}
14462
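/*
 * Illustrative sketch only (not part of the build): the switch in
 * IEMInjectTrap flags #DF, #TS, #NP, #SS, #PF and #AC as carrying an error
 * code.  The same test can be expressed as a bitmask lookup, as sketched
 * below with plain shifts instead of the IPRT bit macros.
 */
#if 0
# include <assert.h>
# include <stdbool.h>
# include <stdint.h>

static bool ExampleXcptHasErrorCode(uint8_t uVector)
{
    /* Bits 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 14 (#PF) and 17 (#AC). */
    uint32_t const fMask = (UINT32_C(1) <<  8)
                         | (UINT32_C(1) << 10)
                         | (UINT32_C(1) << 11)
                         | (UINT32_C(1) << 12)
                         | (UINT32_C(1) << 14)
                         | (UINT32_C(1) << 17);
    return uVector < 32 && (fMask & (UINT32_C(1) << uVector)) != 0;
}

int main(void)
{
    assert( ExampleXcptHasErrorCode(14));   /* #PF pushes an error code */
    assert(!ExampleXcptHasErrorCode(6));    /* #UD does not */
    return 0;
}
#endif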
14463
14464/**
14465 * Injects the active TRPM event.
14466 *
14467 * @returns Strict VBox status code.
14468 * @param pVCpu The cross context virtual CPU structure.
14469 */
14470VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14471{
14472#ifndef IEM_IMPLEMENTS_TASKSWITCH
14473 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14474#else
14475 uint8_t u8TrapNo;
14476 TRPMEVENT enmType;
14477 RTGCUINT uErrCode;
14478 RTGCUINTPTR uCr2;
14479 uint8_t cbInstr;
14480 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14481 if (RT_FAILURE(rc))
14482 return rc;
14483
14484 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14485
14486 /** @todo Are there any other codes that imply the event was successfully
14487 * delivered to the guest? See @bugref{6607}. */
14488 if ( rcStrict == VINF_SUCCESS
14489 || rcStrict == VINF_IEM_RAISED_XCPT)
14490 {
14491 TRPMResetTrap(pVCpu);
14492 }
14493 return rcStrict;
14494#endif
14495}
14496
14497
14498VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14499{
14500 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14501 return VERR_NOT_IMPLEMENTED;
14502}
14503
14504
14505VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14506{
14507 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14508 return VERR_NOT_IMPLEMENTED;
14509}
14510
14511
14512#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14513/**
14514 * Executes an IRET instruction with default operand size.
14515 *
14516 * This is for PATM.
14517 *
14518 * @returns VBox status code.
14519 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14520 * @param pCtxCore The register frame.
14521 */
14522VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14523{
14524 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14525
14526 iemCtxCoreToCtx(pCtx, pCtxCore);
14527 iemInitDecoder(pVCpu);
14528 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14529 if (rcStrict == VINF_SUCCESS)
14530 iemCtxToCtxCore(pCtxCore, pCtx);
14531 else
14532 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14533 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14534 return rcStrict;
14535}
14536#endif
14537
14538
14539/**
14540 * Macro used by the IEMExec* method to check the given instruction length.
14541 *
14542 * Will return on failure!
14543 *
14544 * @param a_cbInstr The given instruction length.
14545 * @param a_cbMin The minimum length.
14546 */
14547#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14548 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14549 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14550
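/*
 * Illustrative sketch only (not part of the build): the macro above checks
 * "a_cbMin <= a_cbInstr <= 15" with a single comparison by relying on
 * unsigned wrap-around; if a_cbInstr is below the minimum, the subtraction
 * wraps to a huge value and the comparison fails.  A standalone demo:
 */
#if 0
# include <assert.h>
# include <stdbool.h>

static bool ExampleIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    return cbInstr - cbMin <= 15u - cbMin;
}

int main(void)
{
    assert(!ExampleIsValidInstrLen( 0, 1));     /* too short, wraps around  */
    assert( ExampleIsValidInstrLen( 1, 1));     /* minimum length           */
    assert( ExampleIsValidInstrLen(15, 1));     /* maximum x86 length       */
    assert(!ExampleIsValidInstrLen(16, 1));     /* too long                 */
    return 0;
}
#endif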
14551
14552/**
14553 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14554 *
14555 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14556 *
14557 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14559 * @param rcStrict The status code to fiddle.
14560 */
14561DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14562{
14563 iemUninitExec(pVCpu);
14564#ifdef IN_RC
14565 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14566 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14567#else
14568 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14569#endif
14570}
14571
14572
14573/**
14574 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14575 *
14576 * This API ASSUMES that the caller has already verified that the guest code is
14577 * allowed to access the I/O port. (The I/O port is in the DX register in the
14578 * guest state.)
14579 *
14580 * @returns Strict VBox status code.
14581 * @param pVCpu The cross context virtual CPU structure.
14582 * @param cbValue The size of the I/O port access (1, 2, or 4).
14583 * @param enmAddrMode The addressing mode.
14584 * @param fRepPrefix Indicates whether a repeat prefix is used
14585 * (doesn't matter which for this instruction).
14586 * @param cbInstr The instruction length in bytes.
14587 * @param   iEffSeg         The effective segment register.
14588 * @param fIoChecked Whether the access to the I/O port has been
14589 * checked or not. It's typically checked in the
14590 * HM scenario.
14591 */
14592VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14593 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14594{
14595 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14596 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14597
14598 /*
14599 * State init.
14600 */
14601 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14602
14603 /*
14604 * Switch orgy for getting to the right handler.
14605 */
14606 VBOXSTRICTRC rcStrict;
14607 if (fRepPrefix)
14608 {
14609 switch (enmAddrMode)
14610 {
14611 case IEMMODE_16BIT:
14612 switch (cbValue)
14613 {
14614 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14615 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14616 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14617 default:
14618 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14619 }
14620 break;
14621
14622 case IEMMODE_32BIT:
14623 switch (cbValue)
14624 {
14625 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14626 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14627 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14628 default:
14629 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14630 }
14631 break;
14632
14633 case IEMMODE_64BIT:
14634 switch (cbValue)
14635 {
14636 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14637 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14638 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14639 default:
14640 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14641 }
14642 break;
14643
14644 default:
14645 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14646 }
14647 }
14648 else
14649 {
14650 switch (enmAddrMode)
14651 {
14652 case IEMMODE_16BIT:
14653 switch (cbValue)
14654 {
14655 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14656 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14657 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14658 default:
14659 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14660 }
14661 break;
14662
14663 case IEMMODE_32BIT:
14664 switch (cbValue)
14665 {
14666 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14667 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14668 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14669 default:
14670 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14671 }
14672 break;
14673
14674 case IEMMODE_64BIT:
14675 switch (cbValue)
14676 {
14677 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14678 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14679 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14680 default:
14681 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14682 }
14683 break;
14684
14685 default:
14686 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14687 }
14688 }
14689
14690 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14691}
14692
14693
14694/**
14695 * Interface for HM and EM for executing string I/O IN (read) instructions.
14696 *
14697 * This API ASSUMES that the caller has already verified that the guest code is
14698 * allowed to access the I/O port. (The I/O port is in the DX register in the
14699 * guest state.)
14700 *
14701 * @returns Strict VBox status code.
14702 * @param pVCpu The cross context virtual CPU structure.
14703 * @param cbValue The size of the I/O port access (1, 2, or 4).
14704 * @param enmAddrMode The addressing mode.
14705 * @param fRepPrefix Indicates whether a repeat prefix is used
14706 * (doesn't matter which for this instruction).
14707 * @param cbInstr The instruction length in bytes.
14708 * @param fIoChecked Whether the access to the I/O port has been
14709 * checked or not. It's typically checked in the
14710 * HM scenario.
14711 */
14712VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14713 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14714{
14715 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14716
14717 /*
14718 * State init.
14719 */
14720 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14721
14722 /*
14723 * Switch orgy for getting to the right handler.
14724 */
14725 VBOXSTRICTRC rcStrict;
14726 if (fRepPrefix)
14727 {
14728 switch (enmAddrMode)
14729 {
14730 case IEMMODE_16BIT:
14731 switch (cbValue)
14732 {
14733 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14734 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14735 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14736 default:
14737 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14738 }
14739 break;
14740
14741 case IEMMODE_32BIT:
14742 switch (cbValue)
14743 {
14744 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14745 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14746 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14747 default:
14748 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14749 }
14750 break;
14751
14752 case IEMMODE_64BIT:
14753 switch (cbValue)
14754 {
14755 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14756 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14757 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14758 default:
14759 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14760 }
14761 break;
14762
14763 default:
14764 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14765 }
14766 }
14767 else
14768 {
14769 switch (enmAddrMode)
14770 {
14771 case IEMMODE_16BIT:
14772 switch (cbValue)
14773 {
14774 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14775 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14776 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14777 default:
14778 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14779 }
14780 break;
14781
14782 case IEMMODE_32BIT:
14783 switch (cbValue)
14784 {
14785 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14786 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14787 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14788 default:
14789 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14790 }
14791 break;
14792
14793 case IEMMODE_64BIT:
14794 switch (cbValue)
14795 {
14796 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14797 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14798 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14799 default:
14800 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14801 }
14802 break;
14803
14804 default:
14805 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14806 }
14807 }
14808
14809 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14810}
14811
14812
14813/**
14814 * Interface for raw-mode to execute an OUT instruction.
14815 *
14816 * @returns Strict VBox status code.
14817 * @param pVCpu The cross context virtual CPU structure.
14818 * @param cbInstr The instruction length in bytes.
14819 * @param   u16Port     The port to write to.
14820 * @param cbReg The register size.
14821 *
14822 * @remarks In ring-0 not all of the state needs to be synced in.
14823 */
14824VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14825{
14826 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14827 Assert(cbReg <= 4 && cbReg != 3);
14828
14829 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14830 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14831 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14832}
14833
14834
14835/**
14836 * Interface for raw-mode to execute an IN instruction.
14837 *
14838 * @returns Strict VBox status code.
14839 * @param pVCpu The cross context virtual CPU structure.
14840 * @param cbInstr The instruction length in bytes.
14841 * @param u16Port The port to read.
14842 * @param cbReg The register size.
14843 */
14844VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14845{
14846 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14847 Assert(cbReg <= 4 && cbReg != 3);
14848
14849 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14850 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14851 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14852}
14853
14854
14855/**
14856 * Interface for HM and EM to write to a CRx register.
14857 *
14858 * @returns Strict VBox status code.
14859 * @param pVCpu The cross context virtual CPU structure.
14860 * @param cbInstr The instruction length in bytes.
14861 * @param iCrReg The control register number (destination).
14862 * @param iGReg The general purpose register number (source).
14863 *
14864 * @remarks In ring-0 not all of the state needs to be synced in.
14865 */
14866VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14867{
14868 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14869 Assert(iCrReg < 16);
14870 Assert(iGReg < 16);
14871
14872 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14873 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14874 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14875}
14876
14877
14878/**
14879 * Interface for HM and EM to read from a CRx register.
14880 *
14881 * @returns Strict VBox status code.
14882 * @param pVCpu The cross context virtual CPU structure.
14883 * @param cbInstr The instruction length in bytes.
14884 * @param iGReg The general purpose register number (destination).
14885 * @param iCrReg The control register number (source).
14886 *
14887 * @remarks In ring-0 not all of the state needs to be synced in.
14888 */
14889VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14890{
14891 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14892 Assert(iCrReg < 16);
14893 Assert(iGReg < 16);
14894
14895 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14896 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14897 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14898}
14899
14900
14901/**
14902 * Interface for HM and EM to clear the CR0[TS] bit.
14903 *
14904 * @returns Strict VBox status code.
14905 * @param pVCpu The cross context virtual CPU structure.
14906 * @param cbInstr The instruction length in bytes.
14907 *
14908 * @remarks In ring-0 not all of the state needs to be synced in.
14909 */
14910VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14911{
14912 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14913
14914 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14915 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14916 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14917}
14918
14919
14920/**
14921 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14922 *
14923 * @returns Strict VBox status code.
14924 * @param pVCpu The cross context virtual CPU structure.
14925 * @param cbInstr The instruction length in bytes.
14926 * @param uValue The value to load into CR0.
14927 *
14928 * @remarks In ring-0 not all of the state needs to be synced in.
14929 */
14930VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14931{
14932 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14933
14934 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14935 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14936 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14937}
14938
14939
14940/**
14941 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14942 *
14943 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14944 *
14945 * @returns Strict VBox status code.
14946 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14947 * @param cbInstr The instruction length in bytes.
14948 * @remarks In ring-0 not all of the state needs to be synced in.
14949 * @thread EMT(pVCpu)
14950 */
14951VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14952{
14953 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14954
14955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14956 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14957 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14958}
14959
14960
14961#ifdef VBOX_WITH_NESTED_HWVIRT
14962/**
14963 * Checks if IEM is in the process of delivering an event (interrupt or
14964 * exception).
14965 *
14966 * @returns true if it's raising an interrupt or exception, false otherwise.
14967 * @param pVCpu The cross context virtual CPU structure.
14968 */
14969VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
14970{
14971 return pVCpu->iem.s.cXcptRecursions > 0;
14972}
14973
14974
14975/**
14976 * Interface for HM and EM to emulate the CLGI instruction.
14977 *
14978 * @returns Strict VBox status code.
14979 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14980 * @param cbInstr The instruction length in bytes.
14981 * @thread EMT(pVCpu)
14982 */
14983VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
14984{
14985 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14986
14987 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14988 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
14989 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14990}
14991
14992
14993/**
14994 * Interface for HM and EM to emulate the STGI instruction.
14995 *
14996 * @returns Strict VBox status code.
14997 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14998 * @param cbInstr The instruction length in bytes.
14999 * @thread EMT(pVCpu)
15000 */
15001VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15002{
15003 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15004
15005 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15006 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15007 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15008}
15009
15010
15011/**
15012 * Interface for HM and EM to emulate the VMLOAD instruction.
15013 *
15014 * @returns Strict VBox status code.
15015 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15016 * @param cbInstr The instruction length in bytes.
15017 * @thread EMT(pVCpu)
15018 */
15019VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15020{
15021 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15022
15023 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15024 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15025 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15026}
15027
15028
15029/**
15030 * Interface for HM and EM to emulate the VMSAVE instruction.
15031 *
15032 * @returns Strict VBox status code.
15033 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15034 * @param cbInstr The instruction length in bytes.
15035 * @thread EMT(pVCpu)
15036 */
15037VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15038{
15039 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15040
15041 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15042 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15043 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15044}
15045
15046
15047/**
15048 * Interface for HM and EM to emulate the INVLPGA instruction.
15049 *
15050 * @returns Strict VBox status code.
15051 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15052 * @param cbInstr The instruction length in bytes.
15053 * @thread EMT(pVCpu)
15054 */
15055VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15056{
15057 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15058
15059 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15060 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15061 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15062}
15063#endif /* VBOX_WITH_NESTED_HWVIRT */
15064
15065#ifdef IN_RING3
15066
15067/**
15068 * Handles the unlikely and probably fatal merge cases.
15069 *
15070 * @returns Merged status code.
15071 * @param rcStrict Current EM status code.
15072 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15073 * with @a rcStrict.
15074 * @param iMemMap The memory mapping index. For error reporting only.
15075 * @param pVCpu The cross context virtual CPU structure of the calling
15076 * thread, for error reporting only.
15077 */
15078DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15079 unsigned iMemMap, PVMCPU pVCpu)
15080{
15081 if (RT_FAILURE_NP(rcStrict))
15082 return rcStrict;
15083
15084 if (RT_FAILURE_NP(rcStrictCommit))
15085 return rcStrictCommit;
15086
15087 if (rcStrict == rcStrictCommit)
15088 return rcStrictCommit;
15089
15090 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15091 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15092 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15093 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15094 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15095 return VERR_IOM_FF_STATUS_IPE;
15096}
15097
15098
15099/**
15100 * Helper for IOMR3ProcessForceFlag.
15101 *
15102 * @returns Merged status code.
15103 * @param rcStrict Current EM status code.
15104 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15105 * with @a rcStrict.
15106 * @param iMemMap The memory mapping index. For error reporting only.
15107 * @param pVCpu The cross context virtual CPU structure of the calling
15108 * thread, for error reporting only.
15109 */
15110DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15111{
15112 /* Simple. */
15113 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15114 return rcStrictCommit;
15115
15116 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15117 return rcStrict;
15118
15119 /* EM scheduling status codes. */
15120 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15121 && rcStrict <= VINF_EM_LAST))
15122 {
15123 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15124 && rcStrictCommit <= VINF_EM_LAST))
15125 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15126 }
15127
15128 /* Unlikely */
15129 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15130}
15131
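/*
 * Illustrative sketch only (not part of the build): when both status codes
 * are EM scheduling requests, iemR3MergeStatus keeps the numerically smaller
 * one, which is the higher priority request.  Placeholder constants are used
 * instead of the real VINF_EM_FIRST/VINF_EM_LAST values.
 */
#if 0
# include <assert.h>

# define EX_VINF_EM_FIRST 100   /* placeholder */
# define EX_VINF_EM_LAST  199   /* placeholder */

static int ExampleMergeEmStatus(int rc1, int rc2)
{
    if (   rc1 >= EX_VINF_EM_FIRST && rc1 <= EX_VINF_EM_LAST
        && rc2 >= EX_VINF_EM_FIRST && rc2 <= EX_VINF_EM_LAST)
        return rc1 < rc2 ? rc1 : rc2;   /* smaller value = higher priority */
    return rc1;                         /* anything else needs the slow path */
}

int main(void)
{
    assert(ExampleMergeEmStatus(110, 150) == 110);
    assert(ExampleMergeEmStatus(180, 120) == 120);
    return 0;
}
#endif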
15132
15133/**
15134 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15135 *
15136 * @returns Merge between @a rcStrict and what the commit operation returned.
15137 * @param pVM The cross context VM structure.
15138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15139 * @param rcStrict The status code returned by ring-0 or raw-mode.
15140 */
15141VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15142{
15143 /*
15144 * Reset the pending commit.
15145 */
15146 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15147 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15148 ("%#x %#x %#x\n",
15149 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15150 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15151
15152 /*
15153 * Commit the pending bounce buffers (usually just one).
15154 */
15155 unsigned cBufs = 0;
15156 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15157 while (iMemMap-- > 0)
15158 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15159 {
15160 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15161 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15162 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15163
15164 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15165 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15166 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15167
15168 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15169 {
15170 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15171 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15172 pbBuf,
15173 cbFirst,
15174 PGMACCESSORIGIN_IEM);
15175 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15176 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15177 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15178 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15179 }
15180
15181 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15182 {
15183 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15184 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15185 pbBuf + cbFirst,
15186 cbSecond,
15187 PGMACCESSORIGIN_IEM);
15188 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15189 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15190 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15191 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15192 }
15193 cBufs++;
15194 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15195 }
15196
15197 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15198 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15199 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15200 pVCpu->iem.s.cActiveMappings = 0;
15201 return rcStrict;
15202}
15203
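/*
 * Illustrative sketch only (not part of the build): a pending bounce-buffer
 * write that straddles a page boundary is committed in two parts, the first
 * up to the end of the page and the remainder afterwards, which is why the
 * commit loop above may issue two PGMPhysWrite calls per mapping.  The
 * callback type and the contiguous second page are assumptions of this
 * sketch only.
 */
#if 0
# include <stddef.h>
# include <stdint.h>

# define EX_PAGE_SIZE 4096u                     /* placeholder page size */

typedef void FNEXAMPLEPHYSWRITE(uint64_t GCPhys, const uint8_t *pb, size_t cb);

static void ExampleCommitSplitWrite(uint64_t GCPhysFirst, const uint8_t *pbBuf, size_t cb,
                                    FNEXAMPLEPHYSWRITE *pfnWrite)
{
    size_t const cbFirst = EX_PAGE_SIZE - (size_t)(GCPhysFirst & (EX_PAGE_SIZE - 1));
    if (cb <= cbFirst)
        pfnWrite(GCPhysFirst, pbBuf, cb);                                   /* fits on one page */
    else
    {
        pfnWrite(GCPhysFirst,           pbBuf,           cbFirst);          /* up to the boundary */
        pfnWrite(GCPhysFirst + cbFirst, pbBuf + cbFirst, cb - cbFirst);     /* the rest (assumed contiguous) */
    }
}
#endif
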
15204#endif /* IN_RING3 */
15205