VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@74683

Last change on this file: r74683, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits; Add task switch intercept.

1/* $Id: IEMAll.cpp 74683 2018-10-08 15:13:52Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
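/*
 * Quick illustration (not taken from the docs above) of how those levels map
 * onto the logging macros used throughout this file; the format strings and
 * variables below are made-up placeholders:
 *     Log(("IEM: something major happened\n"));            - level 1
 *     LogFlow(("IEMExecOne: enter\n"));                     - flow
 *     Log4(("decode - fetched opcode %#x\n", bOpcode));     - decode w/ EIP
 */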
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
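/*
 * Typical usage sketch (for orientation only, not real code from this file):
 * fetch a descriptor via iemMemFetchSelDesc (forward declared below) and
 * inspect it through the legacy view; error handling is abbreviated.
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     if (!Desc.Legacy.Gen.u1Present)
 *         return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
 */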
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
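/*
 * Rough sketch of what IEM_WITH_SETJMP buys us: many raise/fetch helpers gain
 * *Jmp siblings (e.g. iemRaisePageFaultJmp, forward declared below) that
 * longjmp out of the instruction instead of returning a VBOXSTRICTRC, so the
 * hot decode/execute path does not need a status check after every access:
 *
 *     without setjmp:   rcStrict = iemMemFetchDataU32(pVCpu, &u32, iSegReg, GCPtrMem);
 *                       if (rcStrict != VINF_SUCCESS) return rcStrict;
 *     with setjmp:      u32 = <fetch helper that longjmps on failure>;
 */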
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
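/*
 * Typical use of the first variant (sketch) - the macro supplies the
 * 'default:' label itself, so a switch over an enum can simply end with it:
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: ... break;
 *         case IEMMODE_32BIT: ... break;
 *         case IEMMODE_64BIT: ... break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */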
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
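/*
 * Non-normative sketch of how the FNIEMOP_DEF and FNIEMOP_CALL macros pair up;
 * the decoder name below is invented for the example:
 *
 *     FNIEMOP_DEF(iemOp_ExampleOpcode)
 *     {
 *         ... decode operands and dispatch to the C implementation ...
 *         return VINF_SUCCESS;
 *     }
 *
 *     // later, dispatching from the one-byte opcode table:
 *     //     return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */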
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored if not 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387
388/**
389 * Check if the guest has entered VMX root operation.
390 */
391# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
392
393/**
394 * Check if the guest has entered VMX non-root operation.
395 */
396# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
397
398/**
399 * Check if the nested-guest has the given Pin-based VM-execution control set.
400 */
401# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
402 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
403
404/**
405 * Check if the nested-guest has the given Processor-based VM-execution control set.
406 */
407# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
408 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
409
410/**
411 * Check if the nested-guest has the given Secondary Processor-based VM-execution
412 * control set.
413 */
414# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
415 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
416
417/**
418 * Invokes the VMX VM-exit handler for an instruction intercept.
419 */
420# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
421 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
422
423/**
424 * Invokes the VMX VM-exit handler for an instruction intercept where the
425 * instruction provides additional VM-exit information.
426 */
427# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
428 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
429
430/**
431 * Invokes the VMX VM-exit handler for a task switch.
432 */
433# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss) \
434 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss)); } while (0)
435
436#else
437# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
438# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
439# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
440# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
441# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
442# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss) do { return VERR_VMX_IPE_1; } while (0)
443# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
444# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
445
446#endif
447
448#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
449/**
450 * Check if an SVM control/instruction intercept is set.
451 */
452# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
453 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
454
455/**
456 * Check if an SVM read CRx intercept is set.
457 */
458# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
459 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
460
461/**
462 * Check if an SVM write CRx intercept is set.
463 */
464# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
465 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
466
467/**
468 * Check if an SVM read DRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
471 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
472
473/**
474 * Check if an SVM write DRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
477 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
478
479/**
480 * Check if an SVM exception intercept is set.
481 */
482# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
483 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
484
485/**
486 * Invokes the SVM \#VMEXIT handler for the nested-guest.
487 */
488# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
489 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
490
491/**
492 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
493 * corresponding decode assist information.
494 */
495# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
496 do \
497 { \
498 uint64_t uExitInfo1; \
499 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
500 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
501 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
502 else \
503 uExitInfo1 = 0; \
504 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
505 } while (0)
506
507/** Checks and handles the SVM nested-guest instruction intercept, updating the
508 * NRIP if needed.
509 */
510# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
511 do \
512 { \
513 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
514 { \
515 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
517 } \
518 } while (0)
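/* Typical use from an instruction implementation (sketch); substitute the
 * SVM_CTRL_INTERCEPT_xxx and SVM_EXIT_xxx constants matching the instruction:
 *     IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_xxx, SVM_EXIT_xxx, 0, 0);
 */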
519
520/** Checks and handles SVM nested-guest CR0 read intercept. */
521# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
522 do \
523 { \
524 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
525 { /* probably likely */ } \
526 else \
527 { \
528 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
529 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
530 } \
531 } while (0)
532
533/**
534 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
535 */
536# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
537 do { \
538 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
539 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
540 } while (0)
541
542#else
543# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
544# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
545# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
546# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
547# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
548# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
549# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
550# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
551# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
552# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
553# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
554
555#endif
556
557
558/*********************************************************************************************************************************
559* Global Variables *
560*********************************************************************************************************************************/
561extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
562
563
564/** Function table for the ADD instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
566{
567 iemAImpl_add_u8, iemAImpl_add_u8_locked,
568 iemAImpl_add_u16, iemAImpl_add_u16_locked,
569 iemAImpl_add_u32, iemAImpl_add_u32_locked,
570 iemAImpl_add_u64, iemAImpl_add_u64_locked
571};
572
573/** Function table for the ADC instruction. */
574IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
575{
576 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
577 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
578 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
579 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
580};
581
582/** Function table for the SUB instruction. */
583IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
584{
585 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
586 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
587 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
588 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
589};
590
591/** Function table for the SBB instruction. */
592IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
593{
594 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
595 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
596 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
597 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
598};
599
600/** Function table for the OR instruction. */
601IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
602{
603 iemAImpl_or_u8, iemAImpl_or_u8_locked,
604 iemAImpl_or_u16, iemAImpl_or_u16_locked,
605 iemAImpl_or_u32, iemAImpl_or_u32_locked,
606 iemAImpl_or_u64, iemAImpl_or_u64_locked
607};
608
609/** Function table for the XOR instruction. */
610IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
611{
612 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
613 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
614 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
615 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
616};
617
618/** Function table for the AND instruction. */
619IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
620{
621 iemAImpl_and_u8, iemAImpl_and_u8_locked,
622 iemAImpl_and_u16, iemAImpl_and_u16_locked,
623 iemAImpl_and_u32, iemAImpl_and_u32_locked,
624 iemAImpl_and_u64, iemAImpl_and_u64_locked
625};
626
627/** Function table for the CMP instruction.
628 * @remarks Making operand order ASSUMPTIONS.
629 */
630IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
631{
632 iemAImpl_cmp_u8, NULL,
633 iemAImpl_cmp_u16, NULL,
634 iemAImpl_cmp_u32, NULL,
635 iemAImpl_cmp_u64, NULL
636};
637
638/** Function table for the TEST instruction.
639 * @remarks Making operand order ASSUMPTIONS.
640 */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
642{
643 iemAImpl_test_u8, NULL,
644 iemAImpl_test_u16, NULL,
645 iemAImpl_test_u32, NULL,
646 iemAImpl_test_u64, NULL
647};
648
649/** Function table for the BT instruction. */
650IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
651{
652 NULL, NULL,
653 iemAImpl_bt_u16, NULL,
654 iemAImpl_bt_u32, NULL,
655 iemAImpl_bt_u64, NULL
656};
657
658/** Function table for the BTC instruction. */
659IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
660{
661 NULL, NULL,
662 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
663 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
664 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
665};
666
667/** Function table for the BTR instruction. */
668IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
669{
670 NULL, NULL,
671 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
672 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
673 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
674};
675
676/** Function table for the BTS instruction. */
677IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
678{
679 NULL, NULL,
680 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
681 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
682 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
683};
684
685/** Function table for the BSF instruction. */
686IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
687{
688 NULL, NULL,
689 iemAImpl_bsf_u16, NULL,
690 iemAImpl_bsf_u32, NULL,
691 iemAImpl_bsf_u64, NULL
692};
693
694/** Function table for the BSR instruction. */
695IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
696{
697 NULL, NULL,
698 iemAImpl_bsr_u16, NULL,
699 iemAImpl_bsr_u32, NULL,
700 iemAImpl_bsr_u64, NULL
701};
702
703/** Function table for the IMUL instruction. */
704IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
705{
706 NULL, NULL,
707 iemAImpl_imul_two_u16, NULL,
708 iemAImpl_imul_two_u32, NULL,
709 iemAImpl_imul_two_u64, NULL
710};
711
712/** Group 1 /r lookup table. */
713IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
714{
715 &g_iemAImpl_add,
716 &g_iemAImpl_or,
717 &g_iemAImpl_adc,
718 &g_iemAImpl_sbb,
719 &g_iemAImpl_and,
720 &g_iemAImpl_sub,
721 &g_iemAImpl_xor,
722 &g_iemAImpl_cmp
723};
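/* Usage sketch (see the 0x80..0x83 group-1 decoders): the ModR/M reg field
 * selects the operation, and the chosen IEMOPBINSIZES entry then provides the
 * operand-size and lock-prefix specific worker:
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */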
724
725/** Function table for the INC instruction. */
726IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
727{
728 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
729 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
730 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
731 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
732};
733
734/** Function table for the DEC instruction. */
735IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
736{
737 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
738 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
739 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
740 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
741};
742
743/** Function table for the NEG instruction. */
744IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
745{
746 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
747 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
748 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
749 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
750};
751
752/** Function table for the NOT instruction. */
753IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
754{
755 iemAImpl_not_u8, iemAImpl_not_u8_locked,
756 iemAImpl_not_u16, iemAImpl_not_u16_locked,
757 iemAImpl_not_u32, iemAImpl_not_u32_locked,
758 iemAImpl_not_u64, iemAImpl_not_u64_locked
759};
760
761
762/** Function table for the ROL instruction. */
763IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
764{
765 iemAImpl_rol_u8,
766 iemAImpl_rol_u16,
767 iemAImpl_rol_u32,
768 iemAImpl_rol_u64
769};
770
771/** Function table for the ROR instruction. */
772IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
773{
774 iemAImpl_ror_u8,
775 iemAImpl_ror_u16,
776 iemAImpl_ror_u32,
777 iemAImpl_ror_u64
778};
779
780/** Function table for the RCL instruction. */
781IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
782{
783 iemAImpl_rcl_u8,
784 iemAImpl_rcl_u16,
785 iemAImpl_rcl_u32,
786 iemAImpl_rcl_u64
787};
788
789/** Function table for the RCR instruction. */
790IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
791{
792 iemAImpl_rcr_u8,
793 iemAImpl_rcr_u16,
794 iemAImpl_rcr_u32,
795 iemAImpl_rcr_u64
796};
797
798/** Function table for the SHL instruction. */
799IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
800{
801 iemAImpl_shl_u8,
802 iemAImpl_shl_u16,
803 iemAImpl_shl_u32,
804 iemAImpl_shl_u64
805};
806
807/** Function table for the SHR instruction. */
808IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
809{
810 iemAImpl_shr_u8,
811 iemAImpl_shr_u16,
812 iemAImpl_shr_u32,
813 iemAImpl_shr_u64
814};
815
816/** Function table for the SAR instruction. */
817IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
818{
819 iemAImpl_sar_u8,
820 iemAImpl_sar_u16,
821 iemAImpl_sar_u32,
822 iemAImpl_sar_u64
823};
824
825
826/** Function table for the MUL instruction. */
827IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
828{
829 iemAImpl_mul_u8,
830 iemAImpl_mul_u16,
831 iemAImpl_mul_u32,
832 iemAImpl_mul_u64
833};
834
835/** Function table for the IMUL instruction working implicitly on rAX. */
836IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
837{
838 iemAImpl_imul_u8,
839 iemAImpl_imul_u16,
840 iemAImpl_imul_u32,
841 iemAImpl_imul_u64
842};
843
844/** Function table for the DIV instruction. */
845IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
846{
847 iemAImpl_div_u8,
848 iemAImpl_div_u16,
849 iemAImpl_div_u32,
850 iemAImpl_div_u64
851};
852
853/** Function table for the IDIV instruction. */
854IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
855{
856 iemAImpl_idiv_u8,
857 iemAImpl_idiv_u16,
858 iemAImpl_idiv_u32,
859 iemAImpl_idiv_u64
860};
861
862/** Function table for the SHLD instruction */
863IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
864{
865 iemAImpl_shld_u16,
866 iemAImpl_shld_u32,
867 iemAImpl_shld_u64,
868};
869
870/** Function table for the SHRD instruction */
871IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
872{
873 iemAImpl_shrd_u16,
874 iemAImpl_shrd_u32,
875 iemAImpl_shrd_u64,
876};
877
878
879/** Function table for the PUNPCKLBW instruction */
880IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
881/** Function table for the PUNPCKLWD instruction */
882IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
883/** Function table for the PUNPCKLDQ instruction */
884IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
885/** Function table for the PUNPCKLQDQ instruction */
886IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
887
888/** Function table for the PUNPCKHBW instruction */
889IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
890/** Function table for the PUNPCKHWD instruction */
891IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
892/** Function table for the PUNPCKHDQ instruction */
893IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
894/** Function table for the PUNPCKHQDQ instruction */
895IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
896
897/** Function table for the PXOR instruction */
898IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
899/** Function table for the PCMPEQB instruction */
900IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
901/** Function table for the PCMPEQW instruction */
902IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
903/** Function table for the PCMPEQD instruction */
904IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
905
906
907#if defined(IEM_LOG_MEMORY_WRITES)
908/** What IEM just wrote. */
909uint8_t g_abIemWrote[256];
910/** How much IEM just wrote. */
911size_t g_cbIemWrote;
912#endif
913
914
915/*********************************************************************************************************************************
916* Internal Functions *
917*********************************************************************************************************************************/
918IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
919IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
920IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
921IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
922/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
923IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
924IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
925IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
926IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
927IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
928IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
929IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
930IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
931IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
932IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
933IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
934IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
935#ifdef IEM_WITH_SETJMP
936DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
937DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
938DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
939DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
940DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
941#endif
942
943IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
944IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
946IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
947IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
948IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
949IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
950IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
951IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
952IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
953IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
954IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
955IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
956IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
957IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
958IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
959IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
960
961#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
962IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss);
963#endif
964
965#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
966IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
967IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
968#endif
969
970
971/**
972 * Sets the pass up status.
973 *
974 * @returns VINF_SUCCESS.
975 * @param pVCpu The cross context virtual CPU structure of the
976 * calling thread.
977 * @param rcPassUp The pass up status. Must be informational.
978 * VINF_SUCCESS is not allowed.
979 */
980IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
981{
982 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
983
984 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
985 if (rcOldPassUp == VINF_SUCCESS)
986 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
987 /* If both are EM scheduling codes, use EM priority rules. */
988 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
989 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
990 {
991 if (rcPassUp < rcOldPassUp)
992 {
993 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
994 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
995 }
996 else
997 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
998 }
999 /* Override EM scheduling with specific status code. */
1000 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1001 {
1002 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1003 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1004 }
1005 /* Don't override specific status code, first come first served. */
1006 else
1007 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1008 return VINF_SUCCESS;
1009}
1010
1011
1012/**
1013 * Calculates the CPU mode.
1014 *
1015 * This is mainly for updating IEMCPU::enmCpuMode.
1016 *
1017 * @returns CPU mode.
1018 * @param pVCpu The cross context virtual CPU structure of the
1019 * calling thread.
1020 */
1021DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1022{
1023 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1024 return IEMMODE_64BIT;
1025 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1026 return IEMMODE_32BIT;
1027 return IEMMODE_16BIT;
1028}
1029
1030
1031/**
1032 * Initializes the execution state.
1033 *
1034 * @param pVCpu The cross context virtual CPU structure of the
1035 * calling thread.
1036 * @param fBypassHandlers Whether to bypass access handlers.
1037 *
1038 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1039 * side-effects in strict builds.
1040 */
1041DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1042{
1043 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1044 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1045
1046#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1049 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1050 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1051 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1052 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1053 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1054 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1055#endif
1056
1057#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1058 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1059#endif
1060 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1061 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1062#ifdef VBOX_STRICT
1063 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1064 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1065 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1066 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1067 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1068 pVCpu->iem.s.uRexReg = 127;
1069 pVCpu->iem.s.uRexB = 127;
1070 pVCpu->iem.s.offModRm = 127;
1071 pVCpu->iem.s.uRexIndex = 127;
1072 pVCpu->iem.s.iEffSeg = 127;
1073 pVCpu->iem.s.idxPrefix = 127;
1074 pVCpu->iem.s.uVex3rdReg = 127;
1075 pVCpu->iem.s.uVexLength = 127;
1076 pVCpu->iem.s.fEvexStuff = 127;
1077 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1078# ifdef IEM_WITH_CODE_TLB
1079 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1080 pVCpu->iem.s.pbInstrBuf = NULL;
1081 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1082 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1083 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1084 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1085# else
1086 pVCpu->iem.s.offOpcode = 127;
1087 pVCpu->iem.s.cbOpcode = 127;
1088# endif
1089#endif
1090
1091 pVCpu->iem.s.cActiveMappings = 0;
1092 pVCpu->iem.s.iNextMapping = 0;
1093 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1094 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1095#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1096 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1097 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1098 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1099 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1100 if (!pVCpu->iem.s.fInPatchCode)
1101 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1102#endif
1103}
1104
1105#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1106/**
1107 * Performs a minimal reinitialization of the execution state.
1108 *
1109 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1110 * 'world-switch' type operations on the CPU. Currently only nested
1111 * hardware-virtualization uses it.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1114 */
1115IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1116{
1117 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1118 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1119
1120 pVCpu->iem.s.uCpl = uCpl;
1121 pVCpu->iem.s.enmCpuMode = enmMode;
1122 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1123 pVCpu->iem.s.enmEffAddrMode = enmMode;
1124 if (enmMode != IEMMODE_64BIT)
1125 {
1126 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1127 pVCpu->iem.s.enmEffOpSize = enmMode;
1128 }
1129 else
1130 {
1131 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1132 pVCpu->iem.s.enmEffOpSize = enmMode;
1133 }
1134 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1135#ifndef IEM_WITH_CODE_TLB
1136 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1137 pVCpu->iem.s.offOpcode = 0;
1138 pVCpu->iem.s.cbOpcode = 0;
1139#endif
1140 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1141}
1142#endif
1143
1144/**
1145 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1146 *
1147 * @param pVCpu The cross context virtual CPU structure of the
1148 * calling thread.
1149 */
1150DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1151{
1152 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1153#ifdef VBOX_STRICT
1154# ifdef IEM_WITH_CODE_TLB
1155 NOREF(pVCpu);
1156# else
1157 pVCpu->iem.s.cbOpcode = 0;
1158# endif
1159#else
1160 NOREF(pVCpu);
1161#endif
1162}
1163
1164
1165/**
1166 * Initializes the decoder state.
1167 *
1168 * iemReInitDecoder is mostly a copy of this function.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 * @param fBypassHandlers Whether to bypass access handlers.
1173 */
1174DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1175{
1176 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1177 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1178
1179#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1181 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1182 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1184 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1185 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1186 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1187 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1188#endif
1189
1190#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1191 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1192#endif
1193 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1194 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1195 pVCpu->iem.s.enmCpuMode = enmMode;
1196 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1197 pVCpu->iem.s.enmEffAddrMode = enmMode;
1198 if (enmMode != IEMMODE_64BIT)
1199 {
1200 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1201 pVCpu->iem.s.enmEffOpSize = enmMode;
1202 }
1203 else
1204 {
1205 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1206 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1207 }
1208 pVCpu->iem.s.fPrefixes = 0;
1209 pVCpu->iem.s.uRexReg = 0;
1210 pVCpu->iem.s.uRexB = 0;
1211 pVCpu->iem.s.uRexIndex = 0;
1212 pVCpu->iem.s.idxPrefix = 0;
1213 pVCpu->iem.s.uVex3rdReg = 0;
1214 pVCpu->iem.s.uVexLength = 0;
1215 pVCpu->iem.s.fEvexStuff = 0;
1216 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1217#ifdef IEM_WITH_CODE_TLB
1218 pVCpu->iem.s.pbInstrBuf = NULL;
1219 pVCpu->iem.s.offInstrNextByte = 0;
1220 pVCpu->iem.s.offCurInstrStart = 0;
1221# ifdef VBOX_STRICT
1222 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1223 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1224 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1225# endif
1226#else
1227 pVCpu->iem.s.offOpcode = 0;
1228 pVCpu->iem.s.cbOpcode = 0;
1229#endif
1230 pVCpu->iem.s.offModRm = 0;
1231 pVCpu->iem.s.cActiveMappings = 0;
1232 pVCpu->iem.s.iNextMapping = 0;
1233 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1234 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1235#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1236 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1237 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1238 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1239 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1240 if (!pVCpu->iem.s.fInPatchCode)
1241 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1242#endif
1243
1244#ifdef DBGFTRACE_ENABLED
1245 switch (enmMode)
1246 {
1247 case IEMMODE_64BIT:
1248 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1249 break;
1250 case IEMMODE_32BIT:
1251 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1252 break;
1253 case IEMMODE_16BIT:
1254 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1255 break;
1256 }
1257#endif
1258}
1259
1260
1261/**
1262 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1263 *
1264 * This is mostly a copy of iemInitDecoder.
1265 *
1266 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1267 */
1268DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1269{
1270 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1271
1272#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1273 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1274 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1275 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1276 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1278 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1281#endif
1282
1283 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1284 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1285 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1286 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1287 pVCpu->iem.s.enmEffAddrMode = enmMode;
1288 if (enmMode != IEMMODE_64BIT)
1289 {
1290 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1291 pVCpu->iem.s.enmEffOpSize = enmMode;
1292 }
1293 else
1294 {
1295 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1296 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1297 }
1298 pVCpu->iem.s.fPrefixes = 0;
1299 pVCpu->iem.s.uRexReg = 0;
1300 pVCpu->iem.s.uRexB = 0;
1301 pVCpu->iem.s.uRexIndex = 0;
1302 pVCpu->iem.s.idxPrefix = 0;
1303 pVCpu->iem.s.uVex3rdReg = 0;
1304 pVCpu->iem.s.uVexLength = 0;
1305 pVCpu->iem.s.fEvexStuff = 0;
1306 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1307#ifdef IEM_WITH_CODE_TLB
1308 if (pVCpu->iem.s.pbInstrBuf)
1309 {
1310 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1311 - pVCpu->iem.s.uInstrBufPc;
1312 if (off < pVCpu->iem.s.cbInstrBufTotal)
1313 {
1314 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1315 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1316 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1317 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1318 else
1319 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1320 }
1321 else
1322 {
1323 pVCpu->iem.s.pbInstrBuf = NULL;
1324 pVCpu->iem.s.offInstrNextByte = 0;
1325 pVCpu->iem.s.offCurInstrStart = 0;
1326 pVCpu->iem.s.cbInstrBuf = 0;
1327 pVCpu->iem.s.cbInstrBufTotal = 0;
1328 }
1329 }
1330 else
1331 {
1332 pVCpu->iem.s.offInstrNextByte = 0;
1333 pVCpu->iem.s.offCurInstrStart = 0;
1334 pVCpu->iem.s.cbInstrBuf = 0;
1335 pVCpu->iem.s.cbInstrBufTotal = 0;
1336 }
1337#else
1338 pVCpu->iem.s.cbOpcode = 0;
1339 pVCpu->iem.s.offOpcode = 0;
1340#endif
1341 pVCpu->iem.s.offModRm = 0;
1342 Assert(pVCpu->iem.s.cActiveMappings == 0);
1343 pVCpu->iem.s.iNextMapping = 0;
1344 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1345 Assert(pVCpu->iem.s.fBypassHandlers == false);
1346#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1347 if (!pVCpu->iem.s.fInPatchCode)
1348 { /* likely */ }
1349 else
1350 {
1351 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1352 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1353 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1354 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1355 if (!pVCpu->iem.s.fInPatchCode)
1356 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1357 }
1358#endif
1359
1360#ifdef DBGFTRACE_ENABLED
1361 switch (enmMode)
1362 {
1363 case IEMMODE_64BIT:
1364 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1365 break;
1366 case IEMMODE_32BIT:
1367 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1368 break;
1369 case IEMMODE_16BIT:
1370 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1371 break;
1372 }
1373#endif
1374}
1375
1376
1377
1378/**
1379 * Prefetches opcodes the first time when starting execution.
1380 *
1381 * @returns Strict VBox status code.
1382 * @param pVCpu The cross context virtual CPU structure of the
1383 * calling thread.
1384 * @param fBypassHandlers Whether to bypass access handlers.
1385 */
1386IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1387{
1388 iemInitDecoder(pVCpu, fBypassHandlers);
1389
1390#ifdef IEM_WITH_CODE_TLB
1391 /** @todo Do ITLB lookup here. */
1392
1393#else /* !IEM_WITH_CODE_TLB */
1394
1395 /*
1396 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1397 *
1398 * First translate CS:rIP to a physical address.
1399 */
1400 uint32_t cbToTryRead;
1401 RTGCPTR GCPtrPC;
1402 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1403 {
1404 cbToTryRead = PAGE_SIZE;
1405 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1406 if (IEM_IS_CANONICAL(GCPtrPC))
1407 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1408 else
1409 return iemRaiseGeneralProtectionFault0(pVCpu);
1410 }
1411 else
1412 {
1413 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1414 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1415 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1416 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1417 else
1418 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1419 if (cbToTryRead) { /* likely */ }
1420 else /* overflowed */
1421 {
1422 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1423 cbToTryRead = UINT32_MAX;
1424 }
1425 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1426 Assert(GCPtrPC <= UINT32_MAX);
1427 }
1428
1429# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1430 /* Allow interpretation of patch manager code blocks since they can for
1431 instance throw #PFs for perfectly good reasons. */
1432 if (pVCpu->iem.s.fInPatchCode)
1433 {
1434 size_t cbRead = 0;
1435 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1436 AssertRCReturn(rc, rc);
1437 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1438 return VINF_SUCCESS;
1439 }
1440# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1441
1442 RTGCPHYS GCPhys;
1443 uint64_t fFlags;
1444 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1445 if (RT_SUCCESS(rc)) { /* probable */ }
1446 else
1447 {
1448 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1449 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1450 }
1451 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1452 else
1453 {
1454 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1455 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1456 }
1457 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1458 else
1459 {
1460 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1461 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1462 }
1463 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1464 /** @todo Check reserved bits and such stuff. PGM is better at doing
1465 * that, so do it when implementing the guest virtual address
1466 * TLB... */
1467
1468 /*
1469 * Read the bytes at this address.
1470 */
1471 PVM pVM = pVCpu->CTX_SUFF(pVM);
1472# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1473 size_t cbActual;
1474 if ( PATMIsEnabled(pVM)
1475 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1476 {
1477 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1478 Assert(cbActual > 0);
1479 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1480 }
1481 else
1482# endif
1483 {
1484 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1485 if (cbToTryRead > cbLeftOnPage)
1486 cbToTryRead = cbLeftOnPage;
1487 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1488 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1489
1490 if (!pVCpu->iem.s.fBypassHandlers)
1491 {
1492 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1493 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1494 { /* likely */ }
1495 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1496 {
1497 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1498 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1499 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1500 }
1501 else
1502 {
1503 Log((RT_SUCCESS(rcStrict)
1504 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1505 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1506 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1507 return rcStrict;
1508 }
1509 }
1510 else
1511 {
1512 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1513 if (RT_SUCCESS(rc))
1514 { /* likely */ }
1515 else
1516 {
1517 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1518 GCPtrPC, GCPhys, cbToTryRead, rc));
1519 return rc;
1520 }
1521 }
1522 pVCpu->iem.s.cbOpcode = cbToTryRead;
1523 }
1524#endif /* !IEM_WITH_CODE_TLB */
1525 return VINF_SUCCESS;
1526}
1527
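
/*
 * Illustrative sketch (not compiled): the CS-limit clamping used above for
 * 16-bit and 32-bit code, condensed into one helper.  The helper name and
 * parameters are made up for illustration; the real code works directly on
 * pVCpu->cpum.GstCtx and assumes the caller already raised #GP when
 * uEip > uCsLimit.
 */
#if 0
static uint32_t iemSketchCalcPrefetchSize(uint32_t uEip, uint32_t uCsLimit, uint64_t uCsBase, uint32_t cbOpcodeBuf)
{
    /* Bytes left before the CS limit; the limit is inclusive, hence the +1. */
    uint32_t cbToTryRead = uCsLimit - uEip + 1;
    if (!cbToTryRead) /* uEip == 0 && uCsLimit == UINT32_MAX wrapped to zero. */
        cbToTryRead = UINT32_MAX;

    /* Clamp to the end of the page the linear address lands on. */
    uint64_t const GCPtrPC      = (uint32_t)uCsBase + uEip;
    uint32_t const cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
    if (cbToTryRead > cbLeftOnPage)
        cbToTryRead = cbLeftOnPage;

    /* ... and to what the opcode buffer can hold. */
    if (cbToTryRead > cbOpcodeBuf)
        cbToTryRead = cbOpcodeBuf;
    return cbToTryRead;
}
#endif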
1528
1529/**
1530 * Invalidates the IEM TLBs.
1531 *
1532 * This is called internally as well as by PGM when moving GC mappings.
1533 *
1535 * @param pVCpu The cross context virtual CPU structure of the calling
1536 * thread.
1537 * @param fVmm Set when PGM calls us with a remapping.
1538 */
1539VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1540{
1541#ifdef IEM_WITH_CODE_TLB
1542 pVCpu->iem.s.cbInstrBufTotal = 0;
1543 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1544 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1545 { /* very likely */ }
1546 else
1547 {
1548 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1549 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1550 while (i-- > 0)
1551 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1552 }
1553#endif
1554
1555#ifdef IEM_WITH_DATA_TLB
1556 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1557 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1558 { /* very likely */ }
1559 else
1560 {
1561 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1562 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1563 while (i-- > 0)
1564 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1565 }
1566#endif
1567 NOREF(pVCpu); NOREF(fVmm);
1568}
1569
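
/*
 * Illustrative sketch (not compiled): why bumping uTlbRevision above
 * invalidates the whole TLB lazily.  An entry's uTag is
 * (GCPtr >> X86_PAGE_SHIFT) | uTlbRevision, so once the revision has been
 * incremented no stored tag can match a freshly computed one - until the
 * revision wraps, at which point the code above zeroes all tags explicitly.
 * The field names follow the structures used in this file; the helper itself
 * is made up for illustration.
 */
#if 0
static bool iemSketchTlbLookupHit(IEMTLB const *pTlb, RTGCPTR GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
    /* 256 entries, directly mapped by the low 8 bits of the page number. */
    return pTlb->aEntries[(uint8_t)uTag].uTag == uTag;
}
#endif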
1570
1571/**
1572 * Invalidates a page in the TLBs.
1573 *
1574 * @param pVCpu The cross context virtual CPU structure of the calling
1575 * thread.
1576 * @param GCPtr The address of the page to invalidate.
1577 */
1578VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1579{
1580#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1581 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1582 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1583 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1584 uintptr_t idx = (uint8_t)GCPtr;
1585
1586# ifdef IEM_WITH_CODE_TLB
1587 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1588 {
1589 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1590 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592 }
1593# endif
1594
1595# ifdef IEM_WITH_DATA_TLB
1596 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1597 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1598# endif
1599#else
1600 NOREF(pVCpu); NOREF(GCPtr);
1601#endif
1602}
1603
1604
1605/**
1606 * Invalidates the host physical aspects of the IEM TLBs.
1607 *
1608 * This is called internally as well as by PGM when moving GC mappings.
1609 *
1610 * @param pVCpu The cross context virtual CPU structure of the calling
1611 * thread.
1612 */
1613VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1614{
1615#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1616 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1617
1618# ifdef IEM_WITH_CODE_TLB
1619 pVCpu->iem.s.cbInstrBufTotal = 0;
1620# endif
1621 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1622 if (uTlbPhysRev != 0)
1623 {
1624 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1625 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1626 }
1627 else
1628 {
1629 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1630 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1631
1632 unsigned i;
1633# ifdef IEM_WITH_CODE_TLB
1634 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1635 while (i-- > 0)
1636 {
1637 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1638 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1639 }
1640# endif
1641# ifdef IEM_WITH_DATA_TLB
1642 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1643 while (i-- > 0)
1644 {
1645 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1646 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1647 }
1648# endif
1649 }
1650#else
1651 NOREF(pVCpu);
1652#endif
1653}
1654
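
/*
 * Illustrative sketch (not compiled): how the physical revision is consumed.
 * Each entry keeps the physical revision it was resolved against in the high
 * bits of fFlagsAndPhysRev; when IEMTlbInvalidateAllPhysical bumps
 * uTlbPhysRev, the comparison below fails and the ring-3 mapping is
 * re-resolved via PGMPhysIemGCPhys2PtrNoLock (see iemOpcodeFetchBytesJmp).
 * The helper name is made up for illustration.
 */
#if 0
static bool iemSketchTlbPhysInfoIsCurrent(IEMTLBENTRY const *pTlbe, uint64_t uTlbPhysRev)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == uTlbPhysRev;
}
#endif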
1655
1656/**
1657 * Invalidates the host physical aspects of the IEM TLBs on all VCPUs.
1658 *
1659 * This is called internally as well as by PGM when moving GC mappings.
1660 *
1661 * @param pVM The cross context VM structure.
1662 *
1663 * @remarks Caller holds the PGM lock.
1664 */
1665VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1666{
1667 RT_NOREF_PV(pVM);
1668}
1669
1670#ifdef IEM_WITH_CODE_TLB
1671
1672/**
1673 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1674 * longjmp'ing on failure.
1675 *
1676 * We end up here for a number of reasons:
1677 * - pbInstrBuf isn't yet initialized.
1678 * - Advancing beyond the buffer boundary (e.g. cross page).
1679 * - Advancing beyond the CS segment limit.
1680 * - Fetching from non-mappable page (e.g. MMIO).
1681 *
1682 * @param pVCpu The cross context virtual CPU structure of the
1683 * calling thread.
1684 * @param pvDst Where to return the bytes.
1685 * @param cbDst Number of bytes to read.
1686 *
1687 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1688 */
1689IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1690{
1691#ifdef IN_RING3
1692 for (;;)
1693 {
1694 Assert(cbDst <= 8);
1695 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1696
1697 /*
1698 * We might have a partial buffer match, deal with that first to make the
1699 * rest simpler. This is the first part of the cross page/buffer case.
1700 */
1701 if (pVCpu->iem.s.pbInstrBuf != NULL)
1702 {
1703 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1704 {
1705 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1706 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1707 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1708
1709 cbDst -= cbCopy;
1710 pvDst = (uint8_t *)pvDst + cbCopy;
1711 offBuf += cbCopy;
1712 pVCpu->iem.s.offInstrNextByte = offBuf;
1713 }
1714 }
1715
1716 /*
1717 * Check segment limit, figuring how much we're allowed to access at this point.
1718 *
1719 * We will fault immediately if RIP is past the segment limit / in non-canonical
1720 * territory. If we do continue, there are one or more bytes to read before we
1721 * end up in trouble and we need to do that first before faulting.
1722 */
1723 RTGCPTR GCPtrFirst;
1724 uint32_t cbMaxRead;
1725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1726 {
1727 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1728 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1729 { /* likely */ }
1730 else
1731 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1732 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1733 }
1734 else
1735 {
1736 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1737 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1738 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1739 { /* likely */ }
1740 else
1741 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1742 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1743 if (cbMaxRead != 0)
1744 { /* likely */ }
1745 else
1746 {
1747 /* Overflowed because address is 0 and limit is max. */
1748 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1749 cbMaxRead = X86_PAGE_SIZE;
1750 }
1751 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1752 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1753 if (cbMaxRead2 < cbMaxRead)
1754 cbMaxRead = cbMaxRead2;
1755 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1756 }
1757
1758 /*
1759 * Get the TLB entry for this piece of code.
1760 */
1761 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1762 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1763 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1764 if (pTlbe->uTag == uTag)
1765 {
1766 /* likely when executing lots of code, otherwise unlikely */
1767# ifdef VBOX_WITH_STATISTICS
1768 pVCpu->iem.s.CodeTlb.cTlbHits++;
1769# endif
1770 }
1771 else
1772 {
1773 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1774# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1775 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1776 {
1777 pTlbe->uTag = uTag;
1778 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1779 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1780 pTlbe->GCPhys = NIL_RTGCPHYS;
1781 pTlbe->pbMappingR3 = NULL;
1782 }
1783 else
1784# endif
1785 {
1786 RTGCPHYS GCPhys;
1787 uint64_t fFlags;
1788 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1789 if (RT_FAILURE(rc))
1790 {
1791 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1792 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1793 }
1794
1795 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1796 pTlbe->uTag = uTag;
1797 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1798 pTlbe->GCPhys = GCPhys;
1799 pTlbe->pbMappingR3 = NULL;
1800 }
1801 }
1802
1803 /*
1804 * Check TLB page table level access flags.
1805 */
1806 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1807 {
1808 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1809 {
1810 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1811 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1812 }
1813 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1814 {
1815 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1816 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1817 }
1818 }
1819
1820# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1821 /*
1822 * Allow interpretation of patch manager code blocks since they can for
1823 * instance throw #PFs for perfectly good reasons.
1824 */
1825 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1826 { /* likely */ }
1827 else
1828 {
1829 /** @todo This could be optimized a little in ring-3 if we liked. */
1830 size_t cbRead = 0;
1831 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1832 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1833 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1834 return;
1835 }
1836# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1837
1838 /*
1839 * Look up the physical page info if necessary.
1840 */
1841 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1842 { /* not necessary */ }
1843 else
1844 {
1845 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1846 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1847 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1848 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1849 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1850 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1851 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1852 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1853 }
1854
1855# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1856 /*
1857 * Try do a direct read using the pbMappingR3 pointer.
1858 */
1859 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1860 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1861 {
1862 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1863 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1864 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1865 {
1866 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1867 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1868 }
1869 else
1870 {
1871 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1872 Assert(cbInstr < cbMaxRead);
1873 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1874 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1875 }
1876 if (cbDst <= cbMaxRead)
1877 {
1878 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1879 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1880 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1881 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1882 return;
1883 }
1884 pVCpu->iem.s.pbInstrBuf = NULL;
1885
1886 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1887 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1888 }
1889 else
1890# endif
1891#if 0
1892 /*
1893 * If there is no special read handling, we can read a bit more and
1894 * put it in the prefetch buffer.
1895 */
1896 if ( cbDst < cbMaxRead
1897 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1898 {
1899 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1900 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1901 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1902 { /* likely */ }
1903 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1904 {
1905 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1906 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1907 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1908 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1909 }
1910 else
1911 {
1912 Log((RT_SUCCESS(rcStrict)
1913 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1914 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1915 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1916 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1917 }
1918 }
1919 /*
1920 * Special read handling, so only read exactly what's needed.
1921 * This is a highly unlikely scenario.
1922 */
1923 else
1924#endif
1925 {
1926 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1927 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1928 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1929 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1930 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1931 { /* likely */ }
1932 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1933 {
1934 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1935 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1936 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1937 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1938 }
1939 else
1940 {
1941 Log((RT_SUCCESS(rcStrict)
1942 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1943 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1944 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1945 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1946 }
1947 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1948 if (cbToRead == cbDst)
1949 return;
1950 }
1951
1952 /*
1953 * More to read, loop.
1954 */
1955 cbDst -= cbMaxRead;
1956 pvDst = (uint8_t *)pvDst + cbMaxRead;
1957 }
1958#else
1959 RT_NOREF(pvDst, cbDst);
1960 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1961#endif
1962}
1963
1964#else
1965
1966/**
1967 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1968 * exception if it fails.
1969 *
1970 * @returns Strict VBox status code.
1971 * @param pVCpu The cross context virtual CPU structure of the
1972 * calling thread.
1973 * @param cbMin The minimum number of bytes relative to offOpcode
1974 * that must be read.
1975 */
1976IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1977{
1978 /*
1979 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1980 *
1981 * First translate CS:rIP to a physical address.
1982 */
1983 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1984 uint32_t cbToTryRead;
1985 RTGCPTR GCPtrNext;
1986 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1987 {
1988 cbToTryRead = PAGE_SIZE;
1989 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1990 if (!IEM_IS_CANONICAL(GCPtrNext))
1991 return iemRaiseGeneralProtectionFault0(pVCpu);
1992 }
1993 else
1994 {
1995 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1996 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1997 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1998 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1999 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2000 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2001 if (!cbToTryRead) /* overflowed */
2002 {
2003 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2004 cbToTryRead = UINT32_MAX;
2005 /** @todo check out wrapping around the code segment. */
2006 }
2007 if (cbToTryRead < cbMin - cbLeft)
2008 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2009 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2010 }
2011
2012 /* Only read up to the end of the page, and make sure we don't read more
2013 than the opcode buffer can hold. */
2014 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2015 if (cbToTryRead > cbLeftOnPage)
2016 cbToTryRead = cbLeftOnPage;
2017 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2018 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2019/** @todo r=bird: Convert assertion into undefined opcode exception? */
2020 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2021
2022# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2023 /* Allow interpretation of patch manager code blocks since they can for
2024 instance throw #PFs for perfectly good reasons. */
2025 if (pVCpu->iem.s.fInPatchCode)
2026 {
2027 size_t cbRead = 0;
2028 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2029 AssertRCReturn(rc, rc);
2030 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2031 return VINF_SUCCESS;
2032 }
2033# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2034
2035 RTGCPHYS GCPhys;
2036 uint64_t fFlags;
2037 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2038 if (RT_FAILURE(rc))
2039 {
2040 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2041 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2042 }
2043 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2044 {
2045 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2046 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2047 }
2048 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2049 {
2050 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2051 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2052 }
2053 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2054 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2055 /** @todo Check reserved bits and such stuff. PGM is better at doing
2056 * that, so do it when implementing the guest virtual address
2057 * TLB... */
2058
2059 /*
2060 * Read the bytes at this address.
2061 *
2062 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2063 * and since PATM should only patch the start of an instruction there
2064 * should be no need to check again here.
2065 */
2066 if (!pVCpu->iem.s.fBypassHandlers)
2067 {
2068 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2069 cbToTryRead, PGMACCESSORIGIN_IEM);
2070 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2071 { /* likely */ }
2072 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2073 {
2074 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2075 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2076 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2077 }
2078 else
2079 {
2080 Log((RT_SUCCESS(rcStrict)
2081 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2082 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2083 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2084 return rcStrict;
2085 }
2086 }
2087 else
2088 {
2089 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2090 if (RT_SUCCESS(rc))
2091 { /* likely */ }
2092 else
2093 {
2094 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2095 return rc;
2096 }
2097 }
2098 pVCpu->iem.s.cbOpcode += cbToTryRead;
2099 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2100
2101 return VINF_SUCCESS;
2102}
2103
2104#endif /* !IEM_WITH_CODE_TLB */
2105#ifndef IEM_WITH_SETJMP
2106
2107/**
2108 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2109 *
2110 * @returns Strict VBox status code.
2111 * @param pVCpu The cross context virtual CPU structure of the
2112 * calling thread.
2113 * @param pb Where to return the opcode byte.
2114 */
2115DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2116{
2117 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2118 if (rcStrict == VINF_SUCCESS)
2119 {
2120 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2121 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2122 pVCpu->iem.s.offOpcode = offOpcode + 1;
2123 }
2124 else
2125 *pb = 0;
2126 return rcStrict;
2127}
2128
2129
2130/**
2131 * Fetches the next opcode byte.
2132 *
2133 * @returns Strict VBox status code.
2134 * @param pVCpu The cross context virtual CPU structure of the
2135 * calling thread.
2136 * @param pu8 Where to return the opcode byte.
2137 */
2138DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2139{
2140 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2141 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2142 {
2143 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2144 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2145 return VINF_SUCCESS;
2146 }
2147 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2148}
2149
2150#else /* IEM_WITH_SETJMP */
2151
2152/**
2153 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2154 *
2155 * @returns The opcode byte.
2156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2157 */
2158DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2159{
2160# ifdef IEM_WITH_CODE_TLB
2161 uint8_t u8;
2162 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2163 return u8;
2164# else
2165 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2166 if (rcStrict == VINF_SUCCESS)
2167 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2168 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2169# endif
2170}
2171
2172
2173/**
2174 * Fetches the next opcode byte, longjmp on error.
2175 *
2176 * @returns The opcode byte.
2177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2178 */
2179DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2180{
2181# ifdef IEM_WITH_CODE_TLB
2182 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2183 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2184 if (RT_LIKELY( pbBuf != NULL
2185 && offBuf < pVCpu->iem.s.cbInstrBuf))
2186 {
2187 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2188 return pbBuf[offBuf];
2189 }
2190# else
2191 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2192 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2193 {
2194 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2195 return pVCpu->iem.s.abOpcode[offOpcode];
2196 }
2197# endif
2198 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2199}
2200
2201#endif /* IEM_WITH_SETJMP */
2202
2203/**
2204 * Fetches the next opcode byte, returns automatically on failure.
2205 *
2206 * @param a_pu8 Where to return the opcode byte.
2207 * @remark Implicitly references pVCpu.
2208 */
2209#ifndef IEM_WITH_SETJMP
2210# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2211 do \
2212 { \
2213 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2214 if (rcStrict2 == VINF_SUCCESS) \
2215 { /* likely */ } \
2216 else \
2217 return rcStrict2; \
2218 } while (0)
2219#else
2220# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2221#endif /* IEM_WITH_SETJMP */
2222
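
/*
 * Illustrative sketch (not compiled): how a decoder body uses the fetch
 * macros above.  In the status-code build IEM_OPCODE_GET_NEXT_U8 returns
 * from the enclosing function on failure, while in the setjmp build it
 * longjmps, so the same decoder source compiles for both.  The function
 * name below is made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemSketchDecodeImm8(PVMCPU pVCpu, uint8_t *pbImm)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);  /* returns or longjmps on fetch failure */
    *pbImm = bImm;
    return VINF_SUCCESS;
}
#endif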
2223
2224#ifndef IEM_WITH_SETJMP
2225/**
2226 * Fetches the next signed byte from the opcode stream.
2227 *
2228 * @returns Strict VBox status code.
2229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2230 * @param pi8 Where to return the signed byte.
2231 */
2232DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2233{
2234 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2235}
2236#endif /* !IEM_WITH_SETJMP */
2237
2238
2239/**
2240 * Fetches the next signed byte from the opcode stream, returning automatically
2241 * on failure.
2242 *
2243 * @param a_pi8 Where to return the signed byte.
2244 * @remark Implicitly references pVCpu.
2245 */
2246#ifndef IEM_WITH_SETJMP
2247# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2248 do \
2249 { \
2250 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2251 if (rcStrict2 != VINF_SUCCESS) \
2252 return rcStrict2; \
2253 } while (0)
2254#else /* IEM_WITH_SETJMP */
2255# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2256
2257#endif /* IEM_WITH_SETJMP */
2258
2259#ifndef IEM_WITH_SETJMP
2260
2261/**
2262 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2263 *
2264 * @returns Strict VBox status code.
2265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2266 * @param pu16 Where to return the sign-extended word.
2267 */
2268DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2269{
2270 uint8_t u8;
2271 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2272 if (rcStrict == VINF_SUCCESS)
2273 *pu16 = (int8_t)u8;
2274 return rcStrict;
2275}
2276
2277
2278/**
2279 * Fetches the next signed byte from the opcode stream, extending it to
2280 * unsigned 16-bit.
2281 *
2282 * @returns Strict VBox status code.
2283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2284 * @param pu16 Where to return the unsigned word.
2285 */
2286DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2287{
2288 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2289 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2290 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2291
2292 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2293 pVCpu->iem.s.offOpcode = offOpcode + 1;
2294 return VINF_SUCCESS;
2295}
2296
2297#endif /* !IEM_WITH_SETJMP */
2298
2299/**
2300 * Fetches the next signed byte from the opcode stream and sign-extends it to
2301 * a word, returning automatically on failure.
2302 *
2303 * @param a_pu16 Where to return the word.
2304 * @remark Implicitly references pVCpu.
2305 */
2306#ifndef IEM_WITH_SETJMP
2307# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2308 do \
2309 { \
2310 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2311 if (rcStrict2 != VINF_SUCCESS) \
2312 return rcStrict2; \
2313 } while (0)
2314#else
2315# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2316#endif
2317
2318#ifndef IEM_WITH_SETJMP
2319
2320/**
2321 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2322 *
2323 * @returns Strict VBox status code.
2324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2325 * @param pu32 Where to return the opcode dword.
2326 */
2327DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2328{
2329 uint8_t u8;
2330 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2331 if (rcStrict == VINF_SUCCESS)
2332 *pu32 = (int8_t)u8;
2333 return rcStrict;
2334}
2335
2336
2337/**
2338 * Fetches the next signed byte from the opcode stream, extending it to
2339 * unsigned 32-bit.
2340 *
2341 * @returns Strict VBox status code.
2342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2343 * @param pu32 Where to return the unsigned dword.
2344 */
2345DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2346{
2347 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2348 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2349 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2350
2351 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2352 pVCpu->iem.s.offOpcode = offOpcode + 1;
2353 return VINF_SUCCESS;
2354}
2355
2356#endif /* !IEM_WITH_SETJMP */
2357
2358/**
2359 * Fetches the next signed byte from the opcode stream and sign-extends it to
2360 * a double word, returning automatically on failure.
2361 *
2362 * @param a_pu32 Where to return the double word.
2363 * @remark Implicitly references pVCpu.
2364 */
2365#ifndef IEM_WITH_SETJMP
2366#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2367 do \
2368 { \
2369 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2370 if (rcStrict2 != VINF_SUCCESS) \
2371 return rcStrict2; \
2372 } while (0)
2373#else
2374# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2375#endif
2376
2377#ifndef IEM_WITH_SETJMP
2378
2379/**
2380 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2381 *
2382 * @returns Strict VBox status code.
2383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2384 * @param pu64 Where to return the opcode qword.
2385 */
2386DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2387{
2388 uint8_t u8;
2389 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2390 if (rcStrict == VINF_SUCCESS)
2391 *pu64 = (int8_t)u8;
2392 return rcStrict;
2393}
2394
2395
2396/**
2397 * Fetches the next signed byte from the opcode stream, extending it to
2398 * unsigned 64-bit.
2399 *
2400 * @returns Strict VBox status code.
2401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2402 * @param pu64 Where to return the unsigned qword.
2403 */
2404DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2405{
2406 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2407 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2408 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2409
2410 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2411 pVCpu->iem.s.offOpcode = offOpcode + 1;
2412 return VINF_SUCCESS;
2413}
2414
2415#endif /* !IEM_WITH_SETJMP */
2416
2417
2418/**
2419 * Fetches the next signed byte from the opcode stream and sign-extends it to
2420 * a quad word, returning automatically on failure.
2421 *
2422 * @param a_pu64 Where to return the quad word.
2423 * @remark Implicitly references pVCpu.
2424 */
2425#ifndef IEM_WITH_SETJMP
2426# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2427 do \
2428 { \
2429 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2430 if (rcStrict2 != VINF_SUCCESS) \
2431 return rcStrict2; \
2432 } while (0)
2433#else
2434# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2435#endif
2436
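
/*
 * Illustrative sketch (not compiled): the sign extension idiom used by the
 * S8_SX_U16/U32/U64 fetchers above.  Casting the byte to int8_t first makes
 * the compiler sign extend on widening, so an opcode byte of 0x80 yields
 * 0xff80, 0xffffff80 and 0xffffffffffffff80 respectively.  The function name
 * is made up for illustration.
 */
#if 0
static void iemSketchSignExtendByte(void)
{
    uint8_t const  b   = 0x80;
    uint16_t const u16 = (uint16_t)(int8_t)b;            /* 0xff80 */
    uint32_t const u32 = (uint32_t)(int8_t)b;            /* 0xffffff80 */
    uint64_t const u64 = (uint64_t)(int64_t)(int8_t)b;   /* 0xffffffffffffff80 */
    NOREF(u16); NOREF(u32); NOREF(u64);
}
#endif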
2437
2438#ifndef IEM_WITH_SETJMP
2439/**
2440 * Fetches the next opcode byte, which is expected to be a ModR/M byte, noting down its position.
2441 *
2442 * @returns Strict VBox status code.
2443 * @param pVCpu The cross context virtual CPU structure of the
2444 * calling thread.
2445 * @param pu8 Where to return the opcode byte.
2446 */
2447DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2448{
2449 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2450 pVCpu->iem.s.offModRm = offOpcode;
2451 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2452 {
2453 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2454 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2455 return VINF_SUCCESS;
2456 }
2457 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2458}
2459#else /* IEM_WITH_SETJMP */
2460/**
2461 * Fetches the next opcode byte, which is expected to be a ModR/M byte, noting down its position; longjmp on error.
2462 *
2463 * @returns The opcode byte.
2464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2465 */
2466DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2467{
2468# ifdef IEM_WITH_CODE_TLB
2469 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2470 pVCpu->iem.s.offModRm = offBuf;
2471 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2472 if (RT_LIKELY( pbBuf != NULL
2473 && offBuf < pVCpu->iem.s.cbInstrBuf))
2474 {
2475 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2476 return pbBuf[offBuf];
2477 }
2478# else
2479 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2480 pVCpu->iem.s.offModRm = offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2484 return pVCpu->iem.s.abOpcode[offOpcode];
2485 }
2486# endif
2487 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2488}
2489#endif /* IEM_WITH_SETJMP */
2490
2491/**
2492 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2493 * on failure.
2494 *
2495 * Will note down the position of the ModR/M byte for VT-x exits.
2496 *
2497 * @param a_pbRm Where to return the RM opcode byte.
2498 * @remark Implicitly references pVCpu.
2499 */
2500#ifndef IEM_WITH_SETJMP
2501# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2502 do \
2503 { \
2504 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2505 if (rcStrict2 == VINF_SUCCESS) \
2506 { /* likely */ } \
2507 else \
2508 return rcStrict2; \
2509 } while (0)
2510#else
2511# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2512#endif /* IEM_WITH_SETJMP */
2513
2514
2515#ifndef IEM_WITH_SETJMP
2516
2517/**
2518 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2519 *
2520 * @returns Strict VBox status code.
2521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2522 * @param pu16 Where to return the opcode word.
2523 */
2524DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2525{
2526 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2527 if (rcStrict == VINF_SUCCESS)
2528 {
2529 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2530# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2531 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2532# else
2533 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2534# endif
2535 pVCpu->iem.s.offOpcode = offOpcode + 2;
2536 }
2537 else
2538 *pu16 = 0;
2539 return rcStrict;
2540}
2541
2542
2543/**
2544 * Fetches the next opcode word.
2545 *
2546 * @returns Strict VBox status code.
2547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2548 * @param pu16 Where to return the opcode word.
2549 */
2550DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2551{
2552 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2553 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2554 {
2555 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2556# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2557 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2558# else
2559 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2560# endif
2561 return VINF_SUCCESS;
2562 }
2563 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2564}
2565
2566#else /* IEM_WITH_SETJMP */
2567
2568/**
2569 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2570 *
2571 * @returns The opcode word.
2572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2573 */
2574DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2575{
2576# ifdef IEM_WITH_CODE_TLB
2577 uint16_t u16;
2578 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2579 return u16;
2580# else
2581 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2582 if (rcStrict == VINF_SUCCESS)
2583 {
2584 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2585 pVCpu->iem.s.offOpcode += 2;
2586# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2587 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2588# else
2589 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2590# endif
2591 }
2592 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2593# endif
2594}
2595
2596
2597/**
2598 * Fetches the next opcode word, longjmp on error.
2599 *
2600 * @returns The opcode word.
2601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2602 */
2603DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2604{
2605# ifdef IEM_WITH_CODE_TLB
2606 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2607 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2608 if (RT_LIKELY( pbBuf != NULL
2609 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2610 {
2611 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2612# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2613 return *(uint16_t const *)&pbBuf[offBuf];
2614# else
2615 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2616# endif
2617 }
2618# else
2619 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2620 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2621 {
2622 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2623# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2624 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2625# else
2626 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2627# endif
2628 }
2629# endif
2630 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2631}
2632
2633#endif /* IEM_WITH_SETJMP */
2634
2635
2636/**
2637 * Fetches the next opcode word, returns automatically on failure.
2638 *
2639 * @param a_pu16 Where to return the opcode word.
2640 * @remark Implicitly references pVCpu.
2641 */
2642#ifndef IEM_WITH_SETJMP
2643# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2644 do \
2645 { \
2646 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2647 if (rcStrict2 != VINF_SUCCESS) \
2648 return rcStrict2; \
2649 } while (0)
2650#else
2651# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2652#endif
2653
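
/*
 * Illustrative sketch (not compiled): the two little-endian assembly
 * strategies the fetchers above switch between.  With
 * IEM_USE_UNALIGNED_DATA_ACCESS the bytes are read with one (possibly
 * unaligned) load; otherwise they are assembled byte by byte via
 * RT_MAKE_U16/RT_MAKE_U32_FROM_U8, which is endian and alignment safe.
 * The helper name is made up for illustration.
 */
#if 0
static uint16_t iemSketchReadU16(uint8_t const *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint16_t const *)pb;       /* x86/AMD64 hosts tolerate unaligned reads. */
# else
    return RT_MAKE_U16(pb[0], pb[1]);   /* pb[0] is the low byte. */
# endif
}
#endif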
2654#ifndef IEM_WITH_SETJMP
2655
2656/**
2657 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param pu32 Where to return the opcode double word.
2662 */
2663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2664{
2665 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2666 if (rcStrict == VINF_SUCCESS)
2667 {
2668 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2669 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2670 pVCpu->iem.s.offOpcode = offOpcode + 2;
2671 }
2672 else
2673 *pu32 = 0;
2674 return rcStrict;
2675}
2676
2677
2678/**
2679 * Fetches the next opcode word, zero extending it to a double word.
2680 *
2681 * @returns Strict VBox status code.
2682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2683 * @param pu32 Where to return the opcode double word.
2684 */
2685DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2686{
2687 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2688 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2689 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2690
2691 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2692 pVCpu->iem.s.offOpcode = offOpcode + 2;
2693 return VINF_SUCCESS;
2694}
2695
2696#endif /* !IEM_WITH_SETJMP */
2697
2698
2699/**
2700 * Fetches the next opcode word and zero extends it to a double word, returns
2701 * automatically on failure.
2702 *
2703 * @param a_pu32 Where to return the opcode double word.
2704 * @remark Implicitly references pVCpu.
2705 */
2706#ifndef IEM_WITH_SETJMP
2707# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2708 do \
2709 { \
2710 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2711 if (rcStrict2 != VINF_SUCCESS) \
2712 return rcStrict2; \
2713 } while (0)
2714#else
2715# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2716#endif
2717
2718#ifndef IEM_WITH_SETJMP
2719
2720/**
2721 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2722 *
2723 * @returns Strict VBox status code.
2724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2725 * @param pu64 Where to return the opcode quad word.
2726 */
2727DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2728{
2729 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2730 if (rcStrict == VINF_SUCCESS)
2731 {
2732 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2733 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2734 pVCpu->iem.s.offOpcode = offOpcode + 2;
2735 }
2736 else
2737 *pu64 = 0;
2738 return rcStrict;
2739}
2740
2741
2742/**
2743 * Fetches the next opcode word, zero extending it to a quad word.
2744 *
2745 * @returns Strict VBox status code.
2746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2747 * @param pu64 Where to return the opcode quad word.
2748 */
2749DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2750{
2751 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2752 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2753 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2754
2755 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2756 pVCpu->iem.s.offOpcode = offOpcode + 2;
2757 return VINF_SUCCESS;
2758}
2759
2760#endif /* !IEM_WITH_SETJMP */
2761
2762/**
2763 * Fetches the next opcode word and zero extends it to a quad word, returns
2764 * automatically on failure.
2765 *
2766 * @param a_pu64 Where to return the opcode quad word.
2767 * @remark Implicitly references pVCpu.
2768 */
2769#ifndef IEM_WITH_SETJMP
2770# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2771 do \
2772 { \
2773 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2774 if (rcStrict2 != VINF_SUCCESS) \
2775 return rcStrict2; \
2776 } while (0)
2777#else
2778# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2779#endif
2780
2781
2782#ifndef IEM_WITH_SETJMP
2783/**
2784 * Fetches the next signed word from the opcode stream.
2785 *
2786 * @returns Strict VBox status code.
2787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2788 * @param pi16 Where to return the signed word.
2789 */
2790DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2791{
2792 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2793}
2794#endif /* !IEM_WITH_SETJMP */
2795
2796
2797/**
2798 * Fetches the next signed word from the opcode stream, returning automatically
2799 * on failure.
2800 *
2801 * @param a_pi16 Where to return the signed word.
2802 * @remark Implicitly references pVCpu.
2803 */
2804#ifndef IEM_WITH_SETJMP
2805# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2806 do \
2807 { \
2808 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2809 if (rcStrict2 != VINF_SUCCESS) \
2810 return rcStrict2; \
2811 } while (0)
2812#else
2813# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2814#endif
2815
2816#ifndef IEM_WITH_SETJMP
2817
2818/**
2819 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2820 *
2821 * @returns Strict VBox status code.
2822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2823 * @param pu32 Where to return the opcode dword.
2824 */
2825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2826{
2827 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2828 if (rcStrict == VINF_SUCCESS)
2829 {
2830 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2831# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2832 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2833# else
2834 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2835 pVCpu->iem.s.abOpcode[offOpcode + 1],
2836 pVCpu->iem.s.abOpcode[offOpcode + 2],
2837 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2838# endif
2839 pVCpu->iem.s.offOpcode = offOpcode + 4;
2840 }
2841 else
2842 *pu32 = 0;
2843 return rcStrict;
2844}
2845
2846
2847/**
2848 * Fetches the next opcode dword.
2849 *
2850 * @returns Strict VBox status code.
2851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2852 * @param pu32 Where to return the opcode double word.
2853 */
2854DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2855{
2856 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2857 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2858 {
2859 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2860# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2861 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2862# else
2863 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2864 pVCpu->iem.s.abOpcode[offOpcode + 1],
2865 pVCpu->iem.s.abOpcode[offOpcode + 2],
2866 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2867# endif
2868 return VINF_SUCCESS;
2869 }
2870 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2871}
2872
2873#else /* IEM_WITH_SETJMP */
2874
2875/**
2876 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2877 *
2878 * @returns The opcode dword.
2879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2880 */
2881DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2882{
2883# ifdef IEM_WITH_CODE_TLB
2884 uint32_t u32;
2885 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2886 return u32;
2887# else
2888 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2889 if (rcStrict == VINF_SUCCESS)
2890 {
2891 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2892 pVCpu->iem.s.offOpcode = offOpcode + 4;
2893# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2894 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2895# else
2896 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2897 pVCpu->iem.s.abOpcode[offOpcode + 1],
2898 pVCpu->iem.s.abOpcode[offOpcode + 2],
2899 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2900# endif
2901 }
2902 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2903# endif
2904}
2905
2906
2907/**
2908 * Fetches the next opcode dword, longjmp on error.
2909 *
2910 * @returns The opcode dword.
2911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2912 */
2913DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2914{
2915# ifdef IEM_WITH_CODE_TLB
2916 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2917 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2918 if (RT_LIKELY( pbBuf != NULL
2919 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2920 {
2921 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2922# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2923 return *(uint32_t const *)&pbBuf[offBuf];
2924# else
2925 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2926 pbBuf[offBuf + 1],
2927 pbBuf[offBuf + 2],
2928 pbBuf[offBuf + 3]);
2929# endif
2930 }
2931# else
2932 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2933 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2934 {
2935 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2936# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2937 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2938# else
2939 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2940 pVCpu->iem.s.abOpcode[offOpcode + 1],
2941 pVCpu->iem.s.abOpcode[offOpcode + 2],
2942 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2943# endif
2944 }
2945# endif
2946 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2947}
2948
2949#endif /* IEM_WITH_SETJMP */
2950
2951
2952/**
2953 * Fetches the next opcode dword, returns automatically on failure.
2954 *
2955 * @param a_pu32 Where to return the opcode dword.
2956 * @remark Implicitly references pVCpu.
2957 */
2958#ifndef IEM_WITH_SETJMP
2959# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2960 do \
2961 { \
2962 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2963 if (rcStrict2 != VINF_SUCCESS) \
2964 return rcStrict2; \
2965 } while (0)
2966#else
2967# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2968#endif
2969
2970#ifndef IEM_WITH_SETJMP
2971
2972/**
2973 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2974 *
2975 * @returns Strict VBox status code.
2976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2977 * @param pu64 Where to return the opcode dword.
2978 */
2979DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2980{
2981 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2982 if (rcStrict == VINF_SUCCESS)
2983 {
2984 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2985 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2986 pVCpu->iem.s.abOpcode[offOpcode + 1],
2987 pVCpu->iem.s.abOpcode[offOpcode + 2],
2988 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2989 pVCpu->iem.s.offOpcode = offOpcode + 4;
2990 }
2991 else
2992 *pu64 = 0;
2993 return rcStrict;
2994}
2995
2996
2997/**
2998 * Fetches the next opcode dword, zero extending it to a quad word.
2999 *
3000 * @returns Strict VBox status code.
3001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3002 * @param pu64 Where to return the opcode quad word.
3003 */
3004DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3005{
3006 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3007 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3008 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3009
3010 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3011 pVCpu->iem.s.abOpcode[offOpcode + 1],
3012 pVCpu->iem.s.abOpcode[offOpcode + 2],
3013 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3014 pVCpu->iem.s.offOpcode = offOpcode + 4;
3015 return VINF_SUCCESS;
3016}
3017
3018#endif /* !IEM_WITH_SETJMP */
3019
3020
3021/**
3022 * Fetches the next opcode dword and zero extends it to a quad word, returns
3023 * automatically on failure.
3024 *
3025 * @param a_pu64 Where to return the opcode quad word.
3026 * @remark Implicitly references pVCpu.
3027 */
3028#ifndef IEM_WITH_SETJMP
3029# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3030 do \
3031 { \
3032 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3033 if (rcStrict2 != VINF_SUCCESS) \
3034 return rcStrict2; \
3035 } while (0)
3036#else
3037# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3038#endif
3039
3040
3041#ifndef IEM_WITH_SETJMP
3042/**
3043 * Fetches the next signed double word from the opcode stream.
3044 *
3045 * @returns Strict VBox status code.
3046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3047 * @param pi32 Where to return the signed double word.
3048 */
3049DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3050{
3051 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3052}
3053#endif
3054
3055/**
3056 * Fetches the next signed double word from the opcode stream, returning
3057 * automatically on failure.
3058 *
3059 * @param a_pi32 Where to return the signed double word.
3060 * @remark Implicitly references pVCpu.
3061 */
3062#ifndef IEM_WITH_SETJMP
3063# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3064 do \
3065 { \
3066 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3067 if (rcStrict2 != VINF_SUCCESS) \
3068 return rcStrict2; \
3069 } while (0)
3070#else
3071# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3072#endif
3073
3074#ifndef IEM_WITH_SETJMP
3075
3076/**
3077 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3078 *
3079 * @returns Strict VBox status code.
3080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3081 * @param pu64 Where to return the opcode qword.
3082 */
3083DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3084{
3085 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3086 if (rcStrict == VINF_SUCCESS)
3087 {
3088 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3089 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3090 pVCpu->iem.s.abOpcode[offOpcode + 1],
3091 pVCpu->iem.s.abOpcode[offOpcode + 2],
3092 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3093 pVCpu->iem.s.offOpcode = offOpcode + 4;
3094 }
3095 else
3096 *pu64 = 0;
3097 return rcStrict;
3098}
3099
3100
3101/**
3102 * Fetches the next opcode dword, sign extending it into a quad word.
3103 *
3104 * @returns Strict VBox status code.
3105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3106 * @param pu64 Where to return the opcode quad word.
3107 */
3108DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3109{
3110 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3111 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3112 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3113
3114 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3115 pVCpu->iem.s.abOpcode[offOpcode + 1],
3116 pVCpu->iem.s.abOpcode[offOpcode + 2],
3117 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3118 *pu64 = i32;
3119 pVCpu->iem.s.offOpcode = offOpcode + 4;
3120 return VINF_SUCCESS;
3121}
3122
3123#endif /* !IEM_WITH_SETJMP */
3124
3125
3126/**
3127 * Fetches the next opcode double word and sign extends it to a quad word,
3128 * returning automatically on failure.
3129 *
3130 * @param a_pu64 Where to return the opcode quad word.
3131 * @remark Implicitly references pVCpu.
3132 */
3133#ifndef IEM_WITH_SETJMP
3134# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3135 do \
3136 { \
3137 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3138 if (rcStrict2 != VINF_SUCCESS) \
3139 return rcStrict2; \
3140 } while (0)
3141#else
3142# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3143#endif
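
/*
 * Worked example (added commentary): the sign extension above matters for
 * 32-bit displacements and immediates used with 64-bit operands.  A fetched
 * dword of 0xffffff80 (-128) becomes 0xffffffffffffff80 via this path, while
 * the zero-extending IEM_OPCODE_GET_NEXT_U32_ZX_U64 further up would yield
 * 0x00000000ffffff80 for the same bytes.
 */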
3144
3145#ifndef IEM_WITH_SETJMP
3146
3147/**
3148 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3149 *
3150 * @returns Strict VBox status code.
3151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3152 * @param pu64 Where to return the opcode qword.
3153 */
3154DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3155{
3156 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3157 if (rcStrict == VINF_SUCCESS)
3158 {
3159 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3160# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3161 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3162# else
3163 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3164 pVCpu->iem.s.abOpcode[offOpcode + 1],
3165 pVCpu->iem.s.abOpcode[offOpcode + 2],
3166 pVCpu->iem.s.abOpcode[offOpcode + 3],
3167 pVCpu->iem.s.abOpcode[offOpcode + 4],
3168 pVCpu->iem.s.abOpcode[offOpcode + 5],
3169 pVCpu->iem.s.abOpcode[offOpcode + 6],
3170 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3171# endif
3172 pVCpu->iem.s.offOpcode = offOpcode + 8;
3173 }
3174 else
3175 *pu64 = 0;
3176 return rcStrict;
3177}
3178
3179
3180/**
3181 * Fetches the next opcode qword.
3182 *
3183 * @returns Strict VBox status code.
3184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3185 * @param pu64 Where to return the opcode qword.
3186 */
3187DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3188{
3189 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3190 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3191 {
3192# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3193 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3194# else
3195 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3196 pVCpu->iem.s.abOpcode[offOpcode + 1],
3197 pVCpu->iem.s.abOpcode[offOpcode + 2],
3198 pVCpu->iem.s.abOpcode[offOpcode + 3],
3199 pVCpu->iem.s.abOpcode[offOpcode + 4],
3200 pVCpu->iem.s.abOpcode[offOpcode + 5],
3201 pVCpu->iem.s.abOpcode[offOpcode + 6],
3202 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3203# endif
3204 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3205 return VINF_SUCCESS;
3206 }
3207 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3208}
3209
3210#else /* IEM_WITH_SETJMP */
3211
3212/**
3213 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3214 *
3215 * @returns The opcode qword.
3216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3217 */
3218DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3219{
3220# ifdef IEM_WITH_CODE_TLB
3221 uint64_t u64;
3222 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3223 return u64;
3224# else
3225 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3226 if (rcStrict == VINF_SUCCESS)
3227 {
3228 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3229 pVCpu->iem.s.offOpcode = offOpcode + 8;
3230# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3231 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3232# else
3233 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3234 pVCpu->iem.s.abOpcode[offOpcode + 1],
3235 pVCpu->iem.s.abOpcode[offOpcode + 2],
3236 pVCpu->iem.s.abOpcode[offOpcode + 3],
3237 pVCpu->iem.s.abOpcode[offOpcode + 4],
3238 pVCpu->iem.s.abOpcode[offOpcode + 5],
3239 pVCpu->iem.s.abOpcode[offOpcode + 6],
3240 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3241# endif
3242 }
3243 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3244# endif
3245}
3246
3247
3248/**
3249 * Fetches the next opcode qword, longjmp on error.
3250 *
3251 * @returns The opcode qword.
3252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3253 */
3254DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3255{
3256# ifdef IEM_WITH_CODE_TLB
3257 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3258 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3259 if (RT_LIKELY( pbBuf != NULL
3260 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3261 {
3262 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3263# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3264 return *(uint64_t const *)&pbBuf[offBuf];
3265# else
3266 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3267 pbBuf[offBuf + 1],
3268 pbBuf[offBuf + 2],
3269 pbBuf[offBuf + 3],
3270 pbBuf[offBuf + 4],
3271 pbBuf[offBuf + 5],
3272 pbBuf[offBuf + 6],
3273 pbBuf[offBuf + 7]);
3274# endif
3275 }
3276# else
3277 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3278 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3279 {
3280 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3281# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3282 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3283# else
3284 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3285 pVCpu->iem.s.abOpcode[offOpcode + 1],
3286 pVCpu->iem.s.abOpcode[offOpcode + 2],
3287 pVCpu->iem.s.abOpcode[offOpcode + 3],
3288 pVCpu->iem.s.abOpcode[offOpcode + 4],
3289 pVCpu->iem.s.abOpcode[offOpcode + 5],
3290 pVCpu->iem.s.abOpcode[offOpcode + 6],
3291 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3292# endif
3293 }
3294# endif
3295 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3296}
3297
3298#endif /* IEM_WITH_SETJMP */
3299
3300/**
3301 * Fetches the next opcode quad word, returning automatically on failure.
3302 *
3303 * @param a_pu64 Where to return the opcode quad word.
3304 * @remark Implicitly references pVCpu.
3305 */
3306#ifndef IEM_WITH_SETJMP
3307# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3308 do \
3309 { \
3310 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3311 if (rcStrict2 != VINF_SUCCESS) \
3312 return rcStrict2; \
3313 } while (0)
3314#else
3315# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3316#endif
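
/*
 * Note (added commentary): the qword fetchers above have two data paths.  With
 * IEM_USE_UNALIGNED_DATA_ACCESS defined they read the opcode buffer with one
 * unaligned 64-bit load; otherwise they assemble the value byte by byte using
 * RT_MAKE_U64_FROM_U8, which keeps the access well defined on hosts that do
 * not tolerate unaligned loads.  Both paths are intended to produce the same
 * little-endian value, e.g. opcode bytes 01 02 03 04 05 06 07 08 read as
 * 0x0807060504030201.
 */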
3317
3318
3319/** @name Misc Worker Functions.
3320 * @{
3321 */
3322
3323/**
3324 * Gets the exception class for the specified exception vector.
3325 *
3326 * @returns The class of the specified exception.
3327 * @param uVector The exception vector.
3328 */
3329IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3330{
3331 Assert(uVector <= X86_XCPT_LAST);
3332 switch (uVector)
3333 {
3334 case X86_XCPT_DE:
3335 case X86_XCPT_TS:
3336 case X86_XCPT_NP:
3337 case X86_XCPT_SS:
3338 case X86_XCPT_GP:
3339 case X86_XCPT_SX: /* AMD only */
3340 return IEMXCPTCLASS_CONTRIBUTORY;
3341
3342 case X86_XCPT_PF:
3343 case X86_XCPT_VE: /* Intel only */
3344 return IEMXCPTCLASS_PAGE_FAULT;
3345
3346 case X86_XCPT_DF:
3347 return IEMXCPTCLASS_DOUBLE_FAULT;
3348 }
3349 return IEMXCPTCLASS_BENIGN;
3350}
3351
3352
3353/**
3354 * Evaluates how to handle an exception caused during delivery of another event
3355 * (exception / interrupt).
3356 *
3357 * @returns How to handle the recursive exception.
3358 * @param pVCpu The cross context virtual CPU structure of the
3359 * calling thread.
3360 * @param fPrevFlags The flags of the previous event.
3361 * @param uPrevVector The vector of the previous event.
3362 * @param fCurFlags The flags of the current exception.
3363 * @param uCurVector The vector of the current exception.
3364 * @param pfXcptRaiseInfo Where to store additional information about the
3365 * exception condition. Optional.
3366 */
3367VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3368 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3369{
3370 /*
3371 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3372 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3373 */
3374 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3375 Assert(pVCpu); RT_NOREF(pVCpu);
3376 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3377
3378 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3379 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3380 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3381 {
3382 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3383 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3384 {
3385 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3386 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3387 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3388 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3389 {
3390 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3391 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3392 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3393 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3394 uCurVector, pVCpu->cpum.GstCtx.cr2));
3395 }
3396 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3397 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3398 {
3399 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3400 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3401 }
3402 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3403 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3404 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3405 {
3406 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3407 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3408 }
3409 }
3410 else
3411 {
3412 if (uPrevVector == X86_XCPT_NMI)
3413 {
3414 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3415 if (uCurVector == X86_XCPT_PF)
3416 {
3417 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3418 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3419 }
3420 }
3421 else if ( uPrevVector == X86_XCPT_AC
3422 && uCurVector == X86_XCPT_AC)
3423 {
3424 enmRaise = IEMXCPTRAISE_CPU_HANG;
3425 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3426 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3427 }
3428 }
3429 }
3430 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3431 {
3432 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3433 if (uCurVector == X86_XCPT_PF)
3434 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3435 }
3436 else
3437 {
3438 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3439 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3440 }
3441
3442 if (pfXcptRaiseInfo)
3443 *pfXcptRaiseInfo = fRaiseInfo;
3444 return enmRaise;
3445}
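
/*
 * Illustrative sketch (hypothetical caller, added commentary): exception
 * delivery code would typically feed the previous and the current event into
 * the evaluator and act on the verdict, roughly like this:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevFlags, uPrevVector,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, uCurVector,
 *                                                      &fRaiseInfo);
 *     if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *         ...raise #DF (error code 0) instead of the current exception...
 *     else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
 *         ...enter shutdown, see iemInitiateCpuShutdown below...
 */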
3446
3447
3448/**
3449 * Enters the CPU shutdown state initiated by a triple fault or other
3450 * unrecoverable conditions.
3451 *
3452 * @returns Strict VBox status code.
3453 * @param pVCpu The cross context virtual CPU structure of the
3454 * calling thread.
3455 */
3456IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3457{
3458 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3459 {
3460 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3461 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3462 }
3463
3464 RT_NOREF(pVCpu);
3465 return VINF_EM_TRIPLE_FAULT;
3466}
3467
3468
3469/**
3470 * Validates a new SS segment.
3471 *
3472 * @returns VBox strict status code.
3473 * @param pVCpu The cross context virtual CPU structure of the
3474 * calling thread.
3475 * @param NewSS The new SS selector.
3476 * @param uCpl The CPL to load the stack for.
3477 * @param pDesc Where to return the descriptor.
3478 */
3479IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3480{
3481 /* Null selectors are not allowed (we're not called for dispatching
3482 interrupts with SS=0 in long mode). */
3483 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3484 {
3485 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3486 return iemRaiseTaskSwitchFault0(pVCpu);
3487 }
3488
3489 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3490 if ((NewSS & X86_SEL_RPL) != uCpl)
3491 {
3492 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3493 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3494 }
3495
3496 /*
3497 * Read the descriptor.
3498 */
3499 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3500 if (rcStrict != VINF_SUCCESS)
3501 return rcStrict;
3502
3503 /*
3504 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3505 */
3506 if (!pDesc->Legacy.Gen.u1DescType)
3507 {
3508 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3509 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3510 }
3511
3512 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3513 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3514 {
3515 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3516 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3517 }
3518 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3519 {
3520 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3521 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3522 }
3523
3524 /* Is it there? */
3525 /** @todo testcase: Is this checked before the canonical / limit check below? */
3526 if (!pDesc->Legacy.Gen.u1Present)
3527 {
3528 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3529 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3530 }
3531
3532 return VINF_SUCCESS;
3533}
3534
3535
3536/**
3537 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3538 * not.
3539 *
3540 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3541 */
3542#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3543# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3544#else
3545# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3546#endif
3547
3548/**
3549 * Updates the EFLAGS in the correct manner wrt. PATM.
3550 *
3551 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3552 * @param a_fEfl The new EFLAGS.
3553 */
3554#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3555# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3556#else
3557# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3558#endif
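
/*
 * Usage sketch (added commentary): code that needs to tweak the guest EFLAGS
 * goes through these accessors so the raw-mode/PATM case stays correct, e.g.:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~X86_EFL_IF;              // example modification only
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */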
3559
3560
3561/** @} */
3562
3563/** @name Raising Exceptions.
3564 *
3565 * @{
3566 */
3567
3568
3569/**
3570 * Loads the specified stack far pointer from the TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param uCpl The CPL to load the stack for.
3575 * @param pSelSS Where to return the new stack segment.
3576 * @param puEsp Where to return the new stack pointer.
3577 */
3578IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3579{
3580 VBOXSTRICTRC rcStrict;
3581 Assert(uCpl < 4);
3582
3583 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3584 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3585 {
3586 /*
3587 * 16-bit TSS (X86TSS16).
3588 */
3589 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3590 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3591 {
3592 uint32_t off = uCpl * 4 + 2;
3593 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3594 {
3595 /** @todo check actual access pattern here. */
3596 uint32_t u32Tmp = 0; /* gcc maybe... */
3597 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3598 if (rcStrict == VINF_SUCCESS)
3599 {
3600 *puEsp = RT_LOWORD(u32Tmp);
3601 *pSelSS = RT_HIWORD(u32Tmp);
3602 return VINF_SUCCESS;
3603 }
3604 }
3605 else
3606 {
3607 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3608 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3609 }
3610 break;
3611 }
3612
3613 /*
3614 * 32-bit TSS (X86TSS32).
3615 */
3616 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3617 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3618 {
3619 uint32_t off = uCpl * 8 + 4;
3620 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3621 {
3622/** @todo check actual access pattern here. */
3623 uint64_t u64Tmp;
3624 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3625 if (rcStrict == VINF_SUCCESS)
3626 {
3627 *puEsp = u64Tmp & UINT32_MAX;
3628 *pSelSS = (RTSEL)(u64Tmp >> 32);
3629 return VINF_SUCCESS;
3630 }
3631 }
3632 else
3633 {
3634 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3635 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3636 }
3637 break;
3638 }
3639
3640 default:
3641 AssertFailed();
3642 rcStrict = VERR_IEM_IPE_4;
3643 break;
3644 }
3645
3646 *puEsp = 0; /* make gcc happy */
3647 *pSelSS = 0; /* make gcc happy */
3648 return rcStrict;
3649}
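
/*
 * Worked example (added commentary): the offsets above follow the TSS layouts.
 * In a 16-bit TSS the SP/SS pairs start at offset 2 with 4 bytes per privilege
 * level, so uCpl=1 reads SP1:SS1 from offset 1*4 + 2 = 6.  In a 32-bit TSS the
 * ESP/SS pairs start at offset 4 with 8 bytes per level, so uCpl=1 reads
 * ESP1/SS1 from offset 1*8 + 4 = 12.
 */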
3650
3651
3652/**
3653 * Loads the specified stack pointer from the 64-bit TSS.
3654 *
3655 * @returns VBox strict status code.
3656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3657 * @param uCpl The CPL to load the stack for.
3658 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3659 * @param puRsp Where to return the new stack pointer.
3660 */
3661IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3662{
3663 Assert(uCpl < 4);
3664 Assert(uIst < 8);
3665 *puRsp = 0; /* make gcc happy */
3666
3667 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3668 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3669
3670 uint32_t off;
3671 if (uIst)
3672 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3673 else
3674 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3675 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3676 {
3677 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3678 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3679 }
3680
3681 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3682}
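
/*
 * Worked example (added commentary): per the X86TSS64 layout, RSP0..RSP2 start
 * at offset 4 and IST1..IST7 at offset 0x24, each field being 8 bytes.  So for
 * uIst=0, uCpl=0 the code reads RSP0 from offset 4, while uIst=3 reads IST3
 * from offset (3 - 1) * 8 + 0x24 = 0x34 regardless of uCpl.
 */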
3683
3684
3685/**
3686 * Adjust the CPU state according to the exception being raised.
3687 *
3688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3689 * @param u8Vector The exception that has been raised.
3690 */
3691DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3692{
3693 switch (u8Vector)
3694 {
3695 case X86_XCPT_DB:
3696 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3697 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3698 break;
3699 /** @todo Read the AMD and Intel exception reference... */
3700 }
3701}
3702
3703
3704/**
3705 * Implements exceptions and interrupts for real mode.
3706 *
3707 * @returns VBox strict status code.
3708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3709 * @param cbInstr The number of bytes to offset rIP by in the return
3710 * address.
3711 * @param u8Vector The interrupt / exception vector number.
3712 * @param fFlags The flags.
3713 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3714 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3715 */
3716IEM_STATIC VBOXSTRICTRC
3717iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3718 uint8_t cbInstr,
3719 uint8_t u8Vector,
3720 uint32_t fFlags,
3721 uint16_t uErr,
3722 uint64_t uCr2)
3723{
3724 NOREF(uErr); NOREF(uCr2);
3725 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3726
3727 /*
3728 * Read the IDT entry.
3729 */
3730 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3731 {
3732 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3733 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3734 }
3735 RTFAR16 Idte;
3736 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3737 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3738 {
3739 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3740 return rcStrict;
3741 }
3742
3743 /*
3744 * Push the stack frame.
3745 */
3746 uint16_t *pu16Frame;
3747 uint64_t uNewRsp;
3748 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3749 if (rcStrict != VINF_SUCCESS)
3750 return rcStrict;
3751
3752 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3753#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3754 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3755 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3756 fEfl |= UINT16_C(0xf000);
3757#endif
3758 pu16Frame[2] = (uint16_t)fEfl;
3759 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3760 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3761 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3762 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3763 return rcStrict;
3764
3765 /*
3766 * Load the vector address into cs:ip and make exception specific state
3767 * adjustments.
3768 */
3769 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3770 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3771 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3772 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3773 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3774 pVCpu->cpum.GstCtx.rip = Idte.off;
3775 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3776 IEMMISC_SET_EFL(pVCpu, fEfl);
3777
3778 /** @todo do we actually do this in real mode? */
3779 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3780 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3781
3782 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3783}
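
/*
 * Worked example (added commentary): in real mode the IDT is the classic IVT
 * of 4-byte offset:segment entries, so vector 0x21 is looked up at
 * idtr.pIdt + 0x84.  The 6-byte frame pushed above ends up, from the new top
 * of stack upwards, as IP (or IP + cbInstr for software INTs), CS and FLAGS,
 * which is exactly what a later IRET expects to pop.
 */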
3784
3785
3786/**
3787 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3788 *
3789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3790 * @param pSReg Pointer to the segment register.
3791 */
3792IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3793{
3794 pSReg->Sel = 0;
3795 pSReg->ValidSel = 0;
3796 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3797 {
3798 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3799 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3800 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3801 }
3802 else
3803 {
3804 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3805 /** @todo check this on AMD-V */
3806 pSReg->u64Base = 0;
3807 pSReg->u32Limit = 0;
3808 }
3809}
3810
3811
3812/**
3813 * Loads a segment selector during a task switch in V8086 mode.
3814 *
3815 * @param pSReg Pointer to the segment register.
3816 * @param uSel The selector value to load.
3817 */
3818IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3819{
3820 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3821 pSReg->Sel = uSel;
3822 pSReg->ValidSel = uSel;
3823 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3824 pSReg->u64Base = uSel << 4;
3825 pSReg->u32Limit = 0xffff;
3826 pSReg->Attr.u = 0xf3;
3827}
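
/*
 * Worked example (added commentary): V8086 segmentation is plain real-mode
 * style, so loading uSel=0xb800 here yields base 0xb8000, limit 0xffff and
 * attributes 0xf3 (present, DPL 3, accessed read/write data), which is what
 * the guest segment register checks referenced above expect for V8086 mode.
 */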
3828
3829
3830/**
3831 * Loads a NULL data selector into a selector register, both the hidden and
3832 * visible parts, in protected mode.
3833 *
3834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3835 * @param pSReg Pointer to the segment register.
3836 * @param uRpl The RPL.
3837 */
3838IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3839{
3840 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3841 * data selector in protected mode. */
3842 pSReg->Sel = uRpl;
3843 pSReg->ValidSel = uRpl;
3844 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3845 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3846 {
3847 /* VT-x (Intel 3960x) observed doing something like this. */
3848 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3849 pSReg->u32Limit = UINT32_MAX;
3850 pSReg->u64Base = 0;
3851 }
3852 else
3853 {
3854 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3855 pSReg->u32Limit = 0;
3856 pSReg->u64Base = 0;
3857 }
3858}
3859
3860
3861/**
3862 * Loads a segment selector during a task switch in protected mode.
3863 *
3864 * In this task switch scenario, we would throw \#TS exceptions rather than
3865 * \#GPs.
3866 *
3867 * @returns VBox strict status code.
3868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3869 * @param pSReg Pointer to the segment register.
3870 * @param uSel The new selector value.
3871 *
3872 * @remarks This does _not_ handle CS or SS.
3873 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3874 */
3875IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3876{
3877 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3878
3879 /* Null data selector. */
3880 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3881 {
3882 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3883 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3884 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3885 return VINF_SUCCESS;
3886 }
3887
3888 /* Fetch the descriptor. */
3889 IEMSELDESC Desc;
3890 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3891 if (rcStrict != VINF_SUCCESS)
3892 {
3893 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3894 VBOXSTRICTRC_VAL(rcStrict)));
3895 return rcStrict;
3896 }
3897
3898 /* Must be a data segment or readable code segment. */
3899 if ( !Desc.Legacy.Gen.u1DescType
3900 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3901 {
3902 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3903 Desc.Legacy.Gen.u4Type));
3904 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3905 }
3906
3907 /* Check privileges for data segments and non-conforming code segments. */
3908 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3909 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3910 {
3911 /* The RPL and the new CPL must be less than or equal to the DPL. */
3912 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3913 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3914 {
3915 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3916 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3917 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3918 }
3919 }
3920
3921 /* Is it there? */
3922 if (!Desc.Legacy.Gen.u1Present)
3923 {
3924 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3925 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3926 }
3927
3928 /* The base and limit. */
3929 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3930 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3931
3932 /*
3933 * Ok, everything checked out fine. Now set the accessed bit before
3934 * committing the result into the registers.
3935 */
3936 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3937 {
3938 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3939 if (rcStrict != VINF_SUCCESS)
3940 return rcStrict;
3941 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3942 }
3943
3944 /* Commit */
3945 pSReg->Sel = uSel;
3946 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3947 pSReg->u32Limit = cbLimit;
3948 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3949 pSReg->ValidSel = uSel;
3950 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3951 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3952 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3953
3954 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3955 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3956 return VINF_SUCCESS;
3957}
3958
3959
3960/**
3961 * Performs a task switch.
3962 *
3963 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3964 * caller is responsible for performing the necessary checks (like DPL, TSS
3965 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3966 * reference for JMP, CALL, IRET.
3967 *
3968 * If the task switch is due to a software interrupt or hardware exception,
3969 * the caller is responsible for validating the TSS selector and descriptor. See
3970 * Intel Instruction reference for INT n.
3971 *
3972 * @returns VBox strict status code.
3973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3974 * @param enmTaskSwitch The cause of the task switch.
3975 * @param uNextEip The EIP effective after the task switch.
3976 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3977 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3978 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3979 * @param SelTSS The TSS selector of the new task.
3980 * @param pNewDescTSS Pointer to the new TSS descriptor.
3981 */
3982IEM_STATIC VBOXSTRICTRC
3983iemTaskSwitch(PVMCPU pVCpu,
3984 IEMTASKSWITCH enmTaskSwitch,
3985 uint32_t uNextEip,
3986 uint32_t fFlags,
3987 uint16_t uErr,
3988 uint64_t uCr2,
3989 RTSEL SelTSS,
3990 PIEMSELDESC pNewDescTSS)
3991{
3992 Assert(!IEM_IS_REAL_MODE(pVCpu));
3993 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3994 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3995
3996 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3997 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3998 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3999 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4000 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4001
4002 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4003 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4004
4005 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4006 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4007
4008 /* Update CR2 in case it's a page-fault. */
4009 /** @todo This should probably be done much earlier in IEM/PGM. See
4010 * @bugref{5653#c49}. */
4011 if (fFlags & IEM_XCPT_FLAGS_CR2)
4012 pVCpu->cpum.GstCtx.cr2 = uCr2;
4013
4014 /*
4015 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4016 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4017 */
4018 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4019 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4020 if (uNewTSSLimit < uNewTSSLimitMin)
4021 {
4022 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4023 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4024 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4025 }
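
    /*
     * Added commentary: with the standard layouts this means a 32-bit TSS
     * descriptor must declare a limit of at least 0x67 (104 bytes) and a
     * 16-bit one at least 0x2b (44 bytes); anything smaller raises #TS with
     * the new TSS selector as the error code.
     */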
4026
4027 /*
4028 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4029 * The new TSS must have been read and validated (DPL, limits etc.) before a
4030 * task-switch VM-exit commences.
4031 *
4032 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4033 */
4034 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4035 {
4036 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4037 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS);
4038 }
4039
4040 /*
4041 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4042 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4043 */
4044 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4045 {
4046 uint32_t const uExitInfo1 = SelTSS;
4047 uint32_t uExitInfo2 = uErr;
4048 switch (enmTaskSwitch)
4049 {
4050 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4051 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4052 default: break;
4053 }
4054 if (fFlags & IEM_XCPT_FLAGS_ERR)
4055 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4056 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4057 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4058
4059 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4060 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4061 RT_NOREF2(uExitInfo1, uExitInfo2);
4062 }
4063
4064 /*
4065 * Check the current TSS limit. The last bytes written to the current TSS during the
4066 * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4067 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4068 *
4069 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4070 * end up with smaller than "legal" TSS limits.
4071 */
4072 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4073 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4074 if (uCurTSSLimit < uCurTSSLimitMin)
4075 {
4076 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4077 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4078 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4079 }
4080
4081 /*
4082 * Verify that the new TSS can be accessed and map it. Map only the required contents
4083 * and not the entire TSS.
4084 */
4085 void *pvNewTSS;
4086 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4087 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4088 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4089 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4090 * not perform correct translation if this happens. See Intel spec. 7.2.1
4091 * "Task-State Segment" */
4092 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4093 if (rcStrict != VINF_SUCCESS)
4094 {
4095 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4096 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4097 return rcStrict;
4098 }
4099
4100 /*
4101 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4102 */
4103 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4104 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4105 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4106 {
4107 PX86DESC pDescCurTSS;
4108 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4109 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4110 if (rcStrict != VINF_SUCCESS)
4111 {
4112 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4113 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4114 return rcStrict;
4115 }
4116
4117 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4118 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4119 if (rcStrict != VINF_SUCCESS)
4120 {
4121 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4122 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4123 return rcStrict;
4124 }
4125
4126 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4127 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4128 {
4129 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4130 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4131 u32EFlags &= ~X86_EFL_NT;
4132 }
4133 }
4134
4135 /*
4136 * Save the CPU state into the current TSS.
4137 */
4138 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4139 if (GCPtrNewTSS == GCPtrCurTSS)
4140 {
4141 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4142 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4143 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4144 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4145 pVCpu->cpum.GstCtx.ldtr.Sel));
4146 }
4147 if (fIsNewTSS386)
4148 {
4149 /*
4150 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4151 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4152 */
4153 void *pvCurTSS32;
4154 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4155 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4156 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4157 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4158 if (rcStrict != VINF_SUCCESS)
4159 {
4160 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4161 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4162 return rcStrict;
4163 }
4164
4165 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4166 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4167 pCurTSS32->eip = uNextEip;
4168 pCurTSS32->eflags = u32EFlags;
4169 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4170 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4171 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4172 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4173 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4174 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4175 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4176 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4177 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4178 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4179 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4180 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4181 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4182 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4183
4184 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4185 if (rcStrict != VINF_SUCCESS)
4186 {
4187 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4188 VBOXSTRICTRC_VAL(rcStrict)));
4189 return rcStrict;
4190 }
4191 }
4192 else
4193 {
4194 /*
4195 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4196 */
4197 void *pvCurTSS16;
4198 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4199 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4200 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4201 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4202 if (rcStrict != VINF_SUCCESS)
4203 {
4204 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4205 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4206 return rcStrict;
4207 }
4208
4209 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4210 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4211 pCurTSS16->ip = uNextEip;
4212 pCurTSS16->flags = u32EFlags;
4213 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4214 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4215 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4216 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4217 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4218 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4219 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4220 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4221 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4222 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4223 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4224 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4225
4226 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4227 if (rcStrict != VINF_SUCCESS)
4228 {
4229 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4230 VBOXSTRICTRC_VAL(rcStrict)));
4231 return rcStrict;
4232 }
4233 }
4234
4235 /*
4236 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4237 */
4238 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4239 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4240 {
4241 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4242 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4243 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4244 }
4245
4246 /*
4247 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4248 * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
4249 */
4250 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4251 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4252 bool fNewDebugTrap;
4253 if (fIsNewTSS386)
4254 {
4255 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4256 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4257 uNewEip = pNewTSS32->eip;
4258 uNewEflags = pNewTSS32->eflags;
4259 uNewEax = pNewTSS32->eax;
4260 uNewEcx = pNewTSS32->ecx;
4261 uNewEdx = pNewTSS32->edx;
4262 uNewEbx = pNewTSS32->ebx;
4263 uNewEsp = pNewTSS32->esp;
4264 uNewEbp = pNewTSS32->ebp;
4265 uNewEsi = pNewTSS32->esi;
4266 uNewEdi = pNewTSS32->edi;
4267 uNewES = pNewTSS32->es;
4268 uNewCS = pNewTSS32->cs;
4269 uNewSS = pNewTSS32->ss;
4270 uNewDS = pNewTSS32->ds;
4271 uNewFS = pNewTSS32->fs;
4272 uNewGS = pNewTSS32->gs;
4273 uNewLdt = pNewTSS32->selLdt;
4274 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4275 }
4276 else
4277 {
4278 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4279 uNewCr3 = 0;
4280 uNewEip = pNewTSS16->ip;
4281 uNewEflags = pNewTSS16->flags;
4282 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4283 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4284 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4285 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4286 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4287 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4288 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4289 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4290 uNewES = pNewTSS16->es;
4291 uNewCS = pNewTSS16->cs;
4292 uNewSS = pNewTSS16->ss;
4293 uNewDS = pNewTSS16->ds;
4294 uNewFS = 0;
4295 uNewGS = 0;
4296 uNewLdt = pNewTSS16->selLdt;
4297 fNewDebugTrap = false;
4298 }
4299
4300 if (GCPtrNewTSS == GCPtrCurTSS)
4301 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4302 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4303
4304 /*
4305 * We're done accessing the new TSS.
4306 */
4307 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4308 if (rcStrict != VINF_SUCCESS)
4309 {
4310 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4311 return rcStrict;
4312 }
4313
4314 /*
4315 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4316 */
4317 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4318 {
4319 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4320 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4321 if (rcStrict != VINF_SUCCESS)
4322 {
4323 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4324 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4325 return rcStrict;
4326 }
4327
4328 /* Check that the descriptor indicates the new TSS is available (not busy). */
4329 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4330 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4331 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4332
4333 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4334 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4335 if (rcStrict != VINF_SUCCESS)
4336 {
4337 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4338 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4339 return rcStrict;
4340 }
4341 }
4342
4343 /*
4344 * From this point on, we're technically in the new task. Exceptions raised from here on are deferred
4345 * until the task switch completes and are then delivered before executing any instructions in the new task.
4346 */
4347 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4348 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4349 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4350 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4351 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4352 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4353 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4354
4355 /* Set the busy bit in TR. */
4356 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4357 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4358 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4359 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4360 {
4361 uNewEflags |= X86_EFL_NT;
4362 }
4363
4364 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4365 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4366 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4367
4368 pVCpu->cpum.GstCtx.eip = uNewEip;
4369 pVCpu->cpum.GstCtx.eax = uNewEax;
4370 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4371 pVCpu->cpum.GstCtx.edx = uNewEdx;
4372 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4373 pVCpu->cpum.GstCtx.esp = uNewEsp;
4374 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4375 pVCpu->cpum.GstCtx.esi = uNewEsi;
4376 pVCpu->cpum.GstCtx.edi = uNewEdi;
4377
4378 uNewEflags &= X86_EFL_LIVE_MASK;
4379 uNewEflags |= X86_EFL_RA1_MASK;
4380 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4381
4382 /*
4383 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4384 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4385 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4386 */
4387 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4388 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4389
4390 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4391 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4392
4393 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4394 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4395
4396 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4397 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4398
4399 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4400 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4401
4402 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4403 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4404 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4405
4406 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4407 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4408 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4409 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4410
4411 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4412 {
4413 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4414 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4415 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4416 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4417 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4418 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4419 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4420 }
4421
4422 /*
4423 * Switch CR3 for the new task.
4424 */
4425 if ( fIsNewTSS386
4426 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4427 {
4428 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4429 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4430 AssertRCSuccessReturn(rc, rc);
4431
4432 /* Inform PGM. */
4433 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4434 AssertRCReturn(rc, rc);
4435 /* ignore informational status codes */
4436
4437 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4438 }
4439
4440 /*
4441 * Switch LDTR for the new task.
4442 */
4443 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4444 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4445 else
4446 {
4447 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4448
4449 IEMSELDESC DescNewLdt;
4450 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4451 if (rcStrict != VINF_SUCCESS)
4452 {
4453 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4454 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4455 return rcStrict;
4456 }
4457 if ( !DescNewLdt.Legacy.Gen.u1Present
4458 || DescNewLdt.Legacy.Gen.u1DescType
4459 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4460 {
4461 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4462 uNewLdt, DescNewLdt.Legacy.u));
4463 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4464 }
4465
4466 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4467 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4468 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4469 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4470 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4471 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4472 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4474 }
4475
4476 IEMSELDESC DescSS;
4477 if (IEM_IS_V86_MODE(pVCpu))
4478 {
4479 pVCpu->iem.s.uCpl = 3;
4480 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4481 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4482 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4483 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4484 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4485 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4486
4487 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4488 DescSS.Legacy.u = 0;
4489 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4490 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4491 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4492 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4493 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4494 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4495 DescSS.Legacy.Gen.u2Dpl = 3;
4496 }
4497 else
4498 {
4499 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4500
4501 /*
4502 * Load the stack segment for the new task.
4503 */
4504 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4505 {
4506 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4507 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4508 }
4509
4510 /* Fetch the descriptor. */
4511 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4512 if (rcStrict != VINF_SUCCESS)
4513 {
4514 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4515 VBOXSTRICTRC_VAL(rcStrict)));
4516 return rcStrict;
4517 }
4518
4519 /* SS must be a data segment and writable. */
4520 if ( !DescSS.Legacy.Gen.u1DescType
4521 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4522 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4523 {
4524 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4525 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4526 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4527 }
4528
4529 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4530 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4531 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4532 {
4533 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4534 uNewCpl));
4535 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4536 }
4537
4538 /* Is it there? */
4539 if (!DescSS.Legacy.Gen.u1Present)
4540 {
4541 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4542 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4543 }
4544
4545 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4546 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4547
4548 /* Set the accessed bit before committing the result into SS. */
4549 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4550 {
4551 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4552 if (rcStrict != VINF_SUCCESS)
4553 return rcStrict;
4554 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4555 }
4556
4557 /* Commit SS. */
4558 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4559 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4560 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4561 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4562 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4563 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4564 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4565
4566 /* CPL has changed, update IEM before loading rest of segments. */
4567 pVCpu->iem.s.uCpl = uNewCpl;
4568
4569 /*
4570 * Load the data segments for the new task.
4571 */
4572 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4573 if (rcStrict != VINF_SUCCESS)
4574 return rcStrict;
4575 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4576 if (rcStrict != VINF_SUCCESS)
4577 return rcStrict;
4578 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4579 if (rcStrict != VINF_SUCCESS)
4580 return rcStrict;
4581 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4582 if (rcStrict != VINF_SUCCESS)
4583 return rcStrict;
4584
4585 /*
4586 * Load the code segment for the new task.
4587 */
4588 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4589 {
4590 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4591 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4592 }
4593
4594 /* Fetch the descriptor. */
4595 IEMSELDESC DescCS;
4596 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4597 if (rcStrict != VINF_SUCCESS)
4598 {
4599 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4600 return rcStrict;
4601 }
4602
4603 /* CS must be a code segment. */
4604 if ( !DescCS.Legacy.Gen.u1DescType
4605 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4606 {
4607 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4608 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4609 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4610 }
4611
4612 /* For conforming CS, DPL must be less than or equal to the RPL. */
4613 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4614 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4615 {
4616            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4617 DescCS.Legacy.Gen.u2Dpl));
4618 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4619 }
4620
4621 /* For non-conforming CS, DPL must match RPL. */
4622 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4623 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4624 {
4625            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4626 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4627 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4628 }
4629
4630 /* Is it there? */
4631 if (!DescCS.Legacy.Gen.u1Present)
4632 {
4633 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4634 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4635 }
4636
4637 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4638 u64Base = X86DESC_BASE(&DescCS.Legacy);
4639
4640 /* Set the accessed bit before committing the result into CS. */
4641 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4642 {
4643 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4644 if (rcStrict != VINF_SUCCESS)
4645 return rcStrict;
4646 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4647 }
4648
4649 /* Commit CS. */
4650 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4651 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4652 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4653 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4654 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4655 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4656 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4657 }
4658
4659 /** @todo Debug trap. */
4660 if (fIsNewTSS386 && fNewDebugTrap)
4661 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4662
4663 /*
4664 * Construct the error code masks based on what caused this task switch.
4665 * See Intel Instruction reference for INT.
4666 */
4667 uint16_t uExt;
4668 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4669 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4670 {
4671 uExt = 1;
4672 }
4673 else
4674 uExt = 0;
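    /* Note: uExt supplies the EXT bit of the error codes raised further below (e.g. the #SS and #GP cases). */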
4675
4676 /*
4677 * Push any error code on to the new stack.
4678 */
4679 if (fFlags & IEM_XCPT_FLAGS_ERR)
4680 {
4681 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4682 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4683 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
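        /* The error code is pushed in the width matching the new TSS type:
           a dword for a 32-bit (386) TSS, a word for a 16-bit (286) TSS. */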
4684
4685 /* Check that there is sufficient space on the stack. */
4686 /** @todo Factor out segment limit checking for normal/expand down segments
4687 * into a separate function. */
4688 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4689 {
4690 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4691 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4692 {
4693 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4694 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4695 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4696 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4697 }
4698 }
4699 else
4700 {
4701 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4702 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4703 {
4704 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4705 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4706 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4707 }
4708 }
4709
4710
4711 if (fIsNewTSS386)
4712 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4713 else
4714 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4715 if (rcStrict != VINF_SUCCESS)
4716 {
4717 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4718 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4719 return rcStrict;
4720 }
4721 }
4722
4723 /* Check the new EIP against the new CS limit. */
4724 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4725 {
4726        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4727 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4728 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4729 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4730 }
4731
4732 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4733 pVCpu->cpum.GstCtx.ss.Sel));
4734 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4735}
4736
4737
4738/**
4739 * Implements exceptions and interrupts for protected mode.
4740 *
4741 * @returns VBox strict status code.
4742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4743 * @param cbInstr The number of bytes to offset rIP by in the return
4744 * address.
4745 * @param u8Vector The interrupt / exception vector number.
4746 * @param fFlags The flags.
4747 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4748 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4749 */
4750IEM_STATIC VBOXSTRICTRC
4751iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4752 uint8_t cbInstr,
4753 uint8_t u8Vector,
4754 uint32_t fFlags,
4755 uint16_t uErr,
4756 uint64_t uCr2)
4757{
4758 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4759
4760 /*
4761 * Read the IDT entry.
4762 */
4763 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768 X86DESC Idte;
4769 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4770 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4771 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4772 {
4773 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4774 return rcStrict;
4775 }
4776 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4777 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4778 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4779
4780 /*
4781 * Check the descriptor type, DPL and such.
4782 * ASSUMES this is done in the same order as described for call-gate calls.
4783 */
4784 if (Idte.Gate.u1DescType)
4785 {
4786 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4787 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4788 }
4789 bool fTaskGate = false;
4790 uint8_t f32BitGate = true;
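    /* Note: f32BitGate doubles as a 0/1 shift count when sizing the stack frame further down. */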
4791 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4792 switch (Idte.Gate.u4Type)
4793 {
4794 case X86_SEL_TYPE_SYS_UNDEFINED:
4795 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4796 case X86_SEL_TYPE_SYS_LDT:
4797 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4798 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4799 case X86_SEL_TYPE_SYS_UNDEFINED2:
4800 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4801 case X86_SEL_TYPE_SYS_UNDEFINED3:
4802 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4803 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4804 case X86_SEL_TYPE_SYS_UNDEFINED4:
4805 {
4806 /** @todo check what actually happens when the type is wrong...
4807 * esp. call gates. */
4808 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4809 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4810 }
4811
4812 case X86_SEL_TYPE_SYS_286_INT_GATE:
4813 f32BitGate = false;
4814 RT_FALL_THRU();
4815 case X86_SEL_TYPE_SYS_386_INT_GATE:
4816 fEflToClear |= X86_EFL_IF;
4817 break;
4818
4819 case X86_SEL_TYPE_SYS_TASK_GATE:
4820 fTaskGate = true;
4821#ifndef IEM_IMPLEMENTS_TASKSWITCH
4822 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4823#endif
4824 break;
4825
4826 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4827            f32BitGate = false; RT_FALL_THRU();
4828 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4829 break;
4830
4831 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4832 }
4833
4834 /* Check DPL against CPL if applicable. */
4835 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4836 {
4837 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4838 {
4839 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4840 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4841 }
4842 }
4843
4844 /* Is it there? */
4845 if (!Idte.Gate.u1Present)
4846 {
4847 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4848 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4849 }
4850
4851 /* Is it a task-gate? */
4852 if (fTaskGate)
4853 {
4854 /*
4855 * Construct the error code masks based on what caused this task switch.
4856 * See Intel Instruction reference for INT.
4857 */
4858 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4859 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4860 RTSEL SelTSS = Idte.Gate.u16Sel;
4861
4862 /*
4863 * Fetch the TSS descriptor in the GDT.
4864 */
4865 IEMSELDESC DescTSS;
4866 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4867 if (rcStrict != VINF_SUCCESS)
4868 {
4869 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4870 VBOXSTRICTRC_VAL(rcStrict)));
4871 return rcStrict;
4872 }
4873
4874 /* The TSS descriptor must be a system segment and be available (not busy). */
4875 if ( DescTSS.Legacy.Gen.u1DescType
4876 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4877 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4878 {
4879 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4880 u8Vector, SelTSS, DescTSS.Legacy.au64));
4881 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4882 }
4883
4884 /* The TSS must be present. */
4885 if (!DescTSS.Legacy.Gen.u1Present)
4886 {
4887 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4888 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4889 }
4890
4891 /* Do the actual task switch. */
4892 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4893 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4894 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4895 }
4896
4897 /* A null CS is bad. */
4898 RTSEL NewCS = Idte.Gate.u16Sel;
4899 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4900 {
4901 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4902 return iemRaiseGeneralProtectionFault0(pVCpu);
4903 }
4904
4905 /* Fetch the descriptor for the new CS. */
4906 IEMSELDESC DescCS;
4907 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4908 if (rcStrict != VINF_SUCCESS)
4909 {
4910 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4911 return rcStrict;
4912 }
4913
4914 /* Must be a code segment. */
4915 if (!DescCS.Legacy.Gen.u1DescType)
4916 {
4917 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4918 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4919 }
4920 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4921 {
4922 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4923 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4924 }
4925
4926 /* Don't allow lowering the privilege level. */
4927 /** @todo Does the lowering of privileges apply to software interrupts
4928 * only? This has bearings on the more-privileged or
4929 * same-privilege stack behavior further down. A testcase would
4930 * be nice. */
4931 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4932 {
4933 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4934 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4935 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4936 }
4937
4938 /* Make sure the selector is present. */
4939 if (!DescCS.Legacy.Gen.u1Present)
4940 {
4941 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4942 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4943 }
4944
4945 /* Check the new EIP against the new CS limit. */
4946 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4947 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4948 ? Idte.Gate.u16OffsetLow
4949 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4950 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4951 if (uNewEip > cbLimitCS)
4952 {
4953 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4954 u8Vector, uNewEip, cbLimitCS, NewCS));
4955 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4956 }
4957 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4958
4959 /* Calc the flag image to push. */
4960 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4961 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4962 fEfl &= ~X86_EFL_RF;
4963 else
4964 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4965
4966 /* From V8086 mode only go to CPL 0. */
4967 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4968 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
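    /* A conforming code segment keeps the caller's CPL; a non-conforming one switches to CS.DPL. */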
4969 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4970 {
4971 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4972 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4973 }
4974
4975 /*
4976 * If the privilege level changes, we need to get a new stack from the TSS.
4977 * This in turns means validating the new SS and ESP...
4978 */
4979 if (uNewCpl != pVCpu->iem.s.uCpl)
4980 {
4981 RTSEL NewSS;
4982 uint32_t uNewEsp;
4983 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4984 if (rcStrict != VINF_SUCCESS)
4985 return rcStrict;
4986
4987 IEMSELDESC DescSS;
4988 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4989 if (rcStrict != VINF_SUCCESS)
4990 return rcStrict;
4991 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4992 if (!DescSS.Legacy.Gen.u1DefBig)
4993 {
4994 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4995 uNewEsp = (uint16_t)uNewEsp;
4996 }
4997
4998 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4999
5000 /* Check that there is sufficient space for the stack frame. */
5001 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5002 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5003 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5004 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
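        /* Frame layout: EIP, CS, EFLAGS, ESP and SS (plus an optional error code); when coming
           from V8086 mode, ES, DS, FS and GS are pushed as well.  Each entry is 2 or 4 bytes
           depending on the gate size. */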
5005
5006 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5007 {
5008 if ( uNewEsp - 1 > cbLimitSS
5009 || uNewEsp < cbStackFrame)
5010 {
5011 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5012 u8Vector, NewSS, uNewEsp, cbStackFrame));
5013 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5014 }
5015 }
5016 else
5017 {
5018 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5019 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5020 {
5021 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5022 u8Vector, NewSS, uNewEsp, cbStackFrame));
5023 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5024 }
5025 }
5026
5027 /*
5028 * Start making changes.
5029 */
5030
5031 /* Set the new CPL so that stack accesses use it. */
5032 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5033 pVCpu->iem.s.uCpl = uNewCpl;
5034
5035 /* Create the stack frame. */
5036 RTPTRUNION uStackFrame;
5037 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5038 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5039 if (rcStrict != VINF_SUCCESS)
5040 return rcStrict;
5041 void * const pvStackFrame = uStackFrame.pv;
5042 if (f32BitGate)
5043 {
5044 if (fFlags & IEM_XCPT_FLAGS_ERR)
5045 *uStackFrame.pu32++ = uErr;
5046 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5047 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5048 uStackFrame.pu32[2] = fEfl;
5049 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5050 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5051 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5052 if (fEfl & X86_EFL_VM)
5053 {
5054 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5055 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5056 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5057 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5058 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5059 }
5060 }
5061 else
5062 {
5063 if (fFlags & IEM_XCPT_FLAGS_ERR)
5064 *uStackFrame.pu16++ = uErr;
5065 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5066 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5067 uStackFrame.pu16[2] = fEfl;
5068 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5069 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5070 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5071 if (fEfl & X86_EFL_VM)
5072 {
5073 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5074 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5075 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5076 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5077 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5078 }
5079 }
5080 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5081 if (rcStrict != VINF_SUCCESS)
5082 return rcStrict;
5083
5084 /* Mark the selectors 'accessed' (hope this is the correct time). */
5085        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5086 * after pushing the stack frame? (Write protect the gdt + stack to
5087 * find out.) */
5088 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5089 {
5090 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5091 if (rcStrict != VINF_SUCCESS)
5092 return rcStrict;
5093 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5094 }
5095
5096 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5097 {
5098 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5099 if (rcStrict != VINF_SUCCESS)
5100 return rcStrict;
5101 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5102 }
5103
5104 /*
5105         * Start committing the register changes (joins with the DPL=CPL branch).
5106 */
5107 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5108 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5109 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5110 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5111 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5112 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5113 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5114 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5115 * SP is loaded).
5116 * Need to check the other combinations too:
5117 * - 16-bit TSS, 32-bit handler
5118 * - 32-bit TSS, 16-bit handler */
5119 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5120 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5121 else
5122 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5123
5124 if (fEfl & X86_EFL_VM)
5125 {
5126 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5127 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5128 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5129 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5130 }
5131 }
5132 /*
5133 * Same privilege, no stack change and smaller stack frame.
5134 */
5135 else
5136 {
5137 uint64_t uNewRsp;
5138 RTPTRUNION uStackFrame;
5139 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
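        /* Just EIP, CS and EFLAGS here (plus an optional error code), each entry being
           2 or 4 bytes depending on the gate size. */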
5140 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5141 if (rcStrict != VINF_SUCCESS)
5142 return rcStrict;
5143 void * const pvStackFrame = uStackFrame.pv;
5144
5145 if (f32BitGate)
5146 {
5147 if (fFlags & IEM_XCPT_FLAGS_ERR)
5148 *uStackFrame.pu32++ = uErr;
5149 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5150 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5151 uStackFrame.pu32[2] = fEfl;
5152 }
5153 else
5154 {
5155 if (fFlags & IEM_XCPT_FLAGS_ERR)
5156 *uStackFrame.pu16++ = uErr;
5157 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5158 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5159 uStackFrame.pu16[2] = fEfl;
5160 }
5161 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5162 if (rcStrict != VINF_SUCCESS)
5163 return rcStrict;
5164
5165 /* Mark the CS selector as 'accessed'. */
5166 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5167 {
5168 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5169 if (rcStrict != VINF_SUCCESS)
5170 return rcStrict;
5171 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5172 }
5173
5174 /*
5175 * Start committing the register changes (joins with the other branch).
5176 */
5177 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5178 }
5179
5180 /* ... register committing continues. */
5181 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5182 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5183 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5184 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5185 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5186 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5187
5188 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5189 fEfl &= ~fEflToClear;
5190 IEMMISC_SET_EFL(pVCpu, fEfl);
5191
5192 if (fFlags & IEM_XCPT_FLAGS_CR2)
5193 pVCpu->cpum.GstCtx.cr2 = uCr2;
5194
5195 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5196 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5197
5198 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5199}
5200
5201
5202/**
5203 * Implements exceptions and interrupts for long mode.
5204 *
5205 * @returns VBox strict status code.
5206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5207 * @param cbInstr The number of bytes to offset rIP by in the return
5208 * address.
5209 * @param u8Vector The interrupt / exception vector number.
5210 * @param fFlags The flags.
5211 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5212 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5213 */
5214IEM_STATIC VBOXSTRICTRC
5215iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5216 uint8_t cbInstr,
5217 uint8_t u8Vector,
5218 uint32_t fFlags,
5219 uint16_t uErr,
5220 uint64_t uCr2)
5221{
5222 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5223
5224 /*
5225 * Read the IDT entry.
5226 */
5227 uint16_t offIdt = (uint16_t)u8Vector << 4;
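    /* Each IDT entry is 16 bytes wide in long mode, hence the shift by 4. */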
5228 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5231 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5232 }
5233 X86DESC64 Idte;
5234 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5235 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5236 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5237 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5238 {
5239 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5240 return rcStrict;
5241 }
5242 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5243 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5244 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5245
5246 /*
5247 * Check the descriptor type, DPL and such.
5248 * ASSUMES this is done in the same order as described for call-gate calls.
5249 */
5250 if (Idte.Gate.u1DescType)
5251 {
5252 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5253 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5254 }
5255 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5256 switch (Idte.Gate.u4Type)
5257 {
5258 case AMD64_SEL_TYPE_SYS_INT_GATE:
5259 fEflToClear |= X86_EFL_IF;
5260 break;
5261 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5262 break;
5263
5264 default:
5265 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5266 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5267 }
5268
5269 /* Check DPL against CPL if applicable. */
5270 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5271 {
5272 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5273 {
5274 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5275 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5276 }
5277 }
5278
5279 /* Is it there? */
5280 if (!Idte.Gate.u1Present)
5281 {
5282 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5283 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5284 }
5285
5286 /* A null CS is bad. */
5287 RTSEL NewCS = Idte.Gate.u16Sel;
5288 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5289 {
5290 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5291 return iemRaiseGeneralProtectionFault0(pVCpu);
5292 }
5293
5294 /* Fetch the descriptor for the new CS. */
5295 IEMSELDESC DescCS;
5296 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5297 if (rcStrict != VINF_SUCCESS)
5298 {
5299 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5300 return rcStrict;
5301 }
5302
5303 /* Must be a 64-bit code segment. */
5304 if (!DescCS.Long.Gen.u1DescType)
5305 {
5306 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5307 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5308 }
5309 if ( !DescCS.Long.Gen.u1Long
5310 || DescCS.Long.Gen.u1DefBig
5311 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5312 {
5313 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5314 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5315 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5316 }
5317
5318 /* Don't allow lowering the privilege level. For non-conforming CS
5319 selectors, the CS.DPL sets the privilege level the trap/interrupt
5320 handler runs at. For conforming CS selectors, the CPL remains
5321 unchanged, but the CS.DPL must be <= CPL. */
5322 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5323 * when CPU in Ring-0. Result \#GP? */
5324 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5325 {
5326 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5327 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5328 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5329 }
5330
5331
5332 /* Make sure the selector is present. */
5333 if (!DescCS.Legacy.Gen.u1Present)
5334 {
5335 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5336 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5337 }
5338
5339 /* Check that the new RIP is canonical. */
5340 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5341 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5342 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5343 if (!IEM_IS_CANONICAL(uNewRip))
5344 {
5345 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5346 return iemRaiseGeneralProtectionFault0(pVCpu);
5347 }
5348
5349 /*
5350 * If the privilege level changes or if the IST isn't zero, we need to get
5351 * a new stack from the TSS.
5352 */
5353 uint64_t uNewRsp;
5354 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5355 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5356 if ( uNewCpl != pVCpu->iem.s.uCpl
5357 || Idte.Gate.u3IST != 0)
5358 {
5359 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5360 if (rcStrict != VINF_SUCCESS)
5361 return rcStrict;
5362 }
5363 else
5364 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5365 uNewRsp &= ~(uint64_t)0xf;
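    /* In 64-bit mode the CPU aligns the stack on a 16-byte boundary before pushing the frame. */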
5366
5367 /*
5368 * Calc the flag image to push.
5369 */
5370 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5371 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5372 fEfl &= ~X86_EFL_RF;
5373 else
5374 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5375
5376 /*
5377 * Start making changes.
5378 */
5379 /* Set the new CPL so that stack accesses use it. */
5380 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5381 pVCpu->iem.s.uCpl = uNewCpl;
5382
5383 /* Create the stack frame. */
5384 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
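    /* Five qwords are always pushed: SS, RSP, RFLAGS, CS and RIP; an error code adds a sixth. */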
5385 RTPTRUNION uStackFrame;
5386 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5387 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5388 if (rcStrict != VINF_SUCCESS)
5389 return rcStrict;
5390 void * const pvStackFrame = uStackFrame.pv;
5391
5392 if (fFlags & IEM_XCPT_FLAGS_ERR)
5393 *uStackFrame.pu64++ = uErr;
5394 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5395 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5396 uStackFrame.pu64[2] = fEfl;
5397 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5398 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5399 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5400 if (rcStrict != VINF_SUCCESS)
5401 return rcStrict;
5402
5403    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5404    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5405 * after pushing the stack frame? (Write protect the gdt + stack to
5406 * find out.) */
5407 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5408 {
5409 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5410 if (rcStrict != VINF_SUCCESS)
5411 return rcStrict;
5412 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5413 }
5414
5415 /*
5416     * Start committing the register changes.
5417 */
5418 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5419 * hidden registers when interrupting 32-bit or 16-bit code! */
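    /* On a CPL change in 64-bit mode, SS is loaded with a NULL selector carrying the new CPL
       as RPL; the hidden attributes are flagged unusable and only record the new DPL. */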
5420 if (uNewCpl != uOldCpl)
5421 {
5422 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5423 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5424 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5425 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5426 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5427 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5428 }
5429 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5430 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5431 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5432 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5433 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5434 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5435 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5436 pVCpu->cpum.GstCtx.rip = uNewRip;
5437
5438 fEfl &= ~fEflToClear;
5439 IEMMISC_SET_EFL(pVCpu, fEfl);
5440
5441 if (fFlags & IEM_XCPT_FLAGS_CR2)
5442 pVCpu->cpum.GstCtx.cr2 = uCr2;
5443
5444 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5445 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5446
5447 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5448}
5449
5450
5451/**
5452 * Implements exceptions and interrupts.
5453 *
5454 * All exceptions and interrupts go through this function!
5455 *
5456 * @returns VBox strict status code.
5457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5458 * @param cbInstr The number of bytes to offset rIP by in the return
5459 * address.
5460 * @param u8Vector The interrupt / exception vector number.
5461 * @param fFlags The flags.
5462 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5463 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5464 */
5465DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5466iemRaiseXcptOrInt(PVMCPU pVCpu,
5467 uint8_t cbInstr,
5468 uint8_t u8Vector,
5469 uint32_t fFlags,
5470 uint16_t uErr,
5471 uint64_t uCr2)
5472{
5473 /*
5474 * Get all the state that we might need here.
5475 */
5476 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5477 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5478
5479#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5480 /*
5481 * Flush prefetch buffer
5482 */
5483 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5484#endif
5485
5486 /*
5487 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5488 */
5489 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5490 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5491 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5492 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5493 {
5494 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5495 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5496 u8Vector = X86_XCPT_GP;
5497 uErr = 0;
5498 }
5499#ifdef DBGFTRACE_ENABLED
5500 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5501 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5502 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5503#endif
5504
5505#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5506 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5507 {
5508 /*
5509 * If the event is being injected as part of VMRUN, it isn't subject to event
5510 * intercepts in the nested-guest. However, secondary exceptions that occur
5511 * during injection of any event -are- subject to exception intercepts.
5512 * See AMD spec. 15.20 "Event Injection".
5513 */
5514 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5515 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5516 else
5517 {
5518 /*
5519 * Check and handle if the event being raised is intercepted.
5520 */
5521 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5522 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5523 return rcStrict0;
5524 }
5525 }
5526#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5527
5528 /*
5529 * Do recursion accounting.
5530 */
5531 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5532 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5533 if (pVCpu->iem.s.cXcptRecursions == 0)
5534 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5535 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5536 else
5537 {
5538 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5539 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5540 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5541
5542 if (pVCpu->iem.s.cXcptRecursions >= 4)
5543 {
5544#ifdef DEBUG_bird
5545 AssertFailed();
5546#endif
5547 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5548 }
5549
5550 /*
5551 * Evaluate the sequence of recurring events.
5552 */
5553 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5554 NULL /* pXcptRaiseInfo */);
5555 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5556 { /* likely */ }
5557 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5558 {
5559 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5560 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5561 u8Vector = X86_XCPT_DF;
5562 uErr = 0;
5563 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5564 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5565 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5566 }
5567 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5568 {
5569 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5570 return iemInitiateCpuShutdown(pVCpu);
5571 }
5572 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5573 {
5574 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5575 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5576 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5577 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5578 return VERR_EM_GUEST_CPU_HANG;
5579 }
5580 else
5581 {
5582 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5583 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5584 return VERR_IEM_IPE_9;
5585 }
5586
5587 /*
5588         * The 'EXT' bit is set when an exception occurs during delivery of an external
5589         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5590         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5591         * interrupts and the INTO/INT3 instructions, the 'EXT' bit will not be set[3].
5592 *
5593 * [1] - Intel spec. 6.13 "Error Code"
5594 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5595 * [3] - Intel Instruction reference for INT n.
5596 */
5597 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5598 && (fFlags & IEM_XCPT_FLAGS_ERR)
5599 && u8Vector != X86_XCPT_PF
5600 && u8Vector != X86_XCPT_DF)
5601 {
5602 uErr |= X86_TRAP_ERR_EXTERNAL;
5603 }
5604 }
5605
5606 pVCpu->iem.s.cXcptRecursions++;
5607 pVCpu->iem.s.uCurXcpt = u8Vector;
5608 pVCpu->iem.s.fCurXcpt = fFlags;
5609 pVCpu->iem.s.uCurXcptErr = uErr;
5610 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5611
5612 /*
5613 * Extensive logging.
5614 */
5615#if defined(LOG_ENABLED) && defined(IN_RING3)
5616 if (LogIs3Enabled())
5617 {
5618 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5619 PVM pVM = pVCpu->CTX_SUFF(pVM);
5620 char szRegs[4096];
5621 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5622 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5623 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5624 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5625 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5626 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5627 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5628 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5629 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5630 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5631 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5632 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5633 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5634 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5635 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5636 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5637 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5638 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5639 " efer=%016VR{efer}\n"
5640 " pat=%016VR{pat}\n"
5641 " sf_mask=%016VR{sf_mask}\n"
5642 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5643 " lstar=%016VR{lstar}\n"
5644 " star=%016VR{star} cstar=%016VR{cstar}\n"
5645 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5646 );
5647
5648 char szInstr[256];
5649 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5650 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5651 szInstr, sizeof(szInstr), NULL);
5652 Log3(("%s%s\n", szRegs, szInstr));
5653 }
5654#endif /* LOG_ENABLED */
5655
5656 /*
5657 * Call the mode specific worker function.
5658 */
5659 VBOXSTRICTRC rcStrict;
5660 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5661 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5662 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5663 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5664 else
5665 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5666
5667 /* Flush the prefetch buffer. */
5668#ifdef IEM_WITH_CODE_TLB
5669 pVCpu->iem.s.pbInstrBuf = NULL;
5670#else
5671 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5672#endif
5673
5674 /*
5675 * Unwind.
5676 */
5677 pVCpu->iem.s.cXcptRecursions--;
5678 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5679 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5680 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5681 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5682 pVCpu->iem.s.cXcptRecursions + 1));
5683 return rcStrict;
5684}
5685
5686#ifdef IEM_WITH_SETJMP
5687/**
5688 * See iemRaiseXcptOrInt. Will not return.
5689 */
5690IEM_STATIC DECL_NO_RETURN(void)
5691iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5692 uint8_t cbInstr,
5693 uint8_t u8Vector,
5694 uint32_t fFlags,
5695 uint16_t uErr,
5696 uint64_t uCr2)
5697{
5698 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5699 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5700}
5701#endif
5702
5703
5704/** \#DE - 00. */
5705DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5706{
5707 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5708}
5709
5710
5711/** \#DB - 01.
5712 * @note This automatically clears DR7.GD. */
5713DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5714{
5715 /** @todo set/clear RF. */
5716 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5717 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5718}
5719
5720
5721/** \#BR - 05. */
5722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5723{
5724 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5725}
5726
5727
5728/** \#UD - 06. */
5729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5730{
5731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5732}
5733
5734
5735/** \#NM - 07. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5739}
5740
5741
5742/** \#TS(err) - 0a. */
5743DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5744{
5745 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5746}
5747
5748
5749/** \#TS(tr) - 0a. */
5750DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5751{
5752 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5753 pVCpu->cpum.GstCtx.tr.Sel, 0);
5754}
5755
5756
5757/** \#TS(0) - 0a. */
5758DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5759{
5760 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5761 0, 0);
5762}
5763
5764
5765/** \#TS(err) - 0a. */
5766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5767{
5768 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5769 uSel & X86_SEL_MASK_OFF_RPL, 0);
5770}
5771
5772
5773/** \#NP(err) - 0b. */
5774DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5775{
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5777}
5778
5779
5780/** \#NP(sel) - 0b. */
5781DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5782{
5783 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5784 uSel & ~X86_SEL_RPL, 0);
5785}
5786
5787
5788/** \#SS(seg) - 0c. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5790{
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5792 uSel & ~X86_SEL_RPL, 0);
5793}
5794
5795
5796/** \#SS(err) - 0c. */
5797DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5798{
5799 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5800}
5801
5802
5803/** \#GP(n) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5807}
5808
5809
5810/** \#GP(0) - 0d. */
5811DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5812{
5813 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5814}
5815
5816#ifdef IEM_WITH_SETJMP
5817/** \#GP(0) - 0d. */
5818DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5819{
5820 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5821}
5822#endif
5823
5824
5825/** \#GP(sel) - 0d. */
5826DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5827{
5828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5829 Sel & ~X86_SEL_RPL, 0);
5830}
5831
5832
5833/** \#GP(0) - 0d. */
5834DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5835{
5836 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5837}
5838
5839
5840/** \#GP(sel) - 0d. */
5841DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5842{
5843 NOREF(iSegReg); NOREF(fAccess);
5844 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5845 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5846}
5847
5848#ifdef IEM_WITH_SETJMP
5849/** \#GP(sel) - 0d, longjmp. */
5850DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5851{
5852 NOREF(iSegReg); NOREF(fAccess);
5853 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5854 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5855}
5856#endif
5857
5858/** \#GP(sel) - 0d. */
5859DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5860{
5861 NOREF(Sel);
5862 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5863}
5864
5865#ifdef IEM_WITH_SETJMP
5866/** \#GP(sel) - 0d, longjmp. */
5867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5868{
5869 NOREF(Sel);
5870 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5871}
5872#endif
5873
5874
5875/** \#GP(sel) - 0d. */
5876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5877{
5878 NOREF(iSegReg); NOREF(fAccess);
5879 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5880}
5881
5882#ifdef IEM_WITH_SETJMP
5883/** \#GP(sel) - 0d, longjmp. */
5884DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5885 uint32_t fAccess)
5886{
5887 NOREF(iSegReg); NOREF(fAccess);
5888 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5889}
5890#endif
5891
5892
5893/** \#PF(n) - 0e. */
5894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5895{
5896 uint16_t uErr;
5897 switch (rc)
5898 {
5899 case VERR_PAGE_NOT_PRESENT:
5900 case VERR_PAGE_TABLE_NOT_PRESENT:
5901 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5902 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5903 uErr = 0;
5904 break;
5905
5906 default:
5907 AssertMsgFailed(("%Rrc\n", rc));
5908 RT_FALL_THRU();
5909 case VERR_ACCESS_DENIED:
5910 uErr = X86_TRAP_PF_P;
5911 break;
5912
5913 /** @todo reserved */
5914 }
5915
5916 if (pVCpu->iem.s.uCpl == 3)
5917 uErr |= X86_TRAP_PF_US;
5918
5919 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5920 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5921 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5922 uErr |= X86_TRAP_PF_ID;
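    /* (The instruction-fetch bit is only reported when no-execute paging is in effect,
       i.e. PAE or long-mode paging with EFER.NXE set, matching the check above.) */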
5923
5924#if 0 /* This is so much non-sense, really. Why was it done like that? */
5925 /* Note! RW access callers reporting a WRITE protection fault, will clear
5926 the READ flag before calling. So, read-modify-write accesses (RW)
5927 can safely be reported as READ faults. */
5928 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5929 uErr |= X86_TRAP_PF_RW;
5930#else
5931 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5932 {
5933 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5934 uErr |= X86_TRAP_PF_RW;
5935 }
5936#endif
5937
5938 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5939 uErr, GCPtrWhere);
5940}
5941
5942#ifdef IEM_WITH_SETJMP
5943/** \#PF(n) - 0e, longjmp. */
5944IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5945{
5946 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5947}
5948#endif
5949
5950
5951/** \#MF(0) - 10. */
5952DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5953{
5954 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5955}
5956
5957
5958/** \#AC(0) - 11. */
5959DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5960{
5961 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5962}
5963
5964
5965/**
5966 * Macro for calling iemCImplRaiseDivideError().
5967 *
5968 * This enables us to add/remove arguments and force different levels of
5969 * inlining as we wish.
5970 *
5971 * @return Strict VBox status code.
5972 */
5973#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5974IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5975{
5976 NOREF(cbInstr);
5977 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5978}
5979
5980
5981/**
5982 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5983 *
5984 * This enables us to add/remove arguments and force different levels of
5985 * inlining as we wish.
5986 *
5987 * @return Strict VBox status code.
5988 */
5989#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5990IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5991{
5992 NOREF(cbInstr);
5993 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5994}
5995
5996
5997/**
5998 * Macro for calling iemCImplRaiseInvalidOpcode().
5999 *
6000 * This enables us to add/remove arguments and force different levels of
6001 * inlining as we wish.
6002 *
6003 * @return Strict VBox status code.
6004 */
6005#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6006IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6007{
6008 NOREF(cbInstr);
6009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6010}
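/* Note: the IEMOP_RAISE_XXX macros above are intended for the opcode decoders, which would
   typically just do: return IEMOP_RAISE_INVALID_OPCODE(); for an undefined encoding. */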
6011
6012
6013/** @} */
6014
6015
6016/*
6017 *
6018 * Helper routines.
6019 * Helper routines.
6020 * Helper routines.
6021 *
6022 */
6023
6024/**
6025 * Recalculates the effective operand size.
6026 *
6027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6028 */
6029IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6030{
6031 switch (pVCpu->iem.s.enmCpuMode)
6032 {
6033 case IEMMODE_16BIT:
6034 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6035 break;
6036 case IEMMODE_32BIT:
6037 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6038 break;
6039 case IEMMODE_64BIT:
6040 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6041 {
6042 case 0:
6043 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6044 break;
6045 case IEM_OP_PRF_SIZE_OP:
6046 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6047 break;
6048 case IEM_OP_PRF_SIZE_REX_W:
6049 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6050 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6051 break;
6052 }
6053 break;
6054 default:
6055 AssertFailed();
6056 }
6057}
6058
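/*
 * Illustrative sketch, not part of the IEM code: how the 0x66 operand-size
 * prefix and REX.W combine in 64-bit mode, mirroring the 64-bit case of the
 * switch above.  The EXOPSIZE enum and exEffOpSize64 name are local stand-ins,
 * not IEM definitions, and the block is disabled so it never affects the build.
 */
#if 0
typedef enum { EXOPSIZE_16, EXOPSIZE_32, EXOPSIZE_64 } EXOPSIZE;

static EXOPSIZE exEffOpSize64(bool fOpSizePrefix, bool fRexW, EXOPSIZE enmDefault)
{
    if (fRexW)              /* REX.W takes precedence over 0x66. */
        return EXOPSIZE_64;
    if (fOpSizePrefix)      /* 0x66 alone selects 16-bit operands. */
        return EXOPSIZE_16;
    return enmDefault;      /* Otherwise the default operand size applies. */
}
#endif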
6059
6060/**
6061 * Sets the default operand size to 64-bit and recalculates the effective
6062 * operand size.
6063 *
6064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6065 */
6066IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6067{
6068 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6069 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6070 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6071 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6072 else
6073 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6074}
6075
6076
6077/*
6078 *
6079 * Common opcode decoders.
6080 * Common opcode decoders.
6081 * Common opcode decoders.
6082 *
6083 */
6084//#include <iprt/mem.h>
6085
6086/**
6087 * Used to add extra details about a stub case.
6088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6089 */
6090IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6091{
6092#if defined(LOG_ENABLED) && defined(IN_RING3)
6093 PVM pVM = pVCpu->CTX_SUFF(pVM);
6094 char szRegs[4096];
6095 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6096 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6097 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6098 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6099 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6100 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6101 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6102 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6103 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6104 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6105 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6106 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6107 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6108 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6109 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6110 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6111 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6112 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6113 " efer=%016VR{efer}\n"
6114 " pat=%016VR{pat}\n"
6115 " sf_mask=%016VR{sf_mask}\n"
6116 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6117 " lstar=%016VR{lstar}\n"
6118 " star=%016VR{star} cstar=%016VR{cstar}\n"
6119 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6120 );
6121
6122 char szInstr[256];
6123 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6124 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6125 szInstr, sizeof(szInstr), NULL);
6126
6127 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6128#else
6129 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6130#endif
6131}
6132
6133/**
6134 * Complains about a stub.
6135 *
6136 * Two versions of this macro are provided: one for daily use and one for use
6137 * when working on IEM.
6138 */
6139#if 0
6140# define IEMOP_BITCH_ABOUT_STUB() \
6141 do { \
6142 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6143 iemOpStubMsg2(pVCpu); \
6144 RTAssertPanic(); \
6145 } while (0)
6146#else
6147# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6148#endif
6149
6150/** Stubs an opcode. */
6151#define FNIEMOP_STUB(a_Name) \
6152 FNIEMOP_DEF(a_Name) \
6153 { \
6154 RT_NOREF_PV(pVCpu); \
6155 IEMOP_BITCH_ABOUT_STUB(); \
6156 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6157 } \
6158 typedef int ignore_semicolon
6159
6160/** Stubs an opcode. */
6161#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6162 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6163 { \
6164 RT_NOREF_PV(pVCpu); \
6165 RT_NOREF_PV(a_Name0); \
6166 IEMOP_BITCH_ABOUT_STUB(); \
6167 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6168 } \
6169 typedef int ignore_semicolon
6170
6171/** Stubs an opcode which currently should raise \#UD. */
6172#define FNIEMOP_UD_STUB(a_Name) \
6173 FNIEMOP_DEF(a_Name) \
6174 { \
6175 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6176 return IEMOP_RAISE_INVALID_OPCODE(); \
6177 } \
6178 typedef int ignore_semicolon
6179
6180/** Stubs an opcode which currently should raise \#UD. */
6181#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6182 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6183 { \
6184 RT_NOREF_PV(pVCpu); \
6185 RT_NOREF_PV(a_Name0); \
6186 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6187 return IEMOP_RAISE_INVALID_OPCODE(); \
6188 } \
6189 typedef int ignore_semicolon
6190
6191
6192
6193/** @name Register Access.
6194 * @{
6195 */
6196
6197/**
6198 * Gets a reference (pointer) to the specified hidden segment register.
6199 *
6200 * @returns Hidden register reference.
6201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6202 * @param iSegReg The segment register.
6203 */
6204IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6205{
6206 Assert(iSegReg < X86_SREG_COUNT);
6207 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6208 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6209
6210#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6211 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6212 { /* likely */ }
6213 else
6214 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6215#else
6216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6217#endif
6218 return pSReg;
6219}
6220
6221
6222/**
6223 * Ensures that the given hidden segment register is up to date.
6224 *
6225 * @returns Hidden register reference.
6226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6227 * @param pSReg The segment register.
6228 */
6229IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6230{
6231#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6232 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6233 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6234#else
6235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6236 NOREF(pVCpu);
6237#endif
6238 return pSReg;
6239}
6240
6241
6242/**
6243 * Gets a reference (pointer) to the specified segment register (the selector
6244 * value).
6245 *
6246 * @returns Pointer to the selector variable.
6247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6248 * @param iSegReg The segment register.
6249 */
6250DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6251{
6252 Assert(iSegReg < X86_SREG_COUNT);
6253 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6254 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6255}
6256
6257
6258/**
6259 * Fetches the selector value of a segment register.
6260 *
6261 * @returns The selector value.
6262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6263 * @param iSegReg The segment register.
6264 */
6265DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6266{
6267 Assert(iSegReg < X86_SREG_COUNT);
6268 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6269 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6270}
6271
6272
6273/**
6274 * Fetches the base address value of a segment register.
6275 *
6276 * @returns The segment base address.
6277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6278 * @param iSegReg The segment register.
6279 */
6280DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6281{
6282 Assert(iSegReg < X86_SREG_COUNT);
6283 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6284 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6285}
6286
6287
6288/**
6289 * Gets a reference (pointer) to the specified general purpose register.
6290 *
6291 * @returns Register reference.
6292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6293 * @param iReg The general purpose register.
6294 */
6295DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6296{
6297 Assert(iReg < 16);
6298 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6299}
6300
6301
6302/**
6303 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6304 *
6305 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6306 *
6307 * @returns Register reference.
6308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6309 * @param iReg The register.
6310 */
6311DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6312{
6313 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6314 {
6315 Assert(iReg < 16);
6316 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6317 }
6318 /* high 8-bit register. */
6319 Assert(iReg < 8);
6320 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6321}
6322
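/*
 * Illustrative sketch, not part of the IEM code: without any REX prefix, the
 * 8-bit register encodings 4..7 select AH, CH, DH and BH, i.e. the high byte
 * of registers 0..3, which is what the iReg & 3 / bHi access above implements.
 * The exMap8BitReg name is a local stand-in and the block is disabled.
 */
#if 0
static uint8_t exMap8BitReg(uint8_t iReg, bool fHasRex, bool *pfHighByte)
{
    if (iReg < 4 || fHasRex)
    {
        *pfHighByte = false;    /* AL/CL/DL/BL, or SPL/BPL/SIL/DIL/R8B..R15B with REX. */
        return iReg;
    }
    *pfHighByte = true;         /* AH/CH/DH/BH ... */
    return (uint8_t)(iReg & 3); /* ... live in the high byte of RAX/RCX/RDX/RBX. */
}
#endif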
6323
6324/**
6325 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6326 *
6327 * @returns Register reference.
6328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6329 * @param iReg The register.
6330 */
6331DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6332{
6333 Assert(iReg < 16);
6334 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6335}
6336
6337
6338/**
6339 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6340 *
6341 * @returns Register reference.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param iReg The register.
6344 */
6345DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6346{
6347 Assert(iReg < 16);
6348 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6349}
6350
6351
6352/**
6353 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6354 *
6355 * @returns Register reference.
6356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6357 * @param iReg The register.
6358 */
6359DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6360{
6361    Assert(iReg < 16);
6362 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6363}
6364
6365
6366/**
6367 * Gets a reference (pointer) to the specified segment register's base address.
6368 *
6369 * @returns Segment register base address reference.
6370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6371 * @param iSegReg The segment selector.
6372 */
6373DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6374{
6375 Assert(iSegReg < X86_SREG_COUNT);
6376 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6377 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6378}
6379
6380
6381/**
6382 * Fetches the value of an 8-bit general purpose register.
6383 *
6384 * @returns The register value.
6385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6386 * @param iReg The register.
6387 */
6388DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6389{
6390 return *iemGRegRefU8(pVCpu, iReg);
6391}
6392
6393
6394/**
6395 * Fetches the value of a 16-bit general purpose register.
6396 *
6397 * @returns The register value.
6398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6399 * @param iReg The register.
6400 */
6401DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6402{
6403 Assert(iReg < 16);
6404 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6405}
6406
6407
6408/**
6409 * Fetches the value of a 32-bit general purpose register.
6410 *
6411 * @returns The register value.
6412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6413 * @param iReg The register.
6414 */
6415DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6416{
6417 Assert(iReg < 16);
6418 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6419}
6420
6421
6422/**
6423 * Fetches the value of a 64-bit general purpose register.
6424 *
6425 * @returns The register value.
6426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6427 * @param iReg The register.
6428 */
6429DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6430{
6431 Assert(iReg < 16);
6432 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6433}
6434
6435
6436/**
6437 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6438 *
6439 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6440 * segment limit.
6441 *
 * @returns Strict VBox status code.
6442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6443 * @param offNextInstr The offset of the next instruction.
6444 */
6445IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6446{
6447 switch (pVCpu->iem.s.enmEffOpSize)
6448 {
6449 case IEMMODE_16BIT:
6450 {
6451 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6452 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6453 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6454 return iemRaiseGeneralProtectionFault0(pVCpu);
6455 pVCpu->cpum.GstCtx.rip = uNewIp;
6456 break;
6457 }
6458
6459 case IEMMODE_32BIT:
6460 {
6461 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6462 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6463
6464 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6465 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6466 return iemRaiseGeneralProtectionFault0(pVCpu);
6467 pVCpu->cpum.GstCtx.rip = uNewEip;
6468 break;
6469 }
6470
6471 case IEMMODE_64BIT:
6472 {
6473 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6474
6475 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6476 if (!IEM_IS_CANONICAL(uNewRip))
6477 return iemRaiseGeneralProtectionFault0(pVCpu);
6478 pVCpu->cpum.GstCtx.rip = uNewRip;
6479 break;
6480 }
6481
6482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6483 }
6484
6485 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6486
6487#ifndef IEM_WITH_CODE_TLB
6488 /* Flush the prefetch buffer. */
6489 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6490#endif
6491
6492 return VINF_SUCCESS;
6493}
6494
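/*
 * Illustrative sketch, not part of the IEM code: in the 16-bit case above the
 * new IP is computed modulo 64K before the CS limit check, so a backward jump
 * near IP=0 wraps; e.g. ip=0x0002, a 2 byte instruction and rel8=-8 give
 * 0xfffc.  The exNewIpFromRel8 name is a local stand-in; the block is disabled.
 */
#if 0
static uint16_t exNewIpFromRel8(uint16_t uIp, uint8_t cbInstr, int8_t offRel)
{
    return (uint16_t)(uIp + cbInstr + offRel);  /* wraps within 16 bits */
}
#endif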
6495
6496/**
6497 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6498 *
6499 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6500 * segment limit.
6501 *
6502 * @returns Strict VBox status code.
6503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6504 * @param offNextInstr The offset of the next instruction.
6505 */
6506IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6507{
6508 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6509
6510 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6511 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6512 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6513 return iemRaiseGeneralProtectionFault0(pVCpu);
6514    /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6515 pVCpu->cpum.GstCtx.rip = uNewIp;
6516 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6517
6518#ifndef IEM_WITH_CODE_TLB
6519 /* Flush the prefetch buffer. */
6520 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6521#endif
6522
6523 return VINF_SUCCESS;
6524}
6525
6526
6527/**
6528 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6529 *
6530 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6531 * segment limit.
6532 *
6533 * @returns Strict VBox status code.
6534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6535 * @param offNextInstr The offset of the next instruction.
6536 */
6537IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6538{
6539 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6540
6541 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6542 {
6543 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6544
6545 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6546 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6547 return iemRaiseGeneralProtectionFault0(pVCpu);
6548 pVCpu->cpum.GstCtx.rip = uNewEip;
6549 }
6550 else
6551 {
6552 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6553
6554 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6555 if (!IEM_IS_CANONICAL(uNewRip))
6556 return iemRaiseGeneralProtectionFault0(pVCpu);
6557 pVCpu->cpum.GstCtx.rip = uNewRip;
6558 }
6559 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6560
6561#ifndef IEM_WITH_CODE_TLB
6562 /* Flush the prefetch buffer. */
6563 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6564#endif
6565
6566 return VINF_SUCCESS;
6567}
6568
6569
6570/**
6571 * Performs a near jump to the specified address.
6572 *
6573 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6574 * segment limit.
6575 *
 * @returns Strict VBox status code.
6576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6577 * @param uNewRip The new RIP value.
6578 */
6579IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6580{
6581 switch (pVCpu->iem.s.enmEffOpSize)
6582 {
6583 case IEMMODE_16BIT:
6584 {
6585 Assert(uNewRip <= UINT16_MAX);
6586 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6587 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6588 return iemRaiseGeneralProtectionFault0(pVCpu);
6589 /** @todo Test 16-bit jump in 64-bit mode. */
6590 pVCpu->cpum.GstCtx.rip = uNewRip;
6591 break;
6592 }
6593
6594 case IEMMODE_32BIT:
6595 {
6596 Assert(uNewRip <= UINT32_MAX);
6597 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6598 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6599
6600 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6601 return iemRaiseGeneralProtectionFault0(pVCpu);
6602 pVCpu->cpum.GstCtx.rip = uNewRip;
6603 break;
6604 }
6605
6606 case IEMMODE_64BIT:
6607 {
6608 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6609
6610 if (!IEM_IS_CANONICAL(uNewRip))
6611 return iemRaiseGeneralProtectionFault0(pVCpu);
6612 pVCpu->cpum.GstCtx.rip = uNewRip;
6613 break;
6614 }
6615
6616 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6617 }
6618
6619 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6620
6621#ifndef IEM_WITH_CODE_TLB
6622 /* Flush the prefetch buffer. */
6623 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6624#endif
6625
6626 return VINF_SUCCESS;
6627}
6628
6629
6630/**
6631 * Gets the address of the top of the stack.
6632 *
6633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6634 */
6635DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6636{
6637 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6638 return pVCpu->cpum.GstCtx.rsp;
6639 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6640 return pVCpu->cpum.GstCtx.esp;
6641 return pVCpu->cpum.GstCtx.sp;
6642}
6643
6644
6645/**
6646 * Updates the RIP/EIP/IP to point to the next instruction.
6647 *
6648 * This function leaves the EFLAGS.RF flag alone.
6649 *
6650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6651 * @param cbInstr The number of bytes to add.
6652 */
6653IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6654{
6655 switch (pVCpu->iem.s.enmCpuMode)
6656 {
6657 case IEMMODE_16BIT:
6658 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6659 pVCpu->cpum.GstCtx.eip += cbInstr;
6660 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6661 break;
6662
6663 case IEMMODE_32BIT:
6664 pVCpu->cpum.GstCtx.eip += cbInstr;
6665 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6666 break;
6667
6668 case IEMMODE_64BIT:
6669 pVCpu->cpum.GstCtx.rip += cbInstr;
6670 break;
6671 default: AssertFailed();
6672 }
6673}
6674
6675
6676#if 0
6677/**
6678 * Updates the RIP/EIP/IP to point to the next instruction.
6679 *
6680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6681 */
6682IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6683{
6684 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6685}
6686#endif
6687
6688
6689
6690/**
6691 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6692 *
6693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6694 * @param cbInstr The number of bytes to add.
6695 */
6696IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6697{
6698 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6699
6700 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6701#if ARCH_BITS >= 64
6702 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6703 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6704 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6705#else
6706 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6707 pVCpu->cpum.GstCtx.rip += cbInstr;
6708 else
6709 pVCpu->cpum.GstCtx.eip += cbInstr;
6710#endif
6711}
6712
6713
6714/**
6715 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6716 *
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 */
6719IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6720{
6721 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6722}
6723
6724
6725/**
6726 * Adds to the stack pointer.
6727 *
6728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6729 * @param cbToAdd The number of bytes to add (8-bit!).
6730 */
6731DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6732{
6733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6734 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6735 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6736 pVCpu->cpum.GstCtx.esp += cbToAdd;
6737 else
6738 pVCpu->cpum.GstCtx.sp += cbToAdd;
6739}
6740
6741
6742/**
6743 * Subtracts from the stack pointer.
6744 *
6745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6746 * @param cbToSub The number of bytes to subtract (8-bit!).
6747 */
6748DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6749{
6750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6751 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6752 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6753 pVCpu->cpum.GstCtx.esp -= cbToSub;
6754 else
6755 pVCpu->cpum.GstCtx.sp -= cbToSub;
6756}
6757
6758
6759/**
6760 * Adds to the temporary stack pointer.
6761 *
6762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6763 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6764 * @param cbToAdd The number of bytes to add (16-bit).
6765 */
6766DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6767{
6768 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6769 pTmpRsp->u += cbToAdd;
6770 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6771 pTmpRsp->DWords.dw0 += cbToAdd;
6772 else
6773 pTmpRsp->Words.w0 += cbToAdd;
6774}
6775
6776
6777/**
6778 * Subtracts from the temporary stack pointer.
6779 *
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6782 * @param cbToSub The number of bytes to subtract.
6783 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter
6784 *          expects that.
6785 */
6786DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6787{
6788 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6789 pTmpRsp->u -= cbToSub;
6790 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6791 pTmpRsp->DWords.dw0 -= cbToSub;
6792 else
6793 pTmpRsp->Words.w0 -= cbToSub;
6794}
6795
6796
6797/**
6798 * Calculates the effective stack address for a push of the specified size as
6799 * well as the new RSP value (upper bits may be masked).
6800 *
6801 * @returns Effective stack address for the push.
6802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6803 * @param cbItem The size of the stack item to push.
6804 * @param puNewRsp Where to return the new RSP value.
6805 */
6806DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6807{
6808 RTUINT64U uTmpRsp;
6809 RTGCPTR GCPtrTop;
6810 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6811
6812 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6813 GCPtrTop = uTmpRsp.u -= cbItem;
6814 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6815 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6816 else
6817 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6818 *puNewRsp = uTmpRsp.u;
6819 return GCPtrTop;
6820}
6821
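/*
 * Illustrative sketch, not part of the IEM code: only the SP bits selected by
 * the mode / SS.D are updated, so a push can wrap within 16 (or 32) bits while
 * the upper RSP bits stay untouched; e.g. RSP=0x00007ffe00000002 minus a
 * 4 byte item on a 16-bit stack gives 0x00007ffe0000fffe.  The union mimics
 * RTUINT64U.Words.w0 on little-endian hosts; the block is disabled.
 */
#if 0
static uint64_t exRspAfter16BitPush(uint64_t uRsp, uint8_t cbItem)
{
    union { uint64_t u; uint16_t w0; } uTmp;    /* w0 = low word (little endian) */
    uTmp.u   = uRsp;
    uTmp.w0 -= cbItem;                          /* wraps within 16 bits */
    return uTmp.u;
}
#endif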
6822
6823/**
6824 * Gets the current stack pointer and calculates the value after a pop of the
6825 * specified size.
6826 *
6827 * @returns Current stack pointer.
6828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6829 * @param cbItem The size of the stack item to pop.
6830 * @param puNewRsp Where to return the new RSP value.
6831 */
6832DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6833{
6834 RTUINT64U uTmpRsp;
6835 RTGCPTR GCPtrTop;
6836 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6837
6838 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6839 {
6840 GCPtrTop = uTmpRsp.u;
6841 uTmpRsp.u += cbItem;
6842 }
6843 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6844 {
6845 GCPtrTop = uTmpRsp.DWords.dw0;
6846 uTmpRsp.DWords.dw0 += cbItem;
6847 }
6848 else
6849 {
6850 GCPtrTop = uTmpRsp.Words.w0;
6851 uTmpRsp.Words.w0 += cbItem;
6852 }
6853 *puNewRsp = uTmpRsp.u;
6854 return GCPtrTop;
6855}
6856
6857
6858/**
6859 * Calculates the effective stack address for a push of the specified size as
6860 * well as the new temporary RSP value (upper bits may be masked).
6861 *
6862 * @returns Effective stack address for the push.
6863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6864 * @param pTmpRsp The temporary stack pointer. This is updated.
6865 * @param cbItem The size of the stack item to push.
6866 */
6867DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6868{
6869 RTGCPTR GCPtrTop;
6870
6871 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6872 GCPtrTop = pTmpRsp->u -= cbItem;
6873 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6874 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6875 else
6876 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6877 return GCPtrTop;
6878}
6879
6880
6881/**
6882 * Gets the effective stack address for a pop of the specified size and
6883 * calculates and updates the temporary RSP.
6884 *
6885 * @returns Current stack pointer.
6886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6887 * @param pTmpRsp The temporary stack pointer. This is updated.
6888 * @param cbItem The size of the stack item to pop.
6889 */
6890DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6891{
6892 RTGCPTR GCPtrTop;
6893 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6894 {
6895 GCPtrTop = pTmpRsp->u;
6896 pTmpRsp->u += cbItem;
6897 }
6898 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6899 {
6900 GCPtrTop = pTmpRsp->DWords.dw0;
6901 pTmpRsp->DWords.dw0 += cbItem;
6902 }
6903 else
6904 {
6905 GCPtrTop = pTmpRsp->Words.w0;
6906 pTmpRsp->Words.w0 += cbItem;
6907 }
6908 return GCPtrTop;
6909}
6910
6911/** @} */
6912
6913
6914/** @name FPU access and helpers.
6915 *
6916 * @{
6917 */
6918
6919
6920/**
6921 * Hook for preparing to use the host FPU.
6922 *
6923 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6924 *
6925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6926 */
6927DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6928{
6929#ifdef IN_RING3
6930 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6931#else
6932 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6933#endif
6934 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6935}
6936
6937
6938/**
6939 * Hook for preparing to use the host FPU for SSE.
6940 *
6941 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6942 *
6943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6944 */
6945DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6946{
6947 iemFpuPrepareUsage(pVCpu);
6948}
6949
6950
6951/**
6952 * Hook for preparing to use the host FPU for AVX.
6953 *
6954 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6955 *
6956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6957 */
6958DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6959{
6960 iemFpuPrepareUsage(pVCpu);
6961}
6962
6963
6964/**
6965 * Hook for actualizing the guest FPU state before the interpreter reads it.
6966 *
6967 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6968 *
6969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6970 */
6971DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6972{
6973#ifdef IN_RING3
6974 NOREF(pVCpu);
6975#else
6976 CPUMRZFpuStateActualizeForRead(pVCpu);
6977#endif
6978 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6979}
6980
6981
6982/**
6983 * Hook for actualizing the guest FPU state before the interpreter changes it.
6984 *
6985 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6986 *
6987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6988 */
6989DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6990{
6991#ifdef IN_RING3
6992 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6993#else
6994 CPUMRZFpuStateActualizeForChange(pVCpu);
6995#endif
6996 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6997}
6998
6999
7000/**
7001 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7002 * only.
7003 *
7004 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7005 *
7006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7007 */
7008DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7009{
7010#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7011 NOREF(pVCpu);
7012#else
7013 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7014#endif
7015 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7016}
7017
7018
7019/**
7020 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7021 * read+write.
7022 *
7023 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 */
7027DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7028{
7029#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7030 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7031#else
7032 CPUMRZFpuStateActualizeForChange(pVCpu);
7033#endif
7034 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7035}
7036
7037
7038/**
7039 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7040 * only.
7041 *
7042 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7043 *
7044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7045 */
7046DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7047{
7048#ifdef IN_RING3
7049 NOREF(pVCpu);
7050#else
7051 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7052#endif
7053 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7054}
7055
7056
7057/**
7058 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7059 * read+write.
7060 *
7061 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7062 *
7063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7064 */
7065DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7066{
7067#ifdef IN_RING3
7068 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7069#else
7070 CPUMRZFpuStateActualizeForChange(pVCpu);
7071#endif
7072 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7073}
7074
7075
7076/**
7077 * Stores a QNaN value into a FPU register.
7078 *
7079 * @param pReg Pointer to the register.
7080 */
7081DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7082{
7083 pReg->au32[0] = UINT32_C(0x00000000);
7084 pReg->au32[1] = UINT32_C(0xc0000000);
7085 pReg->au16[4] = UINT16_C(0xffff);
7086}
7087
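/*
 * Illustrative note, not from the IEM sources: the pattern stored above is the
 * x87 "QNaN floating-point indefinite": sign=1, exponent=0x7fff, integer bit
 * and top fraction bit set, rest zero, i.e. sign/exponent word 0xffff and
 * mantissa 0xc000000000000000.  The helper below spells that out with plain
 * integers; the name is a local stand-in and the block is disabled.
 */
#if 0
static void exStoreIndefiniteQNan(uint16_t *puSignExp, uint64_t *puMantissa)
{
    *puSignExp  = UINT16_C(0xffff);             /* sign + all-ones exponent */
    *puMantissa = UINT64_C(0xc000000000000000); /* integer bit + quiet bit  */
}
#endif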
7088
7089/**
7090 * Updates the FOP, FPU.CS and FPUIP registers.
7091 *
7092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7093 * @param pFpuCtx The FPU context.
7094 */
7095DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7096{
7097 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7098 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7099    /** @todo x87.CS and FPUIP need to be kept separately. */
7100 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7101 {
7102 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7103 * happens in real mode here based on the fnsave and fnstenv images. */
7104 pFpuCtx->CS = 0;
7105 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7106 }
7107 else
7108 {
7109 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7110 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7111 }
7112}
7113
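/*
 * Illustrative sketch, not part of the IEM code: in real/V86 mode the code
 * above forms FPUIP the classic real-mode way, combining seg << 4 with the
 * offset; e.g. cs=0x1234, eip=0x0010 gives 0x12350.  The exRealModeFpuIp name
 * is a local stand-in; the block is disabled.
 */
#if 0
static uint32_t exRealModeFpuIp(uint16_t uCs, uint32_t uEip)
{
    return uEip | ((uint32_t)uCs << 4);         /* 1234:0010 -> 0x12350 */
}
#endif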
7114
7115/**
7116 * Updates the x87.DS and FPUDP registers.
7117 *
7118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7119 * @param pFpuCtx The FPU context.
7120 * @param iEffSeg The effective segment register.
7121 * @param GCPtrEff The effective address relative to @a iEffSeg.
7122 */
7123DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7124{
7125 RTSEL sel;
7126 switch (iEffSeg)
7127 {
7128 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7129 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7130 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7131 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7132 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7133 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7134 default:
7135 AssertMsgFailed(("%d\n", iEffSeg));
7136 sel = pVCpu->cpum.GstCtx.ds.Sel;
7137 }
7138    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7139 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7140 {
7141 pFpuCtx->DS = 0;
7142 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7143 }
7144 else
7145 {
7146 pFpuCtx->DS = sel;
7147 pFpuCtx->FPUDP = GCPtrEff;
7148 }
7149}
7150
7151
7152/**
7153 * Rotates the stack registers in the push direction.
7154 *
7155 * @param pFpuCtx The FPU context.
7156 * @remarks This is a complete waste of time, but fxsave stores the registers in
7157 * stack order.
7158 */
7159DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7160{
7161 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7162 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7163 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7164 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7165 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7166 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7167 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7168 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7169 pFpuCtx->aRegs[0].r80 = r80Tmp;
7170}
7171
7172
7173/**
7174 * Rotates the stack registers in the pop direction.
7175 *
7176 * @param pFpuCtx The FPU context.
7177 * @remarks This is a complete waste of time, but fxsave stores the registers in
7178 * stack order.
7179 */
7180DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7181{
7182 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7183 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7184 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7185 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7186 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7187 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7188 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7189 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7190 pFpuCtx->aRegs[7].r80 = r80Tmp;
7191}
7192
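/*
 * Illustrative sketch, not part of the IEM code: since aRegs[] is kept in
 * stack order (aRegs[0] is ST(0)), a TOP decrement (push) is a rotate towards
 * higher indices and a TOP increment (pop) a rotate towards lower ones.  A
 * generic form of the two helpers above, assuming memmove is available; the
 * exRotateStRegs name is a local stand-in and the block is disabled.
 */
#if 0
static void exRotateStRegs(RTFLOAT80U *paRegs /* 8 entries, ST(0) first */, bool fPush)
{
    RTFLOAT80U Tmp;
    if (fPush)
    {
        Tmp = paRegs[7];
        memmove(&paRegs[1], &paRegs[0], 7 * sizeof(paRegs[0]));
        paRegs[0] = Tmp;
    }
    else
    {
        Tmp = paRegs[0];
        memmove(&paRegs[0], &paRegs[1], 7 * sizeof(paRegs[0]));
        paRegs[7] = Tmp;
    }
}
#endif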
7193
7194/**
7195 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7196 * exception prevents it.
7197 *
7198 * @param pResult The FPU operation result to push.
7199 * @param pFpuCtx The FPU context.
7200 */
7201IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7202{
7203 /* Update FSW and bail if there are pending exceptions afterwards. */
7204 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7205 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7206 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7207 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7208 {
7209 pFpuCtx->FSW = fFsw;
7210 return;
7211 }
7212
7213 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7214 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7215 {
7216 /* All is fine, push the actual value. */
7217 pFpuCtx->FTW |= RT_BIT(iNewTop);
7218 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7219 }
7220 else if (pFpuCtx->FCW & X86_FCW_IM)
7221 {
7222 /* Masked stack overflow, push QNaN. */
7223 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7224 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7225 }
7226 else
7227 {
7228 /* Raise stack overflow, don't push anything. */
7229 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7230 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7231 return;
7232 }
7233
7234 fFsw &= ~X86_FSW_TOP_MASK;
7235 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7236 pFpuCtx->FSW = fFsw;
7237
7238 iemFpuRotateStackPush(pFpuCtx);
7239}
7240
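/*
 * Illustrative sketch, not part of the IEM code: FSW.TOP is a 3-bit field in
 * bits 11..13, so the push above decrements it as (TOP + 7) & 7 and a pop
 * increments it as (TOP + 1) & 7.  The exFswAdjustTop name and the explicit
 * shift/mask constants are local stand-ins for the X86_FSW_TOP_* macros; the
 * block is disabled.
 */
#if 0
static uint16_t exFswAdjustTop(uint16_t fFsw, int cPops /* negative = pushes */)
{
    unsigned iTop = (fFsw >> 11) & 7;                           /* extract TOP         */
    iTop = (unsigned)(iTop + cPops) & 7;                        /* modulo-8 arithmetic */
    return (uint16_t)((fFsw & ~(UINT16_C(7) << 11)) | (iTop << 11));
}
#endif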
7241
7242/**
7243 * Stores a result in a FPU register and updates the FSW and FTW.
7244 *
7245 * @param pFpuCtx The FPU context.
7246 * @param pResult The result to store.
7247 * @param iStReg Which FPU register to store it in.
7248 */
7249IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7250{
7251 Assert(iStReg < 8);
7252 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7253 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7254 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7255 pFpuCtx->FTW |= RT_BIT(iReg);
7256 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7257}
7258
7259
7260/**
7261 * Only updates the FPU status word (FSW) with the result of the current
7262 * instruction.
7263 *
7264 * @param pFpuCtx The FPU context.
7265 * @param u16FSW The FSW output of the current instruction.
7266 */
7267IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7268{
7269 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7270 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7271}
7272
7273
7274/**
7275 * Pops one item off the FPU stack if no pending exception prevents it.
7276 *
7277 * @param pFpuCtx The FPU context.
7278 */
7279IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7280{
7281 /* Check pending exceptions. */
7282 uint16_t uFSW = pFpuCtx->FSW;
7283 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7284 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7285 return;
7286
7287 /* TOP--. */
7288 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7289 uFSW &= ~X86_FSW_TOP_MASK;
7290 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7291 pFpuCtx->FSW = uFSW;
7292
7293 /* Mark the previous ST0 as empty. */
7294 iOldTop >>= X86_FSW_TOP_SHIFT;
7295 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7296
7297 /* Rotate the registers. */
7298 iemFpuRotateStackPop(pFpuCtx);
7299}
7300
7301
7302/**
7303 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7304 *
7305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7306 * @param pResult The FPU operation result to push.
7307 */
7308IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7309{
7310 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7311 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7312 iemFpuMaybePushResult(pResult, pFpuCtx);
7313}
7314
7315
7316/**
7317 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7318 * and sets FPUDP and FPUDS.
7319 *
7320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7321 * @param pResult The FPU operation result to push.
7322 * @param iEffSeg The effective segment register.
7323 * @param GCPtrEff The effective address relative to @a iEffSeg.
7324 */
7325IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7326{
7327 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7328 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7329 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7330 iemFpuMaybePushResult(pResult, pFpuCtx);
7331}
7332
7333
7334/**
7335 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7336 * unless a pending exception prevents it.
7337 *
7338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7339 * @param pResult The FPU operation result to store and push.
7340 */
7341IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7342{
7343 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7344 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7345
7346 /* Update FSW and bail if there are pending exceptions afterwards. */
7347 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7348 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7349 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7350 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7351 {
7352 pFpuCtx->FSW = fFsw;
7353 return;
7354 }
7355
7356 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7357 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7358 {
7359 /* All is fine, push the actual value. */
7360 pFpuCtx->FTW |= RT_BIT(iNewTop);
7361 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7362 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7363 }
7364 else if (pFpuCtx->FCW & X86_FCW_IM)
7365 {
7366 /* Masked stack overflow, push QNaN. */
7367 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7368 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7369 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7370 }
7371 else
7372 {
7373 /* Raise stack overflow, don't push anything. */
7374 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7375 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7376 return;
7377 }
7378
7379 fFsw &= ~X86_FSW_TOP_MASK;
7380 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7381 pFpuCtx->FSW = fFsw;
7382
7383 iemFpuRotateStackPush(pFpuCtx);
7384}
7385
7386
7387/**
7388 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7389 * FOP.
7390 *
7391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7392 * @param pResult The result to store.
7393 * @param iStReg Which FPU register to store it in.
7394 */
7395IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7396{
7397 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7398 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7399 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7400}
7401
7402
7403/**
7404 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7405 * FOP, and then pops the stack.
7406 *
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 * @param pResult The result to store.
7409 * @param iStReg Which FPU register to store it in.
7410 */
7411IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7412{
7413 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7414 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7415 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7416 iemFpuMaybePopOne(pFpuCtx);
7417}
7418
7419
7420/**
7421 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7422 * FPUDP, and FPUDS.
7423 *
7424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7425 * @param pResult The result to store.
7426 * @param iStReg Which FPU register to store it in.
7427 * @param iEffSeg The effective memory operand selector register.
7428 * @param GCPtrEff The effective memory operand offset.
7429 */
7430IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7431 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7432{
7433 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7434 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7435 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7436 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7437}
7438
7439
7440/**
7441 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7442 * FPUDP, and FPUDS, and then pops the stack.
7443 *
7444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7445 * @param pResult The result to store.
7446 * @param iStReg Which FPU register to store it in.
7447 * @param iEffSeg The effective memory operand selector register.
7448 * @param GCPtrEff The effective memory operand offset.
7449 */
7450IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7451 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7452{
7453 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7454 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7455 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7456 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7457 iemFpuMaybePopOne(pFpuCtx);
7458}
7459
7460
7461/**
7462 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7463 *
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 */
7466IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7467{
7468 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7470}
7471
7472
7473/**
7474 * Marks the specified stack register as free (for FFREE).
7475 *
7476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7477 * @param iStReg The register to free.
7478 */
7479IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7480{
7481 Assert(iStReg < 8);
7482 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7483 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7484 pFpuCtx->FTW &= ~RT_BIT(iReg);
7485}
7486
7487
7488/**
7489 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7490 *
7491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7492 */
7493IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7494{
7495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7496 uint16_t uFsw = pFpuCtx->FSW;
7497 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7498 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7499 uFsw &= ~X86_FSW_TOP_MASK;
7500 uFsw |= uTop;
7501 pFpuCtx->FSW = uFsw;
7502}
7503
7504
7505/**
7506 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7507 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 */
7510IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7511{
7512 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7513 uint16_t uFsw = pFpuCtx->FSW;
7514 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7515 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7516 uFsw &= ~X86_FSW_TOP_MASK;
7517 uFsw |= uTop;
7518 pFpuCtx->FSW = uFsw;
7519}
7520
7521
7522/**
7523 * Updates the FSW, FOP, FPUIP, and FPUCS.
7524 *
7525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7526 * @param u16FSW The FSW from the current instruction.
7527 */
7528IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7529{
7530 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7531 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7532 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7533}
7534
7535
7536/**
7537 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7538 *
7539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7540 * @param u16FSW The FSW from the current instruction.
7541 */
7542IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7543{
7544 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7545 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7546 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7547 iemFpuMaybePopOne(pFpuCtx);
7548}
7549
7550
7551/**
7552 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7553 *
7554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7555 * @param u16FSW The FSW from the current instruction.
7556 * @param iEffSeg The effective memory operand selector register.
7557 * @param GCPtrEff The effective memory operand offset.
7558 */
7559IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7560{
7561 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7562 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7563 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7564 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7565}
7566
7567
7568/**
7569 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 * @param u16FSW The FSW from the current instruction.
7573 */
7574IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7575{
7576 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7577 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7578 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7579 iemFpuMaybePopOne(pFpuCtx);
7580 iemFpuMaybePopOne(pFpuCtx);
7581}
7582
7583
7584/**
7585 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7586 *
7587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7588 * @param u16FSW The FSW from the current instruction.
7589 * @param iEffSeg The effective memory operand selector register.
7590 * @param GCPtrEff The effective memory operand offset.
7591 */
7592IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7593{
7594 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7597 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7598 iemFpuMaybePopOne(pFpuCtx);
7599}
7600
7601
7602/**
7603 * Worker routine for raising an FPU stack underflow exception.
7604 *
7605 * @param pFpuCtx The FPU context.
7606 * @param iStReg The stack register being accessed.
7607 */
7608IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7609{
7610 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7611 if (pFpuCtx->FCW & X86_FCW_IM)
7612 {
7613 /* Masked underflow. */
7614 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7615 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7616 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7617 if (iStReg != UINT8_MAX)
7618 {
7619 pFpuCtx->FTW |= RT_BIT(iReg);
7620 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7621 }
7622 }
7623 else
7624 {
7625 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7626 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7627 }
7628}
7629
7630
7631/**
7632 * Raises a FPU stack underflow exception.
7633 *
7634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7635 * @param iStReg The destination register that should be loaded
7636 * with QNaN if \#IS is not masked. Specify
7637 * UINT8_MAX if none (like for fcom).
7638 */
7639DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7640{
7641 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7642 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7643 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7644}
7645
7646
7647DECL_NO_INLINE(IEM_STATIC, void)
7648iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7649{
7650 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7651 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7652 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7653 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7654}
7655
7656
7657DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7658{
7659 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7660 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7661 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7662 iemFpuMaybePopOne(pFpuCtx);
7663}
7664
7665
7666DECL_NO_INLINE(IEM_STATIC, void)
7667iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7668{
7669 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7670 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7671 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7672 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7673 iemFpuMaybePopOne(pFpuCtx);
7674}
7675
7676
7677DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7678{
7679 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7680 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7681 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7682 iemFpuMaybePopOne(pFpuCtx);
7683 iemFpuMaybePopOne(pFpuCtx);
7684}
7685
7686
7687DECL_NO_INLINE(IEM_STATIC, void)
7688iemFpuStackPushUnderflow(PVMCPU pVCpu)
7689{
7690 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7691 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7692
7693 if (pFpuCtx->FCW & X86_FCW_IM)
7694 {
7695        /* Masked underflow - Push QNaN. */
7696 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7697 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7699 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7700 pFpuCtx->FTW |= RT_BIT(iNewTop);
7701 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7702 iemFpuRotateStackPush(pFpuCtx);
7703 }
7704 else
7705 {
7706 /* Exception pending - don't change TOP or the register stack. */
7707 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7708 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7709 }
7710}
7711
7712
7713DECL_NO_INLINE(IEM_STATIC, void)
7714iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7715{
7716 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7717 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7718
7719 if (pFpuCtx->FCW & X86_FCW_IM)
7720 {
7721        /* Masked underflow - Push QNaN. */
7722 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7723 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7724 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7725 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7726 pFpuCtx->FTW |= RT_BIT(iNewTop);
7727 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7728 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7729 iemFpuRotateStackPush(pFpuCtx);
7730 }
7731 else
7732 {
7733 /* Exception pending - don't change TOP or the register stack. */
7734 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7735 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7736 }
7737}
7738
7739
7740/**
7741 * Worker routine for raising an FPU stack overflow exception on a push.
7742 *
7743 * @param pFpuCtx The FPU context.
7744 */
7745IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7746{
7747 if (pFpuCtx->FCW & X86_FCW_IM)
7748 {
7749 /* Masked overflow. */
7750 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7751 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7752 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7753 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7754 pFpuCtx->FTW |= RT_BIT(iNewTop);
7755 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7756 iemFpuRotateStackPush(pFpuCtx);
7757 }
7758 else
7759 {
7760 /* Exception pending - don't change TOP or the register stack. */
7761 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7762 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7763 }
7764}
7765
7766
7767/**
7768 * Raises an FPU stack overflow exception on a push.
7769 *
7770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7771 */
7772DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7773{
7774 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7775 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7776 iemFpuStackPushOverflowOnly(pFpuCtx);
7777}
7778
7779
7780/**
7781 * Raises an FPU stack overflow exception on a push with a memory operand.
7782 *
7783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7784 * @param iEffSeg The effective memory operand selector register.
7785 * @param GCPtrEff The effective memory operand offset.
7786 */
7787DECL_NO_INLINE(IEM_STATIC, void)
7788iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7789{
7790 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7791 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7792 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7793 iemFpuStackPushOverflowOnly(pFpuCtx);
7794}
7795
7796
7797IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7798{
7799 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7800 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7801 if (pFpuCtx->FTW & RT_BIT(iReg))
7802 return VINF_SUCCESS;
7803 return VERR_NOT_FOUND;
7804}
7805
7806
7807IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7808{
7809 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7810 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7811 if (pFpuCtx->FTW & RT_BIT(iReg))
7812 {
7813 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7814 return VINF_SUCCESS;
7815 }
7816 return VERR_NOT_FOUND;
7817}
7818
7819
7820IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7821 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7822{
7823 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7824 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7825 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7826 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7827 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7828 {
7829 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7830 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7831 return VINF_SUCCESS;
7832 }
7833 return VERR_NOT_FOUND;
7834}
7835
7836
7837IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7838{
7839 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7840 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7841 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7842 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7843 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7844 {
7845 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7846 return VINF_SUCCESS;
7847 }
7848 return VERR_NOT_FOUND;
7849}
7850
7851
7852/**
7853 * Updates the FPU exception status after FCW is changed.
7854 *
7855 * @param pFpuCtx The FPU context.
7856 */
7857IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7858{
7859 uint16_t u16Fsw = pFpuCtx->FSW;
7860 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7861 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7862 else
7863 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7864 pFpuCtx->FSW = u16Fsw;
7865}
7866
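/*
 * Illustrative use of the recalculation above, assuming a pFpuCtx pointing at
 * the guest x87 state and the usual X86_FSW_ZE / X86_FCW_ZM bit definitions:
 * leaving a divide-by-zero flag pending in FSW while the guest clears the
 * corresponding mask bit in FCW makes the next recalculation set the ES and B
 * summary bits; re-masking it clears them again.
 *
 *      pFpuCtx->FSW |= X86_FSW_ZE;             // #Z pending
 *      pFpuCtx->FCW &= ~X86_FCW_ZM;            // guest unmasks #Z
 *      iemFpuRecalcExceptionStatus(pFpuCtx);   // -> X86_FSW_ES | X86_FSW_B set
 */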
7867
7868/**
7869 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7870 *
7871 * @returns The full FTW.
7872 * @param pFpuCtx The FPU context.
7873 */
7874IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7875{
7876 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7877 uint16_t u16Ftw = 0;
7878 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7879 for (unsigned iSt = 0; iSt < 8; iSt++)
7880 {
7881 unsigned const iReg = (iSt + iTop) & 7;
7882 if (!(u8Ftw & RT_BIT(iReg)))
7883 u16Ftw |= 3 << (iReg * 2); /* empty */
7884 else
7885 {
7886 uint16_t uTag;
7887 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7888 if (pr80Reg->s.uExponent == 0x7fff)
7889 uTag = 2; /* Exponent is all 1's => Special. */
7890 else if (pr80Reg->s.uExponent == 0x0000)
7891 {
7892 if (pr80Reg->s.u64Mantissa == 0x0000)
7893 uTag = 1; /* All bits are zero => Zero. */
7894 else
7895 uTag = 2; /* Must be special. */
7896 }
7897 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7898 uTag = 0; /* Valid. */
7899 else
7900 uTag = 2; /* Must be special. */
7901
7902 u16Ftw |= uTag << (iReg * 2);
7903 }
7904 }
7905
7906 return u16Ftw;
7907}
7908
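/*
 * Worked example for the conversion above: with TOP = 6 and only ST(0)
 * (physical register 6) marked non-empty in the abridged FTW, holding 1.0
 * (exponent 0x3fff, J bit set), register 6 gets tag 0 (valid) while the
 * other seven get tag 3 (empty), so the full tag word comes out as
 * 0xffff & ~(3 << (6 * 2)) = 0xcfff.
 */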
7909
7910/**
7911 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7912 *
7913 * @returns The compressed FTW.
7914 * @param u16FullFtw The full FTW to convert.
7915 */
7916IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7917{
7918 uint8_t u8Ftw = 0;
7919 for (unsigned i = 0; i < 8; i++)
7920 {
7921 if ((u16FullFtw & 3) != 3 /*empty*/)
7922 u8Ftw |= RT_BIT(i);
7923 u16FullFtw >>= 2;
7924 }
7925
7926 return u8Ftw;
7927}
7928
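/*
 * Continuing the worked example from iemFpuCalcFullFtw: compressing 0xcfff
 * inspects the eight 2-bit fields and only field 6 differs from 3 (empty),
 * so the abridged tag word is RT_BIT(6) = 0x40.
 */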
7929/** @} */
7930
7931
7932/** @name Memory access.
7933 *
7934 * @{
7935 */
7936
7937
7938/**
7939 * Updates the IEMCPU::cbWritten counter if applicable.
7940 *
7941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7942 * @param fAccess The access being accounted for.
7943 * @param cbMem The access size.
7944 */
7945DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7946{
7947 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7948 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7949 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7950}
7951
7952
7953/**
7954 * Checks if the given segment can be written to, raising the appropriate
7955 * exception if not.
7956 *
7957 * @returns VBox strict status code.
7958 *
7959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7960 * @param pHid Pointer to the hidden register.
7961 * @param iSegReg The register number.
7962 * @param pu64BaseAddr Where to return the base address to use for the
7963 * segment. (In 64-bit code it may differ from the
7964 * base in the hidden segment.)
7965 */
7966IEM_STATIC VBOXSTRICTRC
7967iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7968{
7969 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7970
7971 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7972 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7973 else
7974 {
7975 if (!pHid->Attr.n.u1Present)
7976 {
7977 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7978 AssertRelease(uSel == 0);
7979 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7980 return iemRaiseGeneralProtectionFault0(pVCpu);
7981 }
7982
7983 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7984 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7985 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7986 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7987 *pu64BaseAddr = pHid->u64Base;
7988 }
7989 return VINF_SUCCESS;
7990}
7991
7992
7993/**
7994 * Checks if the given segment can be read from, raising the appropriate
7995 * exception if not.
7996 *
7997 * @returns VBox strict status code.
7998 *
7999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8000 * @param pHid Pointer to the hidden register.
8001 * @param iSegReg The register number.
8002 * @param pu64BaseAddr Where to return the base address to use for the
8003 * segment. (In 64-bit code it may differ from the
8004 * base in the hidden segment.)
8005 */
8006IEM_STATIC VBOXSTRICTRC
8007iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8008{
8009 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8010
8011 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8012 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8013 else
8014 {
8015 if (!pHid->Attr.n.u1Present)
8016 {
8017 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8018 AssertRelease(uSel == 0);
8019 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8020 return iemRaiseGeneralProtectionFault0(pVCpu);
8021 }
8022
8023 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8024 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8025 *pu64BaseAddr = pHid->u64Base;
8026 }
8027 return VINF_SUCCESS;
8028}
8029
8030
8031/**
8032 * Applies the segment limit, base and attributes.
8033 *
8034 * This may raise a \#GP or \#SS.
8035 *
8036 * @returns VBox strict status code.
8037 *
8038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8039 * @param fAccess The kind of access which is being performed.
8040 * @param iSegReg The index of the segment register to apply.
8041 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8042 * TSS, ++).
8043 * @param cbMem The access size.
8044 * @param pGCPtrMem Pointer to the guest memory address to apply
8045 * segmentation to. Input and output parameter.
8046 */
8047IEM_STATIC VBOXSTRICTRC
8048iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8049{
8050 if (iSegReg == UINT8_MAX)
8051 return VINF_SUCCESS;
8052
8053 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8054 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8055 switch (pVCpu->iem.s.enmCpuMode)
8056 {
8057 case IEMMODE_16BIT:
8058 case IEMMODE_32BIT:
8059 {
8060 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8061 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8062
8063 if ( pSel->Attr.n.u1Present
8064 && !pSel->Attr.n.u1Unusable)
8065 {
8066 Assert(pSel->Attr.n.u1DescType);
8067 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8068 {
8069 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8070 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8071 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8072
8073 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8074 {
8075 /** @todo CPL check. */
8076 }
8077
8078 /*
8079 * There are two kinds of data selectors, normal and expand down.
8080 */
8081 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8082 {
8083 if ( GCPtrFirst32 > pSel->u32Limit
8084 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8085 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8086 }
8087 else
8088 {
8089 /*
8090 * The upper boundary is defined by the B bit, not the G bit!
8091 */
8092 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8093 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8094 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8095 }
8096 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8097 }
8098 else
8099 {
8100
8101 /*
8102 * Code selectors can usually be used to read through; writing is
8103 * only permitted in real and V8086 mode.
8104 */
8105 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8106 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8107 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8108 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8109 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8110
8111 if ( GCPtrFirst32 > pSel->u32Limit
8112 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8113 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8114
8115 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8116 {
8117 /** @todo CPL check. */
8118 }
8119
8120 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8121 }
8122 }
8123 else
8124 return iemRaiseGeneralProtectionFault0(pVCpu);
8125 return VINF_SUCCESS;
8126 }
8127
8128 case IEMMODE_64BIT:
8129 {
8130 RTGCPTR GCPtrMem = *pGCPtrMem;
8131 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8132 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8133
8134 Assert(cbMem >= 1);
8135 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8136 return VINF_SUCCESS;
8137 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8138 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8139 return iemRaiseGeneralProtectionFault0(pVCpu);
8140 }
8141
8142 default:
8143 AssertFailedReturn(VERR_IEM_IPE_7);
8144 }
8145}
8146
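/*
 * Worked example of the expand-down branch above, assuming a 16-bit data
 * segment with limit 0x0fff and B=0 (64 KiB upper bound): valid offsets are
 * limit + 1 through 0xffff.  A 4-byte access at offset 0x0ffe faults because
 * GCPtrFirst32 < limit + 1, while one at 0x1000 is fine since its last byte,
 * 0x1003, does not exceed 0xffff.
 */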
8147
8148/**
8149 * Translates a virtual address to a physical address and checks if we
8150 * can access the page as specified.
8151 *
8152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8153 * @param GCPtrMem The virtual address.
8154 * @param fAccess The intended access.
8155 * @param pGCPhysMem Where to return the physical address.
8156 */
8157IEM_STATIC VBOXSTRICTRC
8158iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8159{
8160 /** @todo Need a different PGM interface here. We're currently using
8161 * generic / REM interfaces. This won't cut it for R0 & RC. */
8162 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8163 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8164 RTGCPHYS GCPhys;
8165 uint64_t fFlags;
8166 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8167 if (RT_FAILURE(rc))
8168 {
8169 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8170 /** @todo Check unassigned memory in unpaged mode. */
8171 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8172 *pGCPhysMem = NIL_RTGCPHYS;
8173 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8174 }
8175
8176 /* If the page is writable and does not have the no-exec bit set, all
8177 access is allowed. Otherwise we'll have to check more carefully... */
8178 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8179 {
8180 /* Write to read only memory? */
8181 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8182 && !(fFlags & X86_PTE_RW)
8183 && ( (pVCpu->iem.s.uCpl == 3
8184 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8185 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8186 {
8187 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8188 *pGCPhysMem = NIL_RTGCPHYS;
8189 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8190 }
8191
8192 /* Kernel memory accessed by userland? */
8193 if ( !(fFlags & X86_PTE_US)
8194 && pVCpu->iem.s.uCpl == 3
8195 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8196 {
8197 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8198 *pGCPhysMem = NIL_RTGCPHYS;
8199 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8200 }
8201
8202 /* Executing non-executable memory? */
8203 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8204 && (fFlags & X86_PTE_PAE_NX)
8205 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8206 {
8207 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8208 *pGCPhysMem = NIL_RTGCPHYS;
8209 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8210 VERR_ACCESS_DENIED);
8211 }
8212 }
8213
8214 /*
8215 * Set the dirty / access flags.
8216 * ASSUMES this is set when the address is translated rather than on commit...
8217 */
8218 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8219 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8220 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8221 {
8222 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8223 AssertRC(rc2);
8224 }
8225
8226 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8227 *pGCPhysMem = GCPhys;
8228 return VINF_SUCCESS;
8229}
8230
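/*
 * The read-only page check above can be restated as a small predicate; a
 * minimal sketch with assumed locals (uCpl, cr0) standing in for the pVCpu
 * fields, and ignoring the user/kernel and NX checks that follow it:
 *
 *      // A CPL-3, non-system write to a read-only page always faults;
 *      // a supervisor write only faults when CR0.WP is set.
 *      bool const fWriteFaults = !(fFlags & X86_PTE_RW)
 *                             && (   (uCpl == 3 && !(fAccess & IEM_ACCESS_WHAT_SYS))
 *                                 || (cr0 & X86_CR0_WP));
 */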
8231
8232
8233/**
8234 * Maps a physical page.
8235 *
8236 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8238 * @param GCPhysMem The physical address.
8239 * @param fAccess The intended access.
8240 * @param ppvMem Where to return the mapping address.
8241 * @param pLock The PGM lock.
8242 */
8243IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8244{
8245#ifdef IEM_LOG_MEMORY_WRITES
8246 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8247 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8248#endif
8249
8250 /** @todo This API may require some improving later. A private deal with PGM
8251 * regarding locking and unlocking needs to be struck. A couple of TLBs
8252 * living in PGM, but with publicly accessible inlined access methods
8253 * could perhaps be an even better solution. */
8254 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8255 GCPhysMem,
8256 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8257 pVCpu->iem.s.fBypassHandlers,
8258 ppvMem,
8259 pLock);
8260 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8261 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8262
8263 return rc;
8264}
8265
8266
8267/**
8268 * Unmap a page previously mapped by iemMemPageMap.
8269 *
8270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8271 * @param GCPhysMem The physical address.
8272 * @param fAccess The intended access.
8273 * @param pvMem What iemMemPageMap returned.
8274 * @param pLock The PGM lock.
8275 */
8276DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8277{
8278 NOREF(pVCpu);
8279 NOREF(GCPhysMem);
8280 NOREF(fAccess);
8281 NOREF(pvMem);
8282 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8283}
8284
8285
8286/**
8287 * Looks up a memory mapping entry.
8288 *
8289 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8291 * @param pvMem The memory address.
8292 * @param fAccess The kind of access.
8293 */
8294DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8295{
8296 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8297 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8298 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8299 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8300 return 0;
8301 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8302 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8303 return 1;
8304 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8305 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8306 return 2;
8307 return VERR_NOT_FOUND;
8308}
8309
8310
8311/**
8312 * Finds a free memmap entry when using iNextMapping doesn't work.
8313 *
8314 * @returns Memory mapping index, 1024 on failure.
8315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8316 */
8317IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8318{
8319 /*
8320 * The easy case.
8321 */
8322 if (pVCpu->iem.s.cActiveMappings == 0)
8323 {
8324 pVCpu->iem.s.iNextMapping = 1;
8325 return 0;
8326 }
8327
8328 /* There should be enough mappings for all instructions. */
8329 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8330
8331 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8332 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8333 return i;
8334
8335 AssertFailedReturn(1024);
8336}
8337
8338
8339/**
8340 * Commits a bounce buffer that needs writing back and unmaps it.
8341 *
8342 * @returns Strict VBox status code.
8343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8344 * @param iMemMap The index of the buffer to commit.
8345 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8346 * Always false in ring-3, obviously.
8347 */
8348IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8349{
8350 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8351 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8352#ifdef IN_RING3
8353 Assert(!fPostponeFail);
8354 RT_NOREF_PV(fPostponeFail);
8355#endif
8356
8357 /*
8358 * Do the writing.
8359 */
8360 PVM pVM = pVCpu->CTX_SUFF(pVM);
8361 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8362 {
8363 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8364 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8365 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8366 if (!pVCpu->iem.s.fBypassHandlers)
8367 {
8368 /*
8369 * Carefully and efficiently dealing with access handler return
8370 * codes make this a little bloated.
8371 */
8372 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8374 pbBuf,
8375 cbFirst,
8376 PGMACCESSORIGIN_IEM);
8377 if (rcStrict == VINF_SUCCESS)
8378 {
8379 if (cbSecond)
8380 {
8381 rcStrict = PGMPhysWrite(pVM,
8382 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8383 pbBuf + cbFirst,
8384 cbSecond,
8385 PGMACCESSORIGIN_IEM);
8386 if (rcStrict == VINF_SUCCESS)
8387 { /* nothing */ }
8388 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8393 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8394 }
8395#ifndef IN_RING3
8396 else if (fPostponeFail)
8397 {
8398 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8401 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8402 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8403 return iemSetPassUpStatus(pVCpu, rcStrict);
8404 }
8405#endif
8406 else
8407 {
8408 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8411 return rcStrict;
8412 }
8413 }
8414 }
8415 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8416 {
8417 if (!cbSecond)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8421 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8422 }
8423 else
8424 {
8425 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8427 pbBuf + cbFirst,
8428 cbSecond,
8429 PGMACCESSORIGIN_IEM);
8430 if (rcStrict2 == VINF_SUCCESS)
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8436 }
8437 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8438 {
8439 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8442 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8443 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8444 }
8445#ifndef IN_RING3
8446 else if (fPostponeFail)
8447 {
8448 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8451 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8452 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8453 return iemSetPassUpStatus(pVCpu, rcStrict);
8454 }
8455#endif
8456 else
8457 {
8458 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8461 return rcStrict2;
8462 }
8463 }
8464 }
8465#ifndef IN_RING3
8466 else if (fPostponeFail)
8467 {
8468 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8471 if (!cbSecond)
8472 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8473 else
8474 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8475 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8476 return iemSetPassUpStatus(pVCpu, rcStrict);
8477 }
8478#endif
8479 else
8480 {
8481 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8484 return rcStrict;
8485 }
8486 }
8487 else
8488 {
8489 /*
8490 * No access handlers, much simpler.
8491 */
8492 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8493 if (RT_SUCCESS(rc))
8494 {
8495 if (cbSecond)
8496 {
8497 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8498 if (RT_SUCCESS(rc))
8499 { /* likely */ }
8500 else
8501 {
8502 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8503 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8505 return rc;
8506 }
8507 }
8508 }
8509 else
8510 {
8511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8512 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8513 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8514 return rc;
8515 }
8516 }
8517 }
8518
8519#if defined(IEM_LOG_MEMORY_WRITES)
8520 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8521 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8522 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8523 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8524 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8525 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8526
8527 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8528 g_cbIemWrote = cbWrote;
8529 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8530#endif
8531
8532 /*
8533 * Free the mapping entry.
8534 */
8535 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8536 Assert(pVCpu->iem.s.cActiveMappings != 0);
8537 pVCpu->iem.s.cActiveMappings--;
8538 return VINF_SUCCESS;
8539}
8540
8541
8542/**
8543 * iemMemMap worker that deals with a request crossing pages.
8544 */
8545IEM_STATIC VBOXSTRICTRC
8546iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8547{
8548 /*
8549 * Do the address translations.
8550 */
8551 RTGCPHYS GCPhysFirst;
8552 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8553 if (rcStrict != VINF_SUCCESS)
8554 return rcStrict;
8555
8556 RTGCPHYS GCPhysSecond;
8557 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8558 fAccess, &GCPhysSecond);
8559 if (rcStrict != VINF_SUCCESS)
8560 return rcStrict;
8561 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8562
8563 PVM pVM = pVCpu->CTX_SUFF(pVM);
8564
8565 /*
8566 * Read in the current memory content if it's a read, execute or partial
8567 * write access.
8568 */
8569 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8570 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8571 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8572
8573 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8574 {
8575 if (!pVCpu->iem.s.fBypassHandlers)
8576 {
8577 /*
8578 * Must carefully deal with access handler status codes here,
8579 * makes the code a bit bloated.
8580 */
8581 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8582 if (rcStrict == VINF_SUCCESS)
8583 {
8584 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8585 if (rcStrict == VINF_SUCCESS)
8586 { /*likely */ }
8587 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8588 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8589 else
8590 {
8591 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8592 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8593 return rcStrict;
8594 }
8595 }
8596 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8597 {
8598 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8599 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8600 {
8601 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8602 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8603 }
8604 else
8605 {
8606 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8607 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8608 return rcStrict2;
8609 }
8610 }
8611 else
8612 {
8613 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8614 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8615 return rcStrict;
8616 }
8617 }
8618 else
8619 {
8620 /*
8621 * No informational status codes here, much more straightforward.
8622 */
8623 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8624 if (RT_SUCCESS(rc))
8625 {
8626 Assert(rc == VINF_SUCCESS);
8627 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8628 if (RT_SUCCESS(rc))
8629 Assert(rc == VINF_SUCCESS);
8630 else
8631 {
8632 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8633 return rc;
8634 }
8635 }
8636 else
8637 {
8638 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8639 return rc;
8640 }
8641 }
8642 }
8643#ifdef VBOX_STRICT
8644 else
8645 memset(pbBuf, 0xcc, cbMem);
8646 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8647 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8648#endif
8649
8650 /*
8651 * Commit the bounce buffer entry.
8652 */
8653 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8654 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8655 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8656 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8657 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8658 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8659 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8660 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8661 pVCpu->iem.s.cActiveMappings++;
8662
8663 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8664 *ppvMem = pbBuf;
8665 return VINF_SUCCESS;
8666}
8667
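/*
 * Worked example of the split above: a 4-byte access starting at page offset
 * 0xffe (4 KiB pages) yields cbFirstPage = PAGE_SIZE - 0xffe = 2 and
 * cbSecondPage = 4 - 2 = 2, so the bounce buffer gathers two bytes from the
 * tail of the first page and two from the start of the second.
 */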
8668
8669/**
8670 * iemMemMap worker that deals with iemMemPageMap failures.
8671 */
8672IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8673 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8674{
8675 /*
8676 * Filter out conditions we can handle and the ones which shouldn't happen.
8677 */
8678 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8679 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8680 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8681 {
8682 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8683 return rcMap;
8684 }
8685 pVCpu->iem.s.cPotentialExits++;
8686
8687 /*
8688 * Read in the current memory content if it's a read, execute or partial
8689 * write access.
8690 */
8691 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8692 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8693 {
8694 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8695 memset(pbBuf, 0xff, cbMem);
8696 else
8697 {
8698 int rc;
8699 if (!pVCpu->iem.s.fBypassHandlers)
8700 {
8701 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8702 if (rcStrict == VINF_SUCCESS)
8703 { /* nothing */ }
8704 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8705 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8706 else
8707 {
8708 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8709 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8710 return rcStrict;
8711 }
8712 }
8713 else
8714 {
8715 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8716 if (RT_SUCCESS(rc))
8717 { /* likely */ }
8718 else
8719 {
8720 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8721 GCPhysFirst, rc));
8722 return rc;
8723 }
8724 }
8725 }
8726 }
8727#ifdef VBOX_STRICT
8728 else
8729 memset(pbBuf, 0xcc, cbMem);
8730#endif
8731#ifdef VBOX_STRICT
8732 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8733 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8734#endif
8735
8736 /*
8737 * Commit the bounce buffer entry.
8738 */
8739 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8740 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8741 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8742 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8743 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8744 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8745 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8746 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8747 pVCpu->iem.s.cActiveMappings++;
8748
8749 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8750 *ppvMem = pbBuf;
8751 return VINF_SUCCESS;
8752}
8753
8754
8755
8756/**
8757 * Maps the specified guest memory for the given kind of access.
8758 *
8759 * This may be using bounce buffering of the memory if it's crossing a page
8760 * boundary or if there is an access handler installed for any of it. Because
8761 * of lock prefix guarantees, we're in for some extra clutter when this
8762 * happens.
8763 *
8764 * This may raise a \#GP, \#SS, \#PF or \#AC.
8765 *
8766 * @returns VBox strict status code.
8767 *
8768 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8769 * @param ppvMem Where to return the pointer to the mapped
8770 * memory.
8771 * @param cbMem The number of bytes to map. This is usually 1,
8772 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8773 * string operations it can be up to a page.
8774 * @param iSegReg The index of the segment register to use for
8775 * this access. The base and limits are checked.
8776 * Use UINT8_MAX to indicate that no segmentation
8777 * is required (for IDT, GDT and LDT accesses).
8778 * @param GCPtrMem The address of the guest memory.
8779 * @param fAccess How the memory is being accessed. The
8780 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8781 * how to map the memory, while the
8782 * IEM_ACCESS_WHAT_XXX bit is used when raising
8783 * exceptions.
8784 */
8785IEM_STATIC VBOXSTRICTRC
8786iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8787{
8788 /*
8789 * Check the input and figure out which mapping entry to use.
8790 */
8791 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8792 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8793 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8794
8795 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8796 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8797 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8798 {
8799 iMemMap = iemMemMapFindFree(pVCpu);
8800 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8801 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8802 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8803 pVCpu->iem.s.aMemMappings[2].fAccess),
8804 VERR_IEM_IPE_9);
8805 }
8806
8807 /*
8808 * Map the memory, checking that we can actually access it. If something
8809 * slightly complicated happens, fall back on bounce buffering.
8810 */
8811 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8812 if (rcStrict != VINF_SUCCESS)
8813 return rcStrict;
8814
8815 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8816 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8817
8818 RTGCPHYS GCPhysFirst;
8819 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8820 if (rcStrict != VINF_SUCCESS)
8821 return rcStrict;
8822
8823 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8824 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8825 if (fAccess & IEM_ACCESS_TYPE_READ)
8826 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8827
8828 void *pvMem;
8829 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8830 if (rcStrict != VINF_SUCCESS)
8831 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8832
8833 /*
8834 * Fill in the mapping table entry.
8835 */
8836 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8837 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8838 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8839 pVCpu->iem.s.cActiveMappings++;
8840
8841 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8842 *ppvMem = pvMem;
8843 return VINF_SUCCESS;
8844}
8845
8846
8847/**
8848 * Commits the guest memory if bounce buffered and unmaps it.
8849 *
8850 * @returns Strict VBox status code.
8851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8852 * @param pvMem The mapping.
8853 * @param fAccess The kind of access.
8854 */
8855IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8856{
8857 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8858 AssertReturn(iMemMap >= 0, iMemMap);
8859
8860 /* If it's bounce buffered, we may need to write back the buffer. */
8861 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8862 {
8863 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8864 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8865 }
8866 /* Otherwise unlock it. */
8867 else
8868 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8869
8870 /* Free the entry. */
8871 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8872 Assert(pVCpu->iem.s.cActiveMappings != 0);
8873 pVCpu->iem.s.cActiveMappings--;
8874 return VINF_SUCCESS;
8875}
8876
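/*
 * Typical usage sketch for the iemMemMap / iemMemCommitAndUnmap pair above,
 * mirroring the data fetch helpers further down but for a 4-byte write; error
 * handling trimmed, and iSegReg, GCPtrMem and u32Value assumed to be supplied
 * by the caller:
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 */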
8877#ifdef IEM_WITH_SETJMP
8878
8879/**
8880 * Maps the specified guest memory for the given kind of access, longjmp on
8881 * error.
8882 *
8883 * This may be using bounce buffering of the memory if it's crossing a page
8884 * boundary or if there is an access handler installed for any of it. Because
8885 * of lock prefix guarantees, we're in for some extra clutter when this
8886 * happens.
8887 *
8888 * This may raise a \#GP, \#SS, \#PF or \#AC.
8889 *
8890 * @returns Pointer to the mapped memory.
8891 *
8892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8893 * @param cbMem The number of bytes to map. This is usually 1,
8894 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8895 * string operations it can be up to a page.
8896 * @param iSegReg The index of the segment register to use for
8897 * this access. The base and limits are checked.
8898 * Use UINT8_MAX to indicate that no segmentation
8899 * is required (for IDT, GDT and LDT accesses).
8900 * @param GCPtrMem The address of the guest memory.
8901 * @param fAccess How the memory is being accessed. The
8902 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8903 * how to map the memory, while the
8904 * IEM_ACCESS_WHAT_XXX bit is used when raising
8905 * exceptions.
8906 */
8907IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8908{
8909 /*
8910 * Check the input and figure out which mapping entry to use.
8911 */
8912 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8913 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8914 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8915
8916 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8917 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8918 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8919 {
8920 iMemMap = iemMemMapFindFree(pVCpu);
8921 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8922 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8923 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8924 pVCpu->iem.s.aMemMappings[2].fAccess),
8925 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8926 }
8927
8928 /*
8929 * Map the memory, checking that we can actually access it. If something
8930 * slightly complicated happens, fall back on bounce buffering.
8931 */
8932 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8933 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8934 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8935
8936 /* Crossing a page boundary? */
8937 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8938 { /* No (likely). */ }
8939 else
8940 {
8941 void *pvMem;
8942 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8943 if (rcStrict == VINF_SUCCESS)
8944 return pvMem;
8945 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8946 }
8947
8948 RTGCPHYS GCPhysFirst;
8949 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8950 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8951 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8952
8953 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8954 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8955 if (fAccess & IEM_ACCESS_TYPE_READ)
8956 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8957
8958 void *pvMem;
8959 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8960 if (rcStrict == VINF_SUCCESS)
8961 { /* likely */ }
8962 else
8963 {
8964 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8965 if (rcStrict == VINF_SUCCESS)
8966 return pvMem;
8967 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8968 }
8969
8970 /*
8971 * Fill in the mapping table entry.
8972 */
8973 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8974 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8975 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8976 pVCpu->iem.s.cActiveMappings++;
8977
8978 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8979 return pvMem;
8980}
8981
8982
8983/**
8984 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8985 *
8986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8987 * @param pvMem The mapping.
8988 * @param fAccess The kind of access.
8989 */
8990IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8991{
8992 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8993 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8994
8995 /* If it's bounce buffered, we may need to write back the buffer. */
8996 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8997 {
8998 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8999 {
9000 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9001 if (rcStrict == VINF_SUCCESS)
9002 return;
9003 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9004 }
9005 }
9006 /* Otherwise unlock it. */
9007 else
9008 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9009
9010 /* Free the entry. */
9011 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9012 Assert(pVCpu->iem.s.cActiveMappings != 0);
9013 pVCpu->iem.s.cActiveMappings--;
9014}
9015
9016#endif /* IEM_WITH_SETJMP */
9017
9018#ifndef IN_RING3
9019/**
9020 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9021 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
9022 *
9023 * Allows the instruction to be completed and retired, while the IEM user will
9024 * return to ring-3 immediately afterwards and do the postponed writes there.
9025 *
9026 * @returns VBox status code (no strict statuses). Caller must check
9027 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9029 * @param pvMem The mapping.
9030 * @param fAccess The kind of access.
9031 */
9032IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9033{
9034 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9035 AssertReturn(iMemMap >= 0, iMemMap);
9036
9037 /* If it's bounce buffered, we may need to write back the buffer. */
9038 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9039 {
9040 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9041 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9042 }
9043 /* Otherwise unlock it. */
9044 else
9045 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9046
9047 /* Free the entry. */
9048 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9049 Assert(pVCpu->iem.s.cActiveMappings != 0);
9050 pVCpu->iem.s.cActiveMappings--;
9051 return VINF_SUCCESS;
9052}
9053#endif
9054
9055
9056/**
9057 * Rolls back mappings, releasing page locks and such.
9058 *
9059 * The caller shall only call this after checking cActiveMappings.
9060 *
9062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9063 */
9064IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9065{
9066 Assert(pVCpu->iem.s.cActiveMappings > 0);
9067
9068 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9069 while (iMemMap-- > 0)
9070 {
9071 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9072 if (fAccess != IEM_ACCESS_INVALID)
9073 {
9074 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9075 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9076 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9077 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9078 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9079 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9080 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9081 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9082 pVCpu->iem.s.cActiveMappings--;
9083 }
9084 }
9085}
9086
9087
9088/**
9089 * Fetches a data byte.
9090 *
9091 * @returns Strict VBox status code.
9092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9093 * @param pu8Dst Where to return the byte.
9094 * @param iSegReg The index of the segment register to use for
9095 * this access. The base and limits are checked.
9096 * @param GCPtrMem The address of the guest memory.
9097 */
9098IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9099{
9100 /* The lazy approach for now... */
9101 uint8_t const *pu8Src;
9102 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9103 if (rc == VINF_SUCCESS)
9104 {
9105 *pu8Dst = *pu8Src;
9106 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9107 }
9108 return rc;
9109}
9110
9111
9112#ifdef IEM_WITH_SETJMP
9113/**
9114 * Fetches a data byte, longjmp on error.
9115 *
9116 * @returns The byte.
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9118 * @param iSegReg The index of the segment register to use for
9119 * this access. The base and limits are checked.
9120 * @param GCPtrMem The address of the guest memory.
9121 */
9122DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9123{
9124 /* The lazy approach for now... */
9125 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9126 uint8_t const bRet = *pu8Src;
9127 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9128 return bRet;
9129}
9130#endif /* IEM_WITH_SETJMP */
9131
9132
9133/**
9134 * Fetches a data word.
9135 *
9136 * @returns Strict VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param pu16Dst Where to return the word.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint16_t const *pu16Src;
9147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9148 if (rc == VINF_SUCCESS)
9149 {
9150 *pu16Dst = *pu16Src;
9151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9152 }
9153 return rc;
9154}
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158/**
9159 * Fetches a data word, longjmp on error.
9160 *
9161 * @returns The word
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169 /* The lazy approach for now... */
9170 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9171 uint16_t const u16Ret = *pu16Src;
9172 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9173 return u16Ret;
9174}
9175#endif
9176
9177
9178/**
9179 * Fetches a data dword.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param pu32Dst Where to return the dword.
9184 * @param iSegReg The index of the segment register to use for
9185 * this access. The base and limits are checked.
9186 * @param GCPtrMem The address of the guest memory.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9189{
9190 /* The lazy approach for now... */
9191 uint32_t const *pu32Src;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu32Dst = *pu32Src;
9196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203
9204IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9205{
9206 Assert(cbMem >= 1);
9207 Assert(iSegReg < X86_SREG_COUNT);
9208
9209 /*
9210 * 64-bit mode is simpler.
9211 */
9212 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9213 {
9214 if (iSegReg >= X86_SREG_FS)
9215 {
9216 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9217 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9218 GCPtrMem += pSel->u64Base;
9219 }
9220
9221 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9222 return GCPtrMem;
9223 }
9224 /*
9225 * 16-bit and 32-bit segmentation.
9226 */
9227 else
9228 {
9229 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9230 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9231 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9232 == X86DESCATTR_P /* data, expand up */
9233 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9234 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9235 {
9236 /* expand up */
9237 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9238 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9239 && GCPtrLast32 > (uint32_t)GCPtrMem))
9240 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9241 }
9242 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9243 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9244 {
9245 /* expand down */
9246 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9247 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9248 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9249 && GCPtrLast32 > (uint32_t)GCPtrMem))
9250 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9251 }
9252 else
9253 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9254 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9255 }
9256 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9257}
9258
9259
9260IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9261{
9262 Assert(cbMem >= 1);
9263 Assert(iSegReg < X86_SREG_COUNT);
9264
9265 /*
9266 * 64-bit mode is simpler.
9267 */
9268 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9269 {
9270 if (iSegReg >= X86_SREG_FS)
9271 {
9272 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9273 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9274 GCPtrMem += pSel->u64Base;
9275 }
9276
9277 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9278 return GCPtrMem;
9279 }
9280 /*
9281 * 16-bit and 32-bit segmentation.
9282 */
9283 else
9284 {
9285 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9286 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9287 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9288 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9289 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9290 {
9291 /* expand up */
9292 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9293 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9294 && GCPtrLast32 > (uint32_t)GCPtrMem))
9295 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9296 }
9297 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9298 {
9299 /* expand down */
9300 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9301 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9302 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9303 && GCPtrLast32 > (uint32_t)GCPtrMem))
9304 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9305 }
9306 else
9307 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9308 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9309 }
9310 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9311}
9312
9313
9314/**
9315 * Fetches a data dword, longjmp on error, fallback/safe version.
9316 *
9317 * @returns The dword
9318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9319 * @param iSegReg The index of the segment register to use for
9320 * this access. The base and limits are checked.
9321 * @param GCPtrMem The address of the guest memory.
9322 */
9323IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9324{
9325 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9326 uint32_t const u32Ret = *pu32Src;
9327 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9328 return u32Ret;
9329}
9330
9331
9332/**
9333 * Fetches a data dword, longjmp on error.
9334 *
9335 * @returns The dword
9336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9337 * @param iSegReg The index of the segment register to use for
9338 * this access. The base and limits are checked.
9339 * @param GCPtrMem The address of the guest memory.
9340 */
9341DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9342{
9343# ifdef IEM_WITH_DATA_TLB
9344 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9345 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9346 {
9347 /// @todo more later.
9348 }
9349
9350 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9351# else
9352 /* The lazy approach. */
9353 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9354 uint32_t const u32Ret = *pu32Src;
9355 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9356 return u32Ret;
9357# endif
9358}
9359#endif
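
/*
 * A minimal sketch of the longjmp error model used by the *Jmp fetchers
 * (hypothetical caller, assuming IEM_WITH_SETJMP and guarded by #if 0): the
 * fetch either returns the value or longjmp's to the per-VCPU jump buffer with
 * a strict status code, so the caller never sees a half-updated destination.
 */
#if 0 /* illustrative sketch only */
# include <setjmp.h>
static uint32_t iemExampleFetchU32OrZero(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    jmp_buf         JmpBuf;
    jmp_buf * const pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    uint32_t        u32Value     = 0;

    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    if (setjmp(JmpBuf) == 0)
        u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
    /* else: the longjmp carried a VBOXSTRICTRC value; zero is returned here. */
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    return u32Value;
}
#endif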
9360
9361
9362#ifdef SOME_UNUSED_FUNCTION
9363/**
9364 * Fetches a data dword and sign extends it to a qword.
9365 *
9366 * @returns Strict VBox status code.
9367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9368 * @param pu64Dst Where to return the sign extended value.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 */
9373IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9374{
9375 /* The lazy approach for now... */
9376 int32_t const *pi32Src;
9377 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9378 if (rc == VINF_SUCCESS)
9379 {
9380 *pu64Dst = *pi32Src;
9381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9382 }
9383#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9384 else
9385 *pu64Dst = 0;
9386#endif
9387 return rc;
9388}
9389#endif
9390
9391
9392/**
9393 * Fetches a data qword.
9394 *
9395 * @returns Strict VBox status code.
9396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9397 * @param pu64Dst Where to return the qword.
9398 * @param iSegReg The index of the segment register to use for
9399 * this access. The base and limits are checked.
9400 * @param GCPtrMem The address of the guest memory.
9401 */
9402IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9403{
9404 /* The lazy approach for now... */
9405 uint64_t const *pu64Src;
9406 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9407 if (rc == VINF_SUCCESS)
9408 {
9409 *pu64Dst = *pu64Src;
9410 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9411 }
9412 return rc;
9413}
9414
9415
9416#ifdef IEM_WITH_SETJMP
9417/**
9418 * Fetches a data qword, longjmp on error.
9419 *
9420 * @returns The qword.
9421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9422 * @param iSegReg The index of the segment register to use for
9423 * this access. The base and limits are checked.
9424 * @param GCPtrMem The address of the guest memory.
9425 */
9426DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9427{
9428 /* The lazy approach for now... */
9429 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9430 uint64_t const u64Ret = *pu64Src;
9431 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9432 return u64Ret;
9433}
9434#endif
9435
9436
9437/**
9438 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9439 *
9440 * @returns Strict VBox status code.
9441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9442 * @param pu64Dst Where to return the qword.
9443 * @param iSegReg The index of the segment register to use for
9444 * this access. The base and limits are checked.
9445 * @param GCPtrMem The address of the guest memory.
9446 */
9447IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9448{
9449 /* The lazy approach for now... */
9450 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9451 if (RT_UNLIKELY(GCPtrMem & 15))
9452 return iemRaiseGeneralProtectionFault0(pVCpu);
9453
9454 uint64_t const *pu64Src;
9455 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9456 if (rc == VINF_SUCCESS)
9457 {
9458 *pu64Dst = *pu64Src;
9459 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9460 }
9461 return rc;
9462}
9463
9464
9465#ifdef IEM_WITH_SETJMP
9466/**
9467 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9468 *
9469 * @returns The qword.
9470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9471 * @param iSegReg The index of the segment register to use for
9472 * this access. The base and limits are checked.
9473 * @param GCPtrMem The address of the guest memory.
9474 */
9475DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9476{
9477 /* The lazy approach for now... */
9478 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9479 if (RT_LIKELY(!(GCPtrMem & 15)))
9480 {
9481 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9482 uint64_t const u64Ret = *pu64Src;
9483 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9484 return u64Ret;
9485 }
9486
9487 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9488 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9489}
9490#endif
9491
9492
9493/**
9494 * Fetches a data tword.
9495 *
9496 * @returns Strict VBox status code.
9497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9498 * @param pr80Dst Where to return the tword.
9499 * @param iSegReg The index of the segment register to use for
9500 * this access. The base and limits are checked.
9501 * @param GCPtrMem The address of the guest memory.
9502 */
9503IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9504{
9505 /* The lazy approach for now... */
9506 PCRTFLOAT80U pr80Src;
9507 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9508 if (rc == VINF_SUCCESS)
9509 {
9510 *pr80Dst = *pr80Src;
9511 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9512 }
9513 return rc;
9514}
9515
9516
9517#ifdef IEM_WITH_SETJMP
9518/**
9519 * Fetches a data tword, longjmp on error.
9520 *
9521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9522 * @param pr80Dst Where to return the tword.
9523 * @param iSegReg The index of the segment register to use for
9524 * this access. The base and limits are checked.
9525 * @param GCPtrMem The address of the guest memory.
9526 */
9527DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9528{
9529 /* The lazy approach for now... */
9530 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9531 *pr80Dst = *pr80Src;
9532 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9533}
9534#endif
9535
9536
9537/**
9538 * Fetches a data dqword (double qword), generally SSE related.
9539 *
9540 * @returns Strict VBox status code.
9541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9542 * @param pu128Dst Where to return the dqword.
9543 * @param iSegReg The index of the segment register to use for
9544 * this access. The base and limits are checked.
9545 * @param GCPtrMem The address of the guest memory.
9546 */
9547IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9548{
9549 /* The lazy approach for now... */
9550 PCRTUINT128U pu128Src;
9551 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9552 if (rc == VINF_SUCCESS)
9553 {
9554 pu128Dst->au64[0] = pu128Src->au64[0];
9555 pu128Dst->au64[1] = pu128Src->au64[1];
9556 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9557 }
9558 return rc;
9559}
9560
9561
9562#ifdef IEM_WITH_SETJMP
9563/**
9564 * Fetches a data dqword (double qword), generally SSE related.
9565 *
9566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9567 * @param pu128Dst Where to return the dqword.
9568 * @param iSegReg The index of the segment register to use for
9569 * this access. The base and limits are checked.
9570 * @param GCPtrMem The address of the guest memory.
9571 */
9572IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9573{
9574 /* The lazy approach for now... */
9575 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9576 pu128Dst->au64[0] = pu128Src->au64[0];
9577 pu128Dst->au64[1] = pu128Src->au64[1];
9578 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9579}
9580#endif
9581
9582
9583/**
9584 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9585 * related.
9586 *
9587 * Raises \#GP(0) if not aligned.
9588 *
9589 * @returns Strict VBox status code.
9590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9591 * @param pu128Dst Where to return the dqword.
9592 * @param iSegReg The index of the segment register to use for
9593 * this access. The base and limits are checked.
9594 * @param GCPtrMem The address of the guest memory.
9595 */
9596IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9597{
9598 /* The lazy approach for now... */
9599 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9600 if ( (GCPtrMem & 15)
9601 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9602 return iemRaiseGeneralProtectionFault0(pVCpu);
9603
9604 PCRTUINT128U pu128Src;
9605 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9606 if (rc == VINF_SUCCESS)
9607 {
9608 pu128Dst->au64[0] = pu128Src->au64[0];
9609 pu128Dst->au64[1] = pu128Src->au64[1];
9610 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9611 }
9612 return rc;
9613}
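
/*
 * A small sketch of the alignment rule applied above (hypothetical predicate,
 * guarded by #if 0): a misaligned 16-byte SSE access normally raises \#GP(0),
 * unless the guest has set MXCSR.MM (AMD's misaligned SSE mode), in which case
 * it is let through.
 */
#if 0 /* illustrative sketch only */
static bool iemExampleSseAccessRaisesGp0(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    return (GCPtrMem & 15) != 0
        && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif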
9614
9615
9616#ifdef IEM_WITH_SETJMP
9617/**
9618 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9619 * related, longjmp on error.
9620 *
9621 * Raises \#GP(0) if not aligned.
9622 *
9623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9624 * @param pu128Dst Where to return the dqword.
9625 * @param iSegReg The index of the segment register to use for
9626 * this access. The base and limits are checked.
9627 * @param GCPtrMem The address of the guest memory.
9628 */
9629DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9630{
9631 /* The lazy approach for now... */
9632 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9633 if ( (GCPtrMem & 15) == 0
9634 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9635 {
9636 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9637 pu128Dst->au64[0] = pu128Src->au64[0];
9638 pu128Dst->au64[1] = pu128Src->au64[1];
9639 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9640 return;
9641 }
9642
9643 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9644 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9645}
9646#endif
9647
9648
9649/**
9650 * Fetches a data oword (octo word), generally AVX related.
9651 *
9652 * @returns Strict VBox status code.
9653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9654 * @param pu256Dst Where to return the oword.
9655 * @param iSegReg The index of the segment register to use for
9656 * this access. The base and limits are checked.
9657 * @param GCPtrMem The address of the guest memory.
9658 */
9659IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9660{
9661 /* The lazy approach for now... */
9662 PCRTUINT256U pu256Src;
9663 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9664 if (rc == VINF_SUCCESS)
9665 {
9666 pu256Dst->au64[0] = pu256Src->au64[0];
9667 pu256Dst->au64[1] = pu256Src->au64[1];
9668 pu256Dst->au64[2] = pu256Src->au64[2];
9669 pu256Dst->au64[3] = pu256Src->au64[3];
9670 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9671 }
9672 return rc;
9673}
9674
9675
9676#ifdef IEM_WITH_SETJMP
9677/**
9678 * Fetches a data oword (octo word), generally AVX related.
9679 *
9680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9681 * @param pu256Dst Where to return the oword.
9682 * @param iSegReg The index of the segment register to use for
9683 * this access. The base and limits are checked.
9684 * @param GCPtrMem The address of the guest memory.
9685 */
9686IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9687{
9688 /* The lazy approach for now... */
9689 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9690 pu256Dst->au64[0] = pu256Src->au64[0];
9691 pu256Dst->au64[1] = pu256Src->au64[1];
9692 pu256Dst->au64[2] = pu256Src->au64[2];
9693 pu256Dst->au64[3] = pu256Src->au64[3];
9694 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9695}
9696#endif
9697
9698
9699/**
9700 * Fetches a data oword (octo word) at an aligned address, generally AVX
9701 * related.
9702 *
9703 * Raises \#GP(0) if not aligned.
9704 *
9705 * @returns Strict VBox status code.
9706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9707 * @param pu256Dst Where to return the oword.
9708 * @param iSegReg The index of the segment register to use for
9709 * this access. The base and limits are checked.
9710 * @param GCPtrMem The address of the guest memory.
9711 */
9712IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9713{
9714 /* The lazy approach for now... */
9715 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9716 if (GCPtrMem & 31)
9717 return iemRaiseGeneralProtectionFault0(pVCpu);
9718
9719 PCRTUINT256U pu256Src;
9720 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9721 if (rc == VINF_SUCCESS)
9722 {
9723 pu256Dst->au64[0] = pu256Src->au64[0];
9724 pu256Dst->au64[1] = pu256Src->au64[1];
9725 pu256Dst->au64[2] = pu256Src->au64[2];
9726 pu256Dst->au64[3] = pu256Src->au64[3];
9727 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9728 }
9729 return rc;
9730}
9731
9732
9733#ifdef IEM_WITH_SETJMP
9734/**
9735 * Fetches a data oword (octo word) at an aligned address, generally AVX
9736 * related, longjmp on error.
9737 *
9738 * Raises \#GP(0) if not aligned.
9739 *
9740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9741 * @param pu256Dst Where to return the oword.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 */
9746DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9747{
9748 /* The lazy approach for now... */
9749 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9750 if ((GCPtrMem & 31) == 0)
9751 {
9752 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9753 pu256Dst->au64[0] = pu256Src->au64[0];
9754 pu256Dst->au64[1] = pu256Src->au64[1];
9755 pu256Dst->au64[2] = pu256Src->au64[2];
9756 pu256Dst->au64[3] = pu256Src->au64[3];
9757 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9758 return;
9759 }
9760
9761 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9762 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9763}
9764#endif
9765
9766
9767
9768/**
9769 * Fetches a descriptor register (lgdt, lidt).
9770 *
9771 * @returns Strict VBox status code.
9772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9773 * @param pcbLimit Where to return the limit.
9774 * @param pGCPtrBase Where to return the base.
9775 * @param iSegReg The index of the segment register to use for
9776 * this access. The base and limits are checked.
9777 * @param GCPtrMem The address of the guest memory.
9778 * @param enmOpSize The effective operand size.
9779 */
9780IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9781 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9782{
9783 /*
9784 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9785 * little special:
9786 * - The two reads are done separately.
9787 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9788 * - We suspect the 386 to actually commit the limit before the base in
9789 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9790 * don't try to emulate this eccentric behavior, because it's not well
9791 * enough understood and rather hard to trigger.
9792 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9793 */
9794 VBOXSTRICTRC rcStrict;
9795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9796 {
9797 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9798 if (rcStrict == VINF_SUCCESS)
9799 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9800 }
9801 else
9802 {
9803 uint32_t uTmp = 0; /* (Silences Visual C++'s potentially-uninitialized-variable warning.) */
9804 if (enmOpSize == IEMMODE_32BIT)
9805 {
9806 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9807 {
9808 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9809 if (rcStrict == VINF_SUCCESS)
9810 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9811 }
9812 else
9813 {
9814 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9815 if (rcStrict == VINF_SUCCESS)
9816 {
9817 *pcbLimit = (uint16_t)uTmp;
9818 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9819 }
9820 }
9821 if (rcStrict == VINF_SUCCESS)
9822 *pGCPtrBase = uTmp;
9823 }
9824 else
9825 {
9826 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9827 if (rcStrict == VINF_SUCCESS)
9828 {
9829 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9830 if (rcStrict == VINF_SUCCESS)
9831 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9832 }
9833 }
9834 }
9835 return rcStrict;
9836}
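
/*
 * A sketch of the pseudo-descriptor layout the fetcher above consumes
 * (hypothetical types and helper, guarded by #if 0): a 16-bit limit followed by
 * the base at offset 2.  With a 16-bit operand size only the low 24 bits of the
 * base are kept, with a 32-bit operand size all 32 bits are used, and in 64-bit
 * mode the base is a full qword.
 */
#if 0 /* illustrative sketch only */
# pragma pack(1)
typedef struct IEMEXAMPLEXDTR32
{
    uint16_t cbLimit;   /* +0: limit, always read as a word */
    uint32_t uBase;     /* +2: base dword (masked to 24 bits for 16-bit operand size) */
} IEMEXAMPLEXDTR32;
# pragma pack()

static uint32_t iemExampleXdtrBaseFor16BitOpSize(uint32_t uRawBase)
{
    return uRawBase & UINT32_C(0x00ffffff); /* 286-style 24-bit base */
}
#endif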
9837
9838
9839
9840/**
9841 * Stores a data byte.
9842 *
9843 * @returns Strict VBox status code.
9844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9845 * @param iSegReg The index of the segment register to use for
9846 * this access. The base and limits are checked.
9847 * @param GCPtrMem The address of the guest memory.
9848 * @param u8Value The value to store.
9849 */
9850IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9851{
9852 /* The lazy approach for now... */
9853 uint8_t *pu8Dst;
9854 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9855 if (rc == VINF_SUCCESS)
9856 {
9857 *pu8Dst = u8Value;
9858 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9859 }
9860 return rc;
9861}
9862
9863
9864#ifdef IEM_WITH_SETJMP
9865/**
9866 * Stores a data byte, longjmp on error.
9867 *
9868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9869 * @param iSegReg The index of the segment register to use for
9870 * this access. The base and limits are checked.
9871 * @param GCPtrMem The address of the guest memory.
9872 * @param u8Value The value to store.
9873 */
9874IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9875{
9876 /* The lazy approach for now... */
9877 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9878 *pu8Dst = u8Value;
9879 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9880}
9881#endif
9882
9883
9884/**
9885 * Stores a data word.
9886 *
9887 * @returns Strict VBox status code.
9888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9889 * @param iSegReg The index of the segment register to use for
9890 * this access. The base and limits are checked.
9891 * @param GCPtrMem The address of the guest memory.
9892 * @param u16Value The value to store.
9893 */
9894IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9895{
9896 /* The lazy approach for now... */
9897 uint16_t *pu16Dst;
9898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9899 if (rc == VINF_SUCCESS)
9900 {
9901 *pu16Dst = u16Value;
9902 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9903 }
9904 return rc;
9905}
9906
9907
9908#ifdef IEM_WITH_SETJMP
9909/**
9910 * Stores a data word, longjmp on error.
9911 *
9912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9913 * @param iSegReg The index of the segment register to use for
9914 * this access. The base and limits are checked.
9915 * @param GCPtrMem The address of the guest memory.
9916 * @param u16Value The value to store.
9917 */
9918IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9919{
9920 /* The lazy approach for now... */
9921 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9922 *pu16Dst = u16Value;
9923 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9924}
9925#endif
9926
9927
9928/**
9929 * Stores a data dword.
9930 *
9931 * @returns Strict VBox status code.
9932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9933 * @param iSegReg The index of the segment register to use for
9934 * this access. The base and limits are checked.
9935 * @param GCPtrMem The address of the guest memory.
9936 * @param u32Value The value to store.
9937 */
9938IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9939{
9940 /* The lazy approach for now... */
9941 uint32_t *pu32Dst;
9942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9943 if (rc == VINF_SUCCESS)
9944 {
9945 *pu32Dst = u32Value;
9946 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9947 }
9948 return rc;
9949}
9950
9951
9952#ifdef IEM_WITH_SETJMP
9953/**
9954 * Stores a data dword, longjmp on error.
9955 *
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param iSegReg The index of the segment register to use for
9959 * this access. The base and limits are checked.
9960 * @param GCPtrMem The address of the guest memory.
9961 * @param u32Value The value to store.
9962 */
9963IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9964{
9965 /* The lazy approach for now... */
9966 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 *pu32Dst = u32Value;
9968 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9969}
9970#endif
9971
9972
9973/**
9974 * Stores a data qword.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param iSegReg The index of the segment register to use for
9979 * this access. The base and limits are checked.
9980 * @param GCPtrMem The address of the guest memory.
9981 * @param u64Value The value to store.
9982 */
9983IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9984{
9985 /* The lazy approach for now... */
9986 uint64_t *pu64Dst;
9987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9988 if (rc == VINF_SUCCESS)
9989 {
9990 *pu64Dst = u64Value;
9991 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9992 }
9993 return rc;
9994}
9995
9996
9997#ifdef IEM_WITH_SETJMP
9998/**
9999 * Stores a data qword, longjmp on error.
10000 *
10001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10002 * @param iSegReg The index of the segment register to use for
10003 * this access. The base and limits are checked.
10004 * @param GCPtrMem The address of the guest memory.
10005 * @param u64Value The value to store.
10006 */
10007IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10008{
10009 /* The lazy approach for now... */
10010 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10011 *pu64Dst = u64Value;
10012 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10013}
10014#endif
10015
10016
10017/**
10018 * Stores a data dqword.
10019 *
10020 * @returns Strict VBox status code.
10021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10022 * @param iSegReg The index of the segment register to use for
10023 * this access. The base and limits are checked.
10024 * @param GCPtrMem The address of the guest memory.
10025 * @param u128Value The value to store.
10026 */
10027IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10028{
10029 /* The lazy approach for now... */
10030 PRTUINT128U pu128Dst;
10031 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10032 if (rc == VINF_SUCCESS)
10033 {
10034 pu128Dst->au64[0] = u128Value.au64[0];
10035 pu128Dst->au64[1] = u128Value.au64[1];
10036 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10037 }
10038 return rc;
10039}
10040
10041
10042#ifdef IEM_WITH_SETJMP
10043/**
10044 * Stores a data dqword, longjmp on error.
10045 *
10046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10047 * @param iSegReg The index of the segment register to use for
10048 * this access. The base and limits are checked.
10049 * @param GCPtrMem The address of the guest memory.
10050 * @param u128Value The value to store.
10051 */
10052IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10053{
10054 /* The lazy approach for now... */
10055 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10056 pu128Dst->au64[0] = u128Value.au64[0];
10057 pu128Dst->au64[1] = u128Value.au64[1];
10058 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10059}
10060#endif
10061
10062
10063/**
10064 * Stores a data dqword, SSE aligned.
10065 *
10066 * @returns Strict VBox status code.
10067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10068 * @param iSegReg The index of the segment register to use for
10069 * this access. The base and limits are checked.
10070 * @param GCPtrMem The address of the guest memory.
10071 * @param u128Value The value to store.
10072 */
10073IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10074{
10075 /* The lazy approach for now... */
10076 if ( (GCPtrMem & 15)
10077 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10078 return iemRaiseGeneralProtectionFault0(pVCpu);
10079
10080 PRTUINT128U pu128Dst;
10081 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10082 if (rc == VINF_SUCCESS)
10083 {
10084 pu128Dst->au64[0] = u128Value.au64[0];
10085 pu128Dst->au64[1] = u128Value.au64[1];
10086 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10087 }
10088 return rc;
10089}
10090
10091
10092#ifdef IEM_WITH_SETJMP
10093/**
10094 * Stores a data dqword, SSE aligned, longjmp on error.
10095 *
10097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10098 * @param iSegReg The index of the segment register to use for
10099 * this access. The base and limits are checked.
10100 * @param GCPtrMem The address of the guest memory.
10101 * @param u128Value The value to store.
10102 */
10103DECL_NO_INLINE(IEM_STATIC, void)
10104iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10105{
10106 /* The lazy approach for now... */
10107 if ( (GCPtrMem & 15) == 0
10108 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10109 {
10110 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10111 pu128Dst->au64[0] = u128Value.au64[0];
10112 pu128Dst->au64[1] = u128Value.au64[1];
10113 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10114 return;
10115 }
10116
10117 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10118 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10119}
10120#endif
10121
10122
10123/**
10124 * Stores a data oword (octo word), generally AVX related.
10125 *
10126 * @returns Strict VBox status code.
10127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10128 * @param iSegReg The index of the segment register to use for
10129 * this access. The base and limits are checked.
10130 * @param GCPtrMem The address of the guest memory.
10131 * @param pu256Value Pointer to the value to store.
10132 */
10133IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10134{
10135 /* The lazy approach for now... */
10136 PRTUINT256U pu256Dst;
10137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10138 if (rc == VINF_SUCCESS)
10139 {
10140 pu256Dst->au64[0] = pu256Value->au64[0];
10141 pu256Dst->au64[1] = pu256Value->au64[1];
10142 pu256Dst->au64[2] = pu256Value->au64[2];
10143 pu256Dst->au64[3] = pu256Value->au64[3];
10144 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10145 }
10146 return rc;
10147}
10148
10149
10150#ifdef IEM_WITH_SETJMP
10151/**
10152 * Stores a data oword (octo word), longjmp on error.
10153 *
10154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10155 * @param iSegReg The index of the segment register to use for
10156 * this access. The base and limits are checked.
10157 * @param GCPtrMem The address of the guest memory.
10158 * @param pu256Value Pointer to the value to store.
10159 */
10160IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10161{
10162 /* The lazy approach for now... */
10163 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10164 pu256Dst->au64[0] = pu256Value->au64[0];
10165 pu256Dst->au64[1] = pu256Value->au64[1];
10166 pu256Dst->au64[2] = pu256Value->au64[2];
10167 pu256Dst->au64[3] = pu256Value->au64[3];
10168 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10169}
10170#endif
10171
10172
10173/**
10174 * Stores a data oword (octo word), AVX aligned.
10175 *
10176 * @returns Strict VBox status code.
10177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10178 * @param iSegReg The index of the segment register to use for
10179 * this access. The base and limits are checked.
10180 * @param GCPtrMem The address of the guest memory.
10181 * @param pu256Value Pointer to the value to store.
10182 */
10183IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10184{
10185 /* The lazy approach for now... */
10186 if (GCPtrMem & 31)
10187 return iemRaiseGeneralProtectionFault0(pVCpu);
10188
10189 PRTUINT256U pu256Dst;
10190 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10191 if (rc == VINF_SUCCESS)
10192 {
10193 pu256Dst->au64[0] = pu256Value->au64[0];
10194 pu256Dst->au64[1] = pu256Value->au64[1];
10195 pu256Dst->au64[2] = pu256Value->au64[2];
10196 pu256Dst->au64[3] = pu256Value->au64[3];
10197 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10198 }
10199 return rc;
10200}
10201
10202
10203#ifdef IEM_WITH_SETJMP
10204/**
10205 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10206 *
10208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 * @param pu256Value Pointer to the value to store.
10213 */
10214DECL_NO_INLINE(IEM_STATIC, void)
10215iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10216{
10217 /* The lazy approach for now... */
10218 if ((GCPtrMem & 31) == 0)
10219 {
10220 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10221 pu256Dst->au64[0] = pu256Value->au64[0];
10222 pu256Dst->au64[1] = pu256Value->au64[1];
10223 pu256Dst->au64[2] = pu256Value->au64[2];
10224 pu256Dst->au64[3] = pu256Value->au64[3];
10225 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10226 return;
10227 }
10228
10229 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10230 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10231}
10232#endif
10233
10234
10235/**
10236 * Stores a descriptor register (sgdt, sidt).
10237 *
10238 * @returns Strict VBox status code.
10239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10240 * @param cbLimit The limit.
10241 * @param GCPtrBase The base address.
10242 * @param iSegReg The index of the segment register to use for
10243 * this access. The base and limits are checked.
10244 * @param GCPtrMem The address of the guest memory.
10245 */
10246IEM_STATIC VBOXSTRICTRC
10247iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10248{
10249 /*
10250 * The SIDT and SGDT instructions actually store the data using two
10251 * independent writes. The instructions do not respond to operand size prefixes.
10252 */
10253 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10254 if (rcStrict == VINF_SUCCESS)
10255 {
10256 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10257 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10258 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10259 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10260 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10261 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10262 else
10263 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10264 }
10265 return rcStrict;
10266}
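
/*
 * A sketch of the 286 quirk the store above implements (hypothetical helper,
 * guarded by #if 0): a 16-bit operand-size SGDT/SIDT on a 286-class target
 * stores 0xFF in the top byte of the base dword, while later CPUs store the
 * real bits.
 */
#if 0 /* illustrative sketch only */
static uint32_t iemExampleSxdtBaseDword(uint32_t uBase, bool f286Target)
{
    return f286Target ? uBase | UINT32_C(0xff000000) : uBase;
}
#endif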
10267
10268
10269/**
10270 * Pushes a word onto the stack.
10271 *
10272 * @returns Strict VBox status code.
10273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10274 * @param u16Value The value to push.
10275 */
10276IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10277{
10278 /* Decrement the stack pointer (push). */
10279 uint64_t uNewRsp;
10280 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10281
10282 /* Write the word the lazy way. */
10283 uint16_t *pu16Dst;
10284 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10285 if (rc == VINF_SUCCESS)
10286 {
10287 *pu16Dst = u16Value;
10288 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10289 }
10290
10291 /* Commit the new RSP value unless an access handler made trouble. */
10292 if (rc == VINF_SUCCESS)
10293 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10294
10295 return rc;
10296}
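
/*
 * A usage sketch of the push helpers (hypothetical caller, guarded by #if 0):
 * RSP is only committed once the memory write itself has been committed, so a
 * failing push leaves the guest stack pointer untouched and the caller simply
 * propagates the strict status code.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC iemExamplePushTwoWords(PVMCPU pVCpu, uint16_t u16First, uint16_t u16Second)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16First);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16(pVCpu, u16Second); /* only attempted if the first push stuck */
    return rcStrict;
}
#endif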
10297
10298
10299/**
10300 * Pushes a dword onto the stack.
10301 *
10302 * @returns Strict VBox status code.
10303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10304 * @param u32Value The value to push.
10305 */
10306IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10307{
10308 /* Decrement the stack pointer (push). */
10309 uint64_t uNewRsp;
10310 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10311
10312 /* Write the dword the lazy way. */
10313 uint32_t *pu32Dst;
10314 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10315 if (rc == VINF_SUCCESS)
10316 {
10317 *pu32Dst = u32Value;
10318 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10319 }
10320
10321 /* Commit the new RSP value unless an access handler made trouble. */
10322 if (rc == VINF_SUCCESS)
10323 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10324
10325 return rc;
10326}
10327
10328
10329/**
10330 * Pushes a dword segment register value onto the stack.
10331 *
10332 * @returns Strict VBox status code.
10333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10334 * @param u32Value The value to push.
10335 */
10336IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10337{
10338 /* Decrement the stack pointer (push). */
10339 uint64_t uNewRsp;
10340 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10341
10342 /* The Intel docs talk about zero extending the selector register
10343 value. My actual Intel CPU here might be zero extending the value
10344 but it still only writes the lower word... */
10345 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10346 * happens when crossing an electric page boundary, is the high word checked
10347 * for write accessibility or not? Probably it is. What about segment limits?
10348 * It appears this behavior is also shared with trap error codes.
10349 *
10350 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10351 * ancient hardware when it actually did change. */
10352 uint16_t *pu16Dst;
10353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10354 if (rc == VINF_SUCCESS)
10355 {
10356 *pu16Dst = (uint16_t)u32Value;
10357 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10358 }
10359
10360 /* Commit the new RSP value unless an access handler made trouble. */
10361 if (rc == VINF_SUCCESS)
10362 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10363
10364 return rc;
10365}
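
/*
 * An illustration of the behaviour implemented above (hypothetical, little-endian
 * only, guarded by #if 0): a dword-sized stack slot is reserved and mapped, but
 * only its low word is written, so the high word keeps whatever the guest stack
 * already contained.
 */
#if 0 /* illustrative sketch only */
static void iemExampleWriteSRegIntoDwordSlot(uint32_t *pu32StackSlot, uint32_t u32SRegValue)
{
    *(uint16_t *)pu32StackSlot = (uint16_t)u32SRegValue; /* high word of the slot untouched */
}
#endif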
10366
10367
10368/**
10369 * Pushes a qword onto the stack.
10370 *
10371 * @returns Strict VBox status code.
10372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10373 * @param u64Value The value to push.
10374 */
10375IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10376{
10377 /* Decrement the stack pointer (push). */
10378 uint64_t uNewRsp;
10379 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10380
10381 /* Write the qword the lazy way. */
10382 uint64_t *pu64Dst;
10383 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10384 if (rc == VINF_SUCCESS)
10385 {
10386 *pu64Dst = u64Value;
10387 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10388 }
10389
10390 /* Commit the new RSP value unless an access handler made trouble. */
10391 if (rc == VINF_SUCCESS)
10392 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10393
10394 return rc;
10395}
10396
10397
10398/**
10399 * Pops a word from the stack.
10400 *
10401 * @returns Strict VBox status code.
10402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10403 * @param pu16Value Where to store the popped value.
10404 */
10405IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10406{
10407 /* Increment the stack pointer. */
10408 uint64_t uNewRsp;
10409 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10410
10411 /* Read the word the lazy way. */
10412 uint16_t const *pu16Src;
10413 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10414 if (rc == VINF_SUCCESS)
10415 {
10416 *pu16Value = *pu16Src;
10417 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10418
10419 /* Commit the new RSP value. */
10420 if (rc == VINF_SUCCESS)
10421 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10422 }
10423
10424 return rc;
10425}
10426
10427
10428/**
10429 * Pops a dword from the stack.
10430 *
10431 * @returns Strict VBox status code.
10432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10433 * @param pu32Value Where to store the popped value.
10434 */
10435IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10436{
10437 /* Increment the stack pointer. */
10438 uint64_t uNewRsp;
10439 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10440
10441 /* Read the dword the lazy way. */
10442 uint32_t const *pu32Src;
10443 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10444 if (rc == VINF_SUCCESS)
10445 {
10446 *pu32Value = *pu32Src;
10447 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10448
10449 /* Commit the new RSP value. */
10450 if (rc == VINF_SUCCESS)
10451 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10452 }
10453
10454 return rc;
10455}
10456
10457
10458/**
10459 * Pops a qword from the stack.
10460 *
10461 * @returns Strict VBox status code.
10462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10463 * @param pu64Value Where to store the popped value.
10464 */
10465IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10466{
10467 /* Increment the stack pointer. */
10468 uint64_t uNewRsp;
10469 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10470
10471 /* Read the qword the lazy way. */
10472 uint64_t const *pu64Src;
10473 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10474 if (rc == VINF_SUCCESS)
10475 {
10476 *pu64Value = *pu64Src;
10477 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10478
10479 /* Commit the new RSP value. */
10480 if (rc == VINF_SUCCESS)
10481 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10482 }
10483
10484 return rc;
10485}
10486
10487
10488/**
10489 * Pushes a word onto the stack, using a temporary stack pointer.
10490 *
10491 * @returns Strict VBox status code.
10492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10493 * @param u16Value The value to push.
10494 * @param pTmpRsp Pointer to the temporary stack pointer.
10495 */
10496IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10497{
10498 /* Decrement the stack pointer (push). */
10499 RTUINT64U NewRsp = *pTmpRsp;
10500 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10501
10502 /* Write the word the lazy way. */
10503 uint16_t *pu16Dst;
10504 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10505 if (rc == VINF_SUCCESS)
10506 {
10507 *pu16Dst = u16Value;
10508 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10509 }
10510
10511 /* Commit the new RSP value unless an access handler made trouble. */
10512 if (rc == VINF_SUCCESS)
10513 *pTmpRsp = NewRsp;
10514
10515 return rc;
10516}
10517
10518
10519/**
10520 * Pushes a dword onto the stack, using a temporary stack pointer.
10521 *
10522 * @returns Strict VBox status code.
10523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10524 * @param u32Value The value to push.
10525 * @param pTmpRsp Pointer to the temporary stack pointer.
10526 */
10527IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10528{
10529 /* Decrement the stack pointer (push). */
10530 RTUINT64U NewRsp = *pTmpRsp;
10531 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10532
10533 /* Write the dword the lazy way. */
10534 uint32_t *pu32Dst;
10535 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10536 if (rc == VINF_SUCCESS)
10537 {
10538 *pu32Dst = u32Value;
10539 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10540 }
10541
10542 /* Commit the new RSP value unless an access handler made trouble. */
10543 if (rc == VINF_SUCCESS)
10544 *pTmpRsp = NewRsp;
10545
10546 return rc;
10547}
10548
10549
10550/**
10551 * Pushes a qword onto the stack, using a temporary stack pointer.
10552 *
10553 * @returns Strict VBox status code.
10554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10555 * @param u64Value The value to push.
10556 * @param pTmpRsp Pointer to the temporary stack pointer.
10557 */
10558IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10559{
10560 /* Decrement the stack pointer (push). */
10561 RTUINT64U NewRsp = *pTmpRsp;
10562 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10563
10564 /* Write the qword the lazy way. */
10565 uint64_t *pu64Dst;
10566 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10567 if (rc == VINF_SUCCESS)
10568 {
10569 *pu64Dst = u64Value;
10570 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10571 }
10572
10573 /* Commit the new RSP value unless an access handler made trouble. */
10574 if (rc == VINF_SUCCESS)
10575 *pTmpRsp = NewRsp;
10576
10577 return rc;
10578}
10579
10580
10581/**
10582 * Pops a word from the stack, using a temporary stack pointer.
10583 *
10584 * @returns Strict VBox status code.
10585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10586 * @param pu16Value Where to store the popped value.
10587 * @param pTmpRsp Pointer to the temporary stack pointer.
10588 */
10589IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10590{
10591 /* Increment the stack pointer. */
10592 RTUINT64U NewRsp = *pTmpRsp;
10593 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10594
10595 /* Read the word the lazy way. */
10596 uint16_t const *pu16Src;
10597 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10598 if (rc == VINF_SUCCESS)
10599 {
10600 *pu16Value = *pu16Src;
10601 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10602
10603 /* Commit the new RSP value. */
10604 if (rc == VINF_SUCCESS)
10605 *pTmpRsp = NewRsp;
10606 }
10607
10608 return rc;
10609}
10610
10611
10612/**
10613 * Pops a dword from the stack, using a temporary stack pointer.
10614 *
10615 * @returns Strict VBox status code.
10616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10617 * @param pu32Value Where to store the popped value.
10618 * @param pTmpRsp Pointer to the temporary stack pointer.
10619 */
10620IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10621{
10622 /* Increment the stack pointer. */
10623 RTUINT64U NewRsp = *pTmpRsp;
10624 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10625
10626 /* Read the dword the lazy way. */
10627 uint32_t const *pu32Src;
10628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10629 if (rc == VINF_SUCCESS)
10630 {
10631 *pu32Value = *pu32Src;
10632 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10633
10634 /* Commit the new RSP value. */
10635 if (rc == VINF_SUCCESS)
10636 *pTmpRsp = NewRsp;
10637 }
10638
10639 return rc;
10640}
10641
10642
10643/**
10644 * Pops a qword from the stack, using a temporary stack pointer.
10645 *
10646 * @returns Strict VBox status code.
10647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10648 * @param pu64Value Where to store the popped value.
10649 * @param pTmpRsp Pointer to the temporary stack pointer.
10650 */
10651IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10652{
10653 /* Increment the stack pointer. */
10654 RTUINT64U NewRsp = *pTmpRsp;
10655 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10656
10657 /* Read the qword the lazy way. */
10658 uint64_t const *pu64Src;
10659 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10660 if (rcStrict == VINF_SUCCESS)
10661 {
10662 *pu64Value = *pu64Src;
10663 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10664
10665 /* Commit the new RSP value. */
10666 if (rcStrict == VINF_SUCCESS)
10667 *pTmpRsp = NewRsp;
10668 }
10669
10670 return rcStrict;
10671}
10672
10673
10674/**
10675 * Begin a special stack push (used by interrupts, exceptions and such).
10676 *
10677 * This will raise \#SS or \#PF if appropriate.
10678 *
10679 * @returns Strict VBox status code.
10680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10681 * @param cbMem The number of bytes to push onto the stack.
10682 * @param ppvMem Where to return the pointer to the stack memory.
10683 * As with the other memory functions this could be
10684 * direct access or bounce buffered access, so
10685 * don't commit register state until the commit call
10686 * succeeds.
10687 * @param puNewRsp Where to return the new RSP value. This must be
10688 * passed unchanged to
10689 * iemMemStackPushCommitSpecial().
10690 */
10691IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10692{
10693 Assert(cbMem < UINT8_MAX);
10694 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10695 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10696}
10697
10698
10699/**
10700 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10701 *
10702 * This will update the rSP.
10703 *
10704 * @returns Strict VBox status code.
10705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10706 * @param pvMem The pointer returned by
10707 * iemMemStackPushBeginSpecial().
10708 * @param uNewRsp The new RSP value returned by
10709 * iemMemStackPushBeginSpecial().
10710 */
10711IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10712{
10713 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10714 if (rcStrict == VINF_SUCCESS)
10715 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10716 return rcStrict;
10717}
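
/*
 * A minimal sketch of the special push protocol (hypothetical caller with a
 * made-up three word frame, guarded by #if 0): begin maps the whole frame at the
 * prospective stack top, the caller fills it in, and commit unmaps it and only
 * then updates RSP.
 */
#if 0 /* illustrative sketch only */
static VBOXSTRICTRC iemExamplePushThreeWordFrame(PVMCPU pVCpu, uint16_t uIp, uint16_t uCs, uint16_t uFlags)
{
    void     *pvFrame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint16_t *pu16Frame = (uint16_t *)pvFrame;
    pu16Frame[0] = uIp;     /* lowest address, i.e. the new top of the stack */
    pu16Frame[1] = uCs;
    pu16Frame[2] = uFlags;

    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* commits RSP on success */
}
#endif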
10718
10719
10720/**
10721 * Begin a special stack pop (used by iret, retf and such).
10722 *
10723 * This will raise \#SS or \#PF if appropriate.
10724 *
10725 * @returns Strict VBox status code.
10726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10727 * @param cbMem The number of bytes to pop from the stack.
10728 * @param ppvMem Where to return the pointer to the stack memory.
10729 * @param puNewRsp Where to return the new RSP value. This must be
10730 * assigned to CPUMCTX::rsp manually some time
10731 * after iemMemStackPopDoneSpecial() has been
10732 * called.
10733 */
10734IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10735{
10736 Assert(cbMem < UINT8_MAX);
10737 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10738 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10739}
10740
10741
10742/**
10743 * Continue a special stack pop (used by iret and retf).
10744 *
10745 * This will raise \#SS or \#PF if appropriate.
10746 *
10747 * @returns Strict VBox status code.
10748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10749 * @param cbMem The number of bytes to pop from the stack.
10750 * @param ppvMem Where to return the pointer to the stack memory.
10751 * @param puNewRsp Where to return the new RSP value. This must be
10752 * assigned to CPUMCTX::rsp manually some time
10753 * after iemMemStackPopDoneSpecial() has been
10754 * called.
10755 */
10756IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10757{
10758 Assert(cbMem < UINT8_MAX);
10759 RTUINT64U NewRsp;
10760 NewRsp.u = *puNewRsp;
10761 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10762 *puNewRsp = NewRsp.u;
10763 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10764}
10765
10766
10767/**
10768 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10769 * iemMemStackPopContinueSpecial).
10770 *
10771 * The caller will manually commit the rSP.
10772 *
10773 * @returns Strict VBox status code.
10774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10775 * @param pvMem The pointer returned by
10776 * iemMemStackPopBeginSpecial() or
10777 * iemMemStackPopContinueSpecial().
10778 */
10779IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10780{
10781 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10782}
10783
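/*
 * Usage sketch added for clarity (not part of the original file): the pop
 * helpers above are used as begin/(continue)/done, with the caller writing
 * CPUMCTX::rsp itself only once everything that can still fail has
 * succeeded.  The 6-byte frame and the variable names are assumptions.
 *
 *     uint16_t const *pu16Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint16_t const uNewIp    = pu16Frame[0];    // assumed frame contents
 *     uint16_t const uNewCs    = pu16Frame[1];
 *     uint16_t const uNewFlags = pu16Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // after CS:IP and the flags have been loaded, commit the stack pointer:
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */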
10784
10785/**
10786 * Fetches a system table byte.
10787 *
10788 * @returns Strict VBox status code.
10789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10790 * @param pbDst Where to return the byte.
10791 * @param iSegReg The index of the segment register to use for
10792 * this access. The base and limits are checked.
10793 * @param GCPtrMem The address of the guest memory.
10794 */
10795IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10796{
10797 /* The lazy approach for now... */
10798 uint8_t const *pbSrc;
10799 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10800 if (rc == VINF_SUCCESS)
10801 {
10802 *pbDst = *pbSrc;
10803 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10804 }
10805 return rc;
10806}
10807
10808
10809/**
10810 * Fetches a system table word.
10811 *
10812 * @returns Strict VBox status code.
10813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10814 * @param pu16Dst Where to return the word.
10815 * @param iSegReg The index of the segment register to use for
10816 * this access. The base and limits are checked.
10817 * @param GCPtrMem The address of the guest memory.
10818 */
10819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10820{
10821 /* The lazy approach for now... */
10822 uint16_t const *pu16Src;
10823 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10824 if (rc == VINF_SUCCESS)
10825 {
10826 *pu16Dst = *pu16Src;
10827 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10828 }
10829 return rc;
10830}
10831
10832
10833/**
10834 * Fetches a system table dword.
10835 *
10836 * @returns Strict VBox status code.
10837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10838 * @param pu32Dst Where to return the dword.
10839 * @param iSegReg The index of the segment register to use for
10840 * this access. The base and limits are checked.
10841 * @param GCPtrMem The address of the guest memory.
10842 */
10843IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10844{
10845 /* The lazy approach for now... */
10846 uint32_t const *pu32Src;
10847 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10848 if (rc == VINF_SUCCESS)
10849 {
10850 *pu32Dst = *pu32Src;
10851 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10852 }
10853 return rc;
10854}
10855
10856
10857/**
10858 * Fetches a system table qword.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10862 * @param pu64Dst Where to return the qword.
10863 * @param iSegReg The index of the segment register to use for
10864 * this access. The base and limits are checked.
10865 * @param GCPtrMem The address of the guest memory.
10866 */
10867IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10868{
10869 /* The lazy approach for now... */
10870 uint64_t const *pu64Src;
10871 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10872 if (rc == VINF_SUCCESS)
10873 {
10874 *pu64Dst = *pu64Src;
10875 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10876 }
10877 return rc;
10878}
10879
10880
10881/**
10882 * Fetches a descriptor table entry with caller specified error code.
10883 *
10884 * @returns Strict VBox status code.
10885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10886 * @param pDesc Where to return the descriptor table entry.
10887 * @param uSel The selector which table entry to fetch.
10888 * @param uXcpt The exception to raise on table lookup error.
10889 * @param uErrorCode The error code associated with the exception.
10890 */
10891IEM_STATIC VBOXSTRICTRC
10892iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10893{
10894 AssertPtr(pDesc);
10895 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10896
10897 /** @todo did the 286 require all 8 bytes to be accessible? */
10898 /*
10899 * Get the selector table base and check bounds.
10900 */
10901 RTGCPTR GCPtrBase;
10902 if (uSel & X86_SEL_LDT)
10903 {
10904 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10905 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10906 {
10907 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10908 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10909 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10910 uErrorCode, 0);
10911 }
10912
10913 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10914 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10915 }
10916 else
10917 {
10918 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10919 {
10920 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10921 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10922 uErrorCode, 0);
10923 }
10924 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10925 }
10926
10927 /*
10928 * Read the legacy descriptor and maybe the long mode extensions if
10929 * required.
10930 */
10931 VBOXSTRICTRC rcStrict;
10932 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10933 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10934 else
10935 {
10936 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10937 if (rcStrict == VINF_SUCCESS)
10938 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10939 if (rcStrict == VINF_SUCCESS)
10940 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10941 if (rcStrict == VINF_SUCCESS)
10942 pDesc->Legacy.au16[3] = 0;
10943 else
10944 return rcStrict;
10945 }
10946
10947 if (rcStrict == VINF_SUCCESS)
10948 {
10949 if ( !IEM_IS_LONG_MODE(pVCpu)
10950 || pDesc->Legacy.Gen.u1DescType)
10951 pDesc->Long.au64[1] = 0;
10952 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10953 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10954 else
10955 {
10956 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10957 /** @todo is this the right exception? */
10958 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10959 }
10960 }
10961 return rcStrict;
10962}
10963
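/*
 * Worked example added for clarity (not in the original source): for a GDT
 * system selector of 0x0028 in long mode, the high 8 bytes of the 16-byte
 * descriptor live at table offset (0x0028 | X86_SEL_RPL_LDT) + 1 = 0x002f + 1
 * = 0x0030, while the bounds check above requires
 * (0x0028 | X86_SEL_RPL_LDT) + 8 = 0x0037 to not exceed the GDT limit.
 */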
10964
10965/**
10966 * Fetches a descriptor table entry.
10967 *
10968 * @returns Strict VBox status code.
10969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10970 * @param pDesc Where to return the descriptor table entry.
10971 * @param uSel The selector which table entry to fetch.
10972 * @param uXcpt The exception to raise on table lookup error.
10973 */
10974IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10975{
10976 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10977}
10978
10979
10980/**
10981 * Fakes a long mode stack selector for SS = 0.
10982 *
10983 * @param pDescSs Where to return the fake stack descriptor.
10984 * @param uDpl The DPL we want.
10985 */
10986IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10987{
10988 pDescSs->Long.au64[0] = 0;
10989 pDescSs->Long.au64[1] = 0;
10990 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10991 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10992 pDescSs->Long.Gen.u2Dpl = uDpl;
10993 pDescSs->Long.Gen.u1Present = 1;
10994 pDescSs->Long.Gen.u1Long = 1;
10995}
10996
10997
10998/**
10999 * Marks the selector descriptor as accessed (only non-system descriptors).
11000 *
11001 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11002 * will therefore skip the limit checks.
11003 *
11004 * @returns Strict VBox status code.
11005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11006 * @param uSel The selector.
11007 */
11008IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11009{
11010 /*
11011 * Get the selector table base and calculate the entry address.
11012 */
11013 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11014 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11015 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11016 GCPtr += uSel & X86_SEL_MASK;
11017
11018 /*
11019 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11020 * ugly stuff to avoid this. This will make sure it's an atomic access
11021 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11022 */
11023 VBOXSTRICTRC rcStrict;
11024 uint32_t volatile *pu32;
11025 if ((GCPtr & 3) == 0)
11026 {
11027 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11028 GCPtr += 2 + 2;
11029 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11030 if (rcStrict != VINF_SUCCESS)
11031 return rcStrict;
11032 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11033 }
11034 else
11035 {
11036 /* The misaligned GDT/LDT case, map the whole thing. */
11037 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11038 if (rcStrict != VINF_SUCCESS)
11039 return rcStrict;
11040 switch ((uintptr_t)pu32 & 3)
11041 {
11042 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11043 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11044 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11045 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11046 }
11047 }
11048
11049 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11050}
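/*
 * Note added for clarity (not in the original source): in the misaligned case
 * above, each switch case advances the base pointer until it is 4-byte
 * aligned again and reduces the bit index by the same number of bits, so the
 * atomic set always lands on descriptor bit 40, the accessed bit.  E.g. when
 * ((uintptr_t)pu32 & 3) == 2, the base moves forward 2 bytes (16 bits) and
 * the bit index becomes 40 - 16 = 24.
 */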
11051
11052/** @} */
11053
11054
11055/*
11056 * Include the C/C++ implementations of the instructions.
11057 */
11058#include "IEMAllCImpl.cpp.h"
11059
11060
11061
11062/** @name "Microcode" macros.
11063 *
11064 * The idea is that we should be able to use the same code to interpret
11065 * instructions as well as to recompile them. Thus this obfuscation.
11066 *
11067 * @{
11068 */
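/*
 * Illustrative sketch added for clarity (an assumption, not code taken from
 * this file): an instruction body typically strings these macros together
 * roughly like this, so the very same body can expand to direct
 * interpretation today and, potentially, to recompiler input later.  The
 * register index constants (X86_GREG_xAX, X86_GREG_xDX) are assumed here.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */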
11069#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11070#define IEM_MC_END() }
11071#define IEM_MC_PAUSE() do {} while (0)
11072#define IEM_MC_CONTINUE() do {} while (0)
11073
11074/** Internal macro. */
11075#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11076 do \
11077 { \
11078 VBOXSTRICTRC rcStrict2 = a_Expr; \
11079 if (rcStrict2 != VINF_SUCCESS) \
11080 return rcStrict2; \
11081 } while (0)
11082
11083
11084#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11085#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11086#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11087#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11088#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11089#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11090#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11091#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11092#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11093 do { \
11094 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11095 return iemRaiseDeviceNotAvailable(pVCpu); \
11096 } while (0)
11097#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11098 do { \
11099 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11100 return iemRaiseDeviceNotAvailable(pVCpu); \
11101 } while (0)
11102#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11103 do { \
11104 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11105 return iemRaiseMathFault(pVCpu); \
11106 } while (0)
11107#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11108 do { \
11109 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11110 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11111 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11112 return iemRaiseUndefinedOpcode(pVCpu); \
11113 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11114 return iemRaiseDeviceNotAvailable(pVCpu); \
11115 } while (0)
11116#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11117 do { \
11118 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11119 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11120 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11121 return iemRaiseUndefinedOpcode(pVCpu); \
11122 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11123 return iemRaiseDeviceNotAvailable(pVCpu); \
11124 } while (0)
11125#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11126 do { \
11127 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11128 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11129 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11130 return iemRaiseUndefinedOpcode(pVCpu); \
11131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11132 return iemRaiseDeviceNotAvailable(pVCpu); \
11133 } while (0)
11134#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11135 do { \
11136 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11137 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11138 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11139 return iemRaiseUndefinedOpcode(pVCpu); \
11140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11141 return iemRaiseDeviceNotAvailable(pVCpu); \
11142 } while (0)
11143#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11144 do { \
11145 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11146 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11147 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11148 return iemRaiseUndefinedOpcode(pVCpu); \
11149 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11150 return iemRaiseDeviceNotAvailable(pVCpu); \
11151 } while (0)
11152#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11153 do { \
11154 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11155 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11156 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11157 return iemRaiseUndefinedOpcode(pVCpu); \
11158 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11159 return iemRaiseDeviceNotAvailable(pVCpu); \
11160 } while (0)
11161#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11162 do { \
11163 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11164 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11165 return iemRaiseUndefinedOpcode(pVCpu); \
11166 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11167 return iemRaiseDeviceNotAvailable(pVCpu); \
11168 } while (0)
11169#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11170 do { \
11171 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11172 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11173 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11174 return iemRaiseUndefinedOpcode(pVCpu); \
11175 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11179 do { \
11180 if (pVCpu->iem.s.uCpl != 0) \
11181 return iemRaiseGeneralProtectionFault0(pVCpu); \
11182 } while (0)
11183#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11184 do { \
11185 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11186 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11189 do { \
11190 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11191 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11192 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 } while (0)
11195#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11196 do { \
11197 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11198 return iemRaiseGeneralProtectionFault0(pVCpu); \
11199 } while (0)
11200
11201
11202#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11203#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11204#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11205#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11206#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11207#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11208#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11209 uint32_t a_Name; \
11210 uint32_t *a_pName = &a_Name
11211#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11212 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11213
11214#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11215#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11216
11217#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11218#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11219#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11220#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11221#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11222#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11223#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11224#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11225#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11226#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11227#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11228#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11229#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11230#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11231#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11233#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11234#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11235 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11236 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11237 } while (0)
11238#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11239 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11240 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11241 } while (0)
11242#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11243 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11244 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11245 } while (0)
11246/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11247#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11248 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11249 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11250 } while (0)
11251#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11252 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11253 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11254 } while (0)
11255/** @note Not for IOPL or IF testing or modification. */
11256#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11257#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11258#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11259#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11260
11261#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11262#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11263#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11264#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11265#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11266#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11267#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11268#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11269#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11270#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11271/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11272#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11273 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11274 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11275 } while (0)
11276#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11277 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11278 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11279 } while (0)
11280#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11281 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11282
11283
11284#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11285#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11286/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11287 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11288#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11289#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11290/** @note Not for IOPL or IF testing or modification. */
11291#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11292
11293#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11294#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11295#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11296 do { \
11297 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11298 *pu32Reg += (a_u32Value); \
11299 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11300 } while (0)
11301#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11302
11303#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11304#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11305#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11306 do { \
11307 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11308 *pu32Reg -= (a_u32Value); \
11309 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11310 } while (0)
11311#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11312#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11313
11314#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11315#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11316#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11317#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11318#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11319#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11320#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11321
11322#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11323#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11324#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11325#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11326
11327#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11328#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11329#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11330
11331#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11332#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11333#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11334
11335#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11336#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11337#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11338
11339#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11340#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11341#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11342
11343#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11344
11345#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11346
11347#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11348#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11349#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11350 do { \
11351 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11352 *pu32Reg &= (a_u32Value); \
11353 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11354 } while (0)
11355#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11356
11357#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11358#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11359#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11360 do { \
11361 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11362 *pu32Reg |= (a_u32Value); \
11363 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11364 } while (0)
11365#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11366
11367
11368/** @note Not for IOPL or IF modification. */
11369#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11370/** @note Not for IOPL or IF modification. */
11371#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11372/** @note Not for IOPL or IF modification. */
11373#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11374
11375#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11376
11377/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11378#define IEM_MC_FPU_TO_MMX_MODE() do { \
11379 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11380 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11381 } while (0)
11382
11383/** Switches the FPU state from MMX mode (FTW=0xffff). */
11384#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11385 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11386 } while (0)
11387
11388#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11389 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11390#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11391 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11392#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11393 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11394 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11395 } while (0)
11396#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11397 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11398 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11399 } while (0)
11400#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11401 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11402#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11403 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11404#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11405 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11406
11407#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11408 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11409 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11410 } while (0)
11411#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11412 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11413#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11414 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11415#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11416 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11417#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11418 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11419 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11420 } while (0)
11421#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11422 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11423#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11424 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11425 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11426 } while (0)
11427#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11428 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11429#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11430 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11431 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11432 } while (0)
11433#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11434 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11435#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11436 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11437#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11438 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11439#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11440 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11441#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11442 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11443 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11444 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11445 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11446 } while (0)
11447
11448#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11449 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11450 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11451 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11452 } while (0)
11453#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11454 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11455 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11456 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11457 } while (0)
11458#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11459 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11460 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11461 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11462 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11463 } while (0)
11464#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11465 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11466 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11467 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11468 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11469 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11470 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11471 } while (0)
11472
11473#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11474#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11475 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11476 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11477 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11478 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11479 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11480 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11481 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11482 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11483 } while (0)
11484#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11485 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11486 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11487 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11488 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11489 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11490 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11491 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11492 } while (0)
11493#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11494 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11495 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11496 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11498 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11499 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11500 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11501 } while (0)
11502#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11503 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11504 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11505 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11506 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11507 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11508 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11509 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11510 } while (0)
11511
11512#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11513 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11514#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11515 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11516#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11517 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11518#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11519 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11520 uintptr_t const iYRegTmp = (a_iYReg); \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11522 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11523 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11524 } while (0)
11525
11526#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11527 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11528 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11529 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11534 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11535 } while (0)
11536#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11537 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11538 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11539 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11544 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11545 } while (0)
11546#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11547 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11548 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11549 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11551 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11554 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11555 } while (0)
11556
11557#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11558 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11559 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11560 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11561 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11567 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11568 } while (0)
11569#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11570 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11571 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11572 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11573 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11578 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11579 } while (0)
11580#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11581 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11582 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11583 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11584 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11589 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11590 } while (0)
11591#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11592 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11593 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11594 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11596 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11598 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11599 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11600 } while (0)
11601
11602#ifndef IEM_WITH_SETJMP
11603# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11604 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11605# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11607# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11609#else
11610# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11611 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11612# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11613 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11614# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11615 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11616#endif
11617
11618#ifndef IEM_WITH_SETJMP
11619# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11620 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11621# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11622 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11623# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11625#else
11626# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11627 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11628# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11629 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11630# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11631 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11632#endif
11633
11634#ifndef IEM_WITH_SETJMP
11635# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11637# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11638 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11639# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11641#else
11642# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11643 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11644# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11645 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11646# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11647 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11648#endif
11649
11650#ifdef SOME_UNUSED_FUNCTION
11651# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11653#endif
11654
11655#ifndef IEM_WITH_SETJMP
11656# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11657 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11658# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11660# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11664#else
11665# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11666 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11667# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11668 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11669# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11670 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11671# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11672 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11673#endif
11674
11675#ifndef IEM_WITH_SETJMP
11676# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11680# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11682#else
11683# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11684 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11685# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11686 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11687# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11688 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11689#endif
11690
11691#ifndef IEM_WITH_SETJMP
11692# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11694# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11696#else
11697# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11698 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11699# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11700 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11701#endif
11702
11703#ifndef IEM_WITH_SETJMP
11704# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11706# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11708#else
11709# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11710 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11711# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11712 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11713#endif
11714
11715
11716
11717#ifndef IEM_WITH_SETJMP
11718# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11719 do { \
11720 uint8_t u8Tmp; \
11721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11722 (a_u16Dst) = u8Tmp; \
11723 } while (0)
11724# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11725 do { \
11726 uint8_t u8Tmp; \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11728 (a_u32Dst) = u8Tmp; \
11729 } while (0)
11730# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11731 do { \
11732 uint8_t u8Tmp; \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11734 (a_u64Dst) = u8Tmp; \
11735 } while (0)
11736# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11737 do { \
11738 uint16_t u16Tmp; \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11740 (a_u32Dst) = u16Tmp; \
11741 } while (0)
11742# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11743 do { \
11744 uint16_t u16Tmp; \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11746 (a_u64Dst) = u16Tmp; \
11747 } while (0)
11748# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11749 do { \
11750 uint32_t u32Tmp; \
11751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11752 (a_u64Dst) = u32Tmp; \
11753 } while (0)
11754#else /* IEM_WITH_SETJMP */
11755# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11758 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11759# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11760 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11766 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11767#endif /* IEM_WITH_SETJMP */
11768
11769#ifndef IEM_WITH_SETJMP
11770# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11771 do { \
11772 uint8_t u8Tmp; \
11773 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11774 (a_u16Dst) = (int8_t)u8Tmp; \
11775 } while (0)
11776# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11777 do { \
11778 uint8_t u8Tmp; \
11779 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11780 (a_u32Dst) = (int8_t)u8Tmp; \
11781 } while (0)
11782# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11783 do { \
11784 uint8_t u8Tmp; \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11786 (a_u64Dst) = (int8_t)u8Tmp; \
11787 } while (0)
11788# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11789 do { \
11790 uint16_t u16Tmp; \
11791 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11792 (a_u32Dst) = (int16_t)u16Tmp; \
11793 } while (0)
11794# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11795 do { \
11796 uint16_t u16Tmp; \
11797 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11798 (a_u64Dst) = (int16_t)u16Tmp; \
11799 } while (0)
11800# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11801 do { \
11802 uint32_t u32Tmp; \
11803 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11804 (a_u64Dst) = (int32_t)u32Tmp; \
11805 } while (0)
11806#else /* IEM_WITH_SETJMP */
11807# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11808 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11809# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11810 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11811# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11812 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11813# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11814 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11815# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11816 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11817# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11818 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11819#endif /* IEM_WITH_SETJMP */
11820
11821#ifndef IEM_WITH_SETJMP
11822# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11824# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11825 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11826# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11827 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11828# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11830#else
11831# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11832 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11833# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11834 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11835# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11836 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11837# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11838 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11839#endif
11840
11841#ifndef IEM_WITH_SETJMP
11842# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11844# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11846# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11848# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11850#else
11851# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11852 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11853# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11854 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11855# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11856 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11857# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11858 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11859#endif
11860
11861#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11862#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11863#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11864#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11865#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11866#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11867#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11868 do { \
11869 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11870 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11871 } while (0)
11872
11873#ifndef IEM_WITH_SETJMP
11874# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11876# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11878#else
11879# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11880 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11881# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11882 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11883#endif
11884
11885#ifndef IEM_WITH_SETJMP
11886# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11888# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11889 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11890#else
11891# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11892 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11893# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11894 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11895#endif
11896
11897
11898#define IEM_MC_PUSH_U16(a_u16Value) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11900#define IEM_MC_PUSH_U32(a_u32Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11902#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11904#define IEM_MC_PUSH_U64(a_u64Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11906
11907#define IEM_MC_POP_U16(a_pu16Value) \
11908 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11909#define IEM_MC_POP_U32(a_pu32Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11911#define IEM_MC_POP_U64(a_pu64Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11913
11914/** Maps guest memory for direct or bounce buffered access.
11915 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11916 * @remarks May return.
11917 */
11918#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11920
11921/** Maps guest memory for direct or bounce buffered access.
11922 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11923 * @remarks May return.
11924 */
11925#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11926 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11927
11928/** Commits the memory and unmaps the guest memory.
11929 * @remarks May return.
11930 */
11931#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
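/* Illustrative sketch of the usual read-modify-write pattern built on the two
 * macros above.  The operand name, source value and effective address below are
 * assumptions for illustration only; in real decoder code they come from
 * IEM_MC_ARG/IEM_MC_LOCAL and IEM_MC_CALC_RM_EFF_ADDR, and the modification is
 * normally done by an assembly/C worker rather than inline. */
#if 0
    uint32_t *pu32Dst;
    IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
    *pu32Dst |= u32Src; /* operate on the mapped (or bounce buffered) guest memory */
    IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
#endif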
11933
11934/** Commits the memory and unmaps the guest memory, unless the FPU status word
11935 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11936 * that would cause FLD not to store.
11937 *
11938 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11939 * store, while \#P will not.
11940 *
11941 * @remarks May in theory return - for now.
11942 */
11943#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11944 do { \
11945 if ( !(a_u16FSW & X86_FSW_ES) \
11946 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11947 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11948 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11949 } while (0)
11950
11951/** Calculate efficient address from R/M. */
11952#ifndef IEM_WITH_SETJMP
11953# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11954 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11955#else
11956# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11957 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11958#endif
11959
11960#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11961#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11962#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11963#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11964#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11965#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11966#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11967
11968/**
11969 * Defers the rest of the instruction emulation to a C implementation routine
11970 * and returns, only taking the standard parameters.
11971 *
11972 * @param a_pfnCImpl The pointer to the C routine.
11973 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11974 */
11975#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11976
11977/**
11978 * Defers the rest of instruction emulation to a C implementation routine and
11979 * returns, taking one argument in addition to the standard ones.
11980 *
11981 * @param a_pfnCImpl The pointer to the C routine.
11982 * @param a0 The argument.
11983 */
11984#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11985
11986/**
11987 * Defers the rest of the instruction emulation to a C implementation routine
11988 * and returns, taking two arguments in addition to the standard ones.
11989 *
11990 * @param a_pfnCImpl The pointer to the C routine.
11991 * @param a0 The first extra argument.
11992 * @param a1 The second extra argument.
11993 */
11994#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11995
11996/**
11997 * Defers the rest of the instruction emulation to a C implementation routine
11998 * and returns, taking three arguments in addition to the standard ones.
11999 *
12000 * @param a_pfnCImpl The pointer to the C routine.
12001 * @param a0 The first extra argument.
12002 * @param a1 The second extra argument.
12003 * @param a2 The third extra argument.
12004 */
12005#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12006
12007/**
12008 * Defers the rest of the instruction emulation to a C implementation routine
12009 * and returns, taking four arguments in addition to the standard ones.
12010 *
12011 * @param a_pfnCImpl The pointer to the C routine.
12012 * @param a0 The first extra argument.
12013 * @param a1 The second extra argument.
12014 * @param a2 The third extra argument.
12015 * @param a3 The fourth extra argument.
12016 */
12017#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12018
12019/**
12020 * Defers the rest of the instruction emulation to a C implementation routine
12021 * and returns, taking five arguments in addition to the standard ones.
12022 *
12023 * @param a_pfnCImpl The pointer to the C routine.
12024 * @param a0 The first extra argument.
12025 * @param a1 The second extra argument.
12026 * @param a2 The third extra argument.
12027 * @param a3 The fourth extra argument.
12028 * @param a4 The fifth extra argument.
12029 */
12030#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
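/* For reference, a call such as IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, uArg0, uArg1)
 * (worker name hypothetical) simply expands to
 *      return iemCImpl_SomeWorker(pVCpu, IEM_GET_INSTR_LEN(pVCpu), uArg0, uArg1);
 * i.e. the VCPU pointer and the instruction length are always passed as the
 * leading "standard" parameters, and the macro ends the current function. */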
12031
12032/**
12033 * Defers the entire instruction emulation to a C implementation routine and
12034 * returns, only taking the standard parameters.
12035 *
12036 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12037 *
12038 * @param a_pfnCImpl The pointer to the C routine.
12039 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12040 */
12041#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12042
12043/**
12044 * Defers the entire instruction emulation to a C implementation routine and
12045 * returns, taking one argument in addition to the standard ones.
12046 *
12047 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12048 *
12049 * @param a_pfnCImpl The pointer to the C routine.
12050 * @param a0 The argument.
12051 */
12052#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12053
12054/**
12055 * Defers the entire instruction emulation to a C implementation routine and
12056 * returns, taking two arguments in addition to the standard ones.
12057 *
12058 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12059 *
12060 * @param a_pfnCImpl The pointer to the C routine.
12061 * @param a0 The first extra argument.
12062 * @param a1 The second extra argument.
12063 */
12064#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12065
12066/**
12067 * Defers the entire instruction emulation to a C implementation routine and
12068 * returns, taking three arguments in addition to the standard ones.
12069 *
12070 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12071 *
12072 * @param a_pfnCImpl The pointer to the C routine.
12073 * @param a0 The first extra argument.
12074 * @param a1 The second extra argument.
12075 * @param a2 The third extra argument.
12076 */
12077#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
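/* Illustrative sketch of a decoder that defers everything to a C worker.  The
 * opcode handler and worker names are hypothetical; FNIEMOP_DEF and
 * IEMOP_MNEMONIC are the usual helpers from this file. */
#if 0
FNIEMOP_DEF(iemOp_someInstr)
{
    IEMOP_MNEMONIC(someinstr, "someinstr");
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_someInstr);
}
#endif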
12078
12079/**
12080 * Calls a FPU assembly implementation taking one visible argument.
12081 *
12082 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12083 * @param a0 The first extra argument.
12084 */
12085#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12086 do { \
12087 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12088 } while (0)
12089
12090/**
12091 * Calls a FPU assembly implementation taking two visible arguments.
12092 *
12093 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12094 * @param a0 The first extra argument.
12095 * @param a1 The second extra argument.
12096 */
12097#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12098 do { \
12099 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12100 } while (0)
12101
12102/**
12103 * Calls a FPU assembly implementation taking three visible arguments.
12104 *
12105 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12106 * @param a0 The first extra argument.
12107 * @param a1 The second extra argument.
12108 * @param a2 The third extra argument.
12109 */
12110#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12111 do { \
12112 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12113 } while (0)
12114
12115#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12116 do { \
12117 (a_FpuData).FSW = (a_FSW); \
12118 (a_FpuData).r80Result = *(a_pr80Value); \
12119 } while (0)
12120
12121/** Pushes FPU result onto the stack. */
12122#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12123 iemFpuPushResult(pVCpu, &a_FpuData)
12124/** Pushes FPU result onto the stack and sets the FPUDP. */
12125#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12126 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12127
12128/** Replaces ST0 with the first result value and pushes the second one onto the FPU stack. */
12129#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12130 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12131
12132/** Stores FPU result in a stack register. */
12133#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12134 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12135/** Stores FPU result in a stack register and pops the stack. */
12136#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12137 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12138/** Stores FPU result in a stack register and sets the FPUDP. */
12139#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12140 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12141/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12142 * stack. */
12143#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12144 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
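/* Illustrative sketch of how the result macros above are typically combined for
 * an ST(0) <- ST(0) op ST(i) style instruction.  The IEM_MC_LOCAL/IEM_MC_ARG
 * declarations of FpuRes, pFpuRes, pr80Value1, pr80Value2 and iStReg are elided,
 * and the worker name is only an assumption. */
#if 0
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
#endif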
12145
12146/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12147#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12148 iemFpuUpdateOpcodeAndIp(pVCpu)
12149/** Free a stack register (for FFREE and FFREEP). */
12150#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12151 iemFpuStackFree(pVCpu, a_iStReg)
12152/** Increment the FPU stack pointer. */
12153#define IEM_MC_FPU_STACK_INC_TOP() \
12154 iemFpuStackIncTop(pVCpu)
12155/** Decrement the FPU stack pointer. */
12156#define IEM_MC_FPU_STACK_DEC_TOP() \
12157 iemFpuStackDecTop(pVCpu)
12158
12159/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12160#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12161 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12162/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12163#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12164 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12165/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12166#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12167 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12168/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12169#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12170 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12171/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12172 * stack. */
12173#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12174 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12175/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12176#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12177 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12178
12179/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12180#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12181 iemFpuStackUnderflow(pVCpu, a_iStDst)
12182/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12183 * stack. */
12184#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12185 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12186/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12187 * FPUDS. */
12188#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12189 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12190/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12191 * FPUDS. Pops stack. */
12192#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12193 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12194/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12195 * stack twice. */
12196#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12197 iemFpuStackUnderflowThenPopPop(pVCpu)
12198/** Raises a FPU stack underflow exception for an instruction pushing a result
12199 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12200#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12201 iemFpuStackPushUnderflow(pVCpu)
12202/** Raises a FPU stack underflow exception for an instruction pushing a result
12203 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12204#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12205 iemFpuStackPushUnderflowTwo(pVCpu)
12206
12207/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12208 * FPUIP, FPUCS and FOP. */
12209#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12210 iemFpuStackPushOverflow(pVCpu)
12211/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12212 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12213#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12214 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12215/** Prepares for using the FPU state.
12216 * Ensures that we can use the host FPU in the current context (RC+R0).
12217 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12218#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12219/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12220#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12221/** Actualizes the guest FPU state so it can be accessed and modified. */
12222#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12223
12224/** Prepares for using the SSE state.
12225 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12226 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12227#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12228/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12229#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12230/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12231#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12232
12233/** Prepares for using the AVX state.
12234 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12235 * Ensures the guest AVX state in the CPUMCTX is up to date.
12236 * @note This will include the AVX512 state too when support for it is added,
12237 * due to the zero extending behaviour of VEX encoded instructions. */
12238#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12239/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12240#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12241/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12242#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12243
12244/**
12245 * Calls a MMX assembly implementation taking two visible arguments.
12246 *
12247 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12248 * @param a0 The first extra argument.
12249 * @param a1 The second extra argument.
12250 */
12251#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12252 do { \
12253 IEM_MC_PREPARE_FPU_USAGE(); \
12254 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12255 } while (0)
12256
12257/**
12258 * Calls a MMX assembly implementation taking three visible arguments.
12259 *
12260 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12261 * @param a0 The first extra argument.
12262 * @param a1 The second extra argument.
12263 * @param a2 The third extra argument.
12264 */
12265#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12266 do { \
12267 IEM_MC_PREPARE_FPU_USAGE(); \
12268 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12269 } while (0)
12270
12271
12272/**
12273 * Calls a SSE assembly implementation taking two visible arguments.
12274 *
12275 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12276 * @param a0 The first extra argument.
12277 * @param a1 The second extra argument.
12278 */
12279#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12280 do { \
12281 IEM_MC_PREPARE_SSE_USAGE(); \
12282 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12283 } while (0)
12284
12285/**
12286 * Calls a SSE assembly implementation taking three visible arguments.
12287 *
12288 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12289 * @param a0 The first extra argument.
12290 * @param a1 The second extra argument.
12291 * @param a2 The third extra argument.
12292 */
12293#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12294 do { \
12295 IEM_MC_PREPARE_SSE_USAGE(); \
12296 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12297 } while (0)
12298
12299
12300/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12301 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12302#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12303 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12304
12305/**
12306 * Calls an AVX assembly implementation taking two visible arguments.
12307 *
12308 * There is one implicit zeroth argument, a pointer to the extended state.
12309 *
12310 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12311 * @param a1 The first extra argument.
12312 * @param a2 The second extra argument.
12313 */
12314#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12315 do { \
12316 IEM_MC_PREPARE_AVX_USAGE(); \
12317 a_pfnAImpl(pXState, (a1), (a2)); \
12318 } while (0)
12319
12320/**
12321 * Calls an AVX assembly implementation taking three visible arguments.
12322 *
12323 * There is one implicit zeroth argument, a pointer to the extended state.
12324 *
12325 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12326 * @param a1 The first extra argument.
12327 * @param a2 The second extra argument.
12328 * @param a3 The third extra argument.
12329 */
12330#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12331 do { \
12332 IEM_MC_PREPARE_AVX_USAGE(); \
12333 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12334 } while (0)
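/* Illustrative pairing of the macros above: IEM_MC_IMPLICIT_AVX_AIMPL_ARGS()
 * declares the implicit 'pXState' argument which IEM_MC_CALL_AVX_AIMPL_2/3 then
 * pass as the zeroth parameter, e.g. (worker and argument names hypothetical):
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      ...IEM_MC_ARG declarations for puDst and puSrc...
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vsomething_u256, puDst, puSrc);
 */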
12335
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12338/** @note Not for IOPL or IF testing. */
12339#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12342/** @note Not for IOPL or IF testing. */
12343#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12344/** @note Not for IOPL or IF testing. */
12345#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12346 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12347 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12348/** @note Not for IOPL or IF testing. */
12349#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12350 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12351 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12352/** @note Not for IOPL or IF testing. */
12353#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12354 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12355 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12356 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12357/** @note Not for IOPL or IF testing. */
12358#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12359 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12360 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12361 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12362#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12363#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12364#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12365/** @note Not for IOPL or IF testing. */
12366#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12367 if ( pVCpu->cpum.GstCtx.cx != 0 \
12368 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12369/** @note Not for IOPL or IF testing. */
12370#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12371 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12372 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12373/** @note Not for IOPL or IF testing. */
12374#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12375 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12376 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12377/** @note Not for IOPL or IF testing. */
12378#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12379 if ( pVCpu->cpum.GstCtx.cx != 0 \
12380 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12381/** @note Not for IOPL or IF testing. */
12382#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12383 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12384 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12385/** @note Not for IOPL or IF testing. */
12386#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12387 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12388 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12389#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12390#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12391
12392#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12393 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12394#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12395 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12396#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12397 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12398#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12399 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12400#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12401 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12402#define IEM_MC_IF_FCW_IM() \
12403 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12404
12405#define IEM_MC_ELSE() } else {
12406#define IEM_MC_ENDIF() } do {} while (0)
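/* Note on the IEM_MC_IF_* family above: each IF macro opens a brace which must
 * be closed by IEM_MC_ELSE()/IEM_MC_ENDIF(), giving decoder code this shape
 * (condition and branch bodies are placeholders):
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          ...taken path...
 *      IEM_MC_ELSE()
 *          ...not-taken path...
 *      IEM_MC_ENDIF();
 */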
12407
12408/** @} */
12409
12410
12411/** @name Opcode Debug Helpers.
12412 * @{
12413 */
12414#ifdef VBOX_WITH_STATISTICS
12415# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12416#else
12417# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12418#endif
12419
12420#ifdef DEBUG
12421# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12422 do { \
12423 IEMOP_INC_STATS(a_Stats); \
12424 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12425 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12426 } while (0)
12427
12428# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12429 do { \
12430 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12431 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12432 (void)RT_CONCAT(OP_,a_Upper); \
12433 (void)(a_fDisHints); \
12434 (void)(a_fIemHints); \
12435 } while (0)
12436
12437# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12438 do { \
12439 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12440 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12441 (void)RT_CONCAT(OP_,a_Upper); \
12442 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12443 (void)(a_fDisHints); \
12444 (void)(a_fIemHints); \
12445 } while (0)
12446
12447# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12448 do { \
12449 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12450 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12451 (void)RT_CONCAT(OP_,a_Upper); \
12452 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12453 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12454 (void)(a_fDisHints); \
12455 (void)(a_fIemHints); \
12456 } while (0)
12457
12458# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12459 do { \
12460 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12461 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12462 (void)RT_CONCAT(OP_,a_Upper); \
12463 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12464 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12465 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12466 (void)(a_fDisHints); \
12467 (void)(a_fIemHints); \
12468 } while (0)
12469
12470# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12471 do { \
12472 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12473 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12474 (void)RT_CONCAT(OP_,a_Upper); \
12475 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12476 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12477 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12478 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12479 (void)(a_fDisHints); \
12480 (void)(a_fIemHints); \
12481 } while (0)
12482
12483#else
12484# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12485
12486# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12487 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12488# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12489 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12490# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12491 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12492# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12493 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12494# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12495 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12496
12497#endif
12498
12499#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12500 IEMOP_MNEMONIC0EX(a_Lower, \
12501 #a_Lower, \
12502 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12503#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12504 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12505 #a_Lower " " #a_Op1, \
12506 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12507#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12508 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12509 #a_Lower " " #a_Op1 "," #a_Op2, \
12510 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12511#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12512 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12513 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12514 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12515#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12516 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12517 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12518 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
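/* Worked example (operand choice hypothetical): an invocation such as
 *      IEMOP_MNEMONIC2(RM, ADD, add, Gb, Eb, <dis-hints>, <iem-hints>)
 * produces the statistics member 'add_Gb_Eb' and the mnemonic string "add Gb,Eb"
 * for the debug Log4 output; the form, opcode and operand arguments are only
 * consumed via the (void) casts in the *EX macros above, which serves as a
 * compile-time check that the corresponding IEMOPFORM_, OP_ and OP_PARM_
 * constants exist. */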
12519
12520/** @} */
12521
12522
12523/** @name Opcode Helpers.
12524 * @{
12525 */
12526
12527#ifdef IN_RING3
12528# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12529 do { \
12530 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12531 else \
12532 { \
12533 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12534 return IEMOP_RAISE_INVALID_OPCODE(); \
12535 } \
12536 } while (0)
12537#else
12538# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12539 do { \
12540 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12541 else return IEMOP_RAISE_INVALID_OPCODE(); \
12542 } while (0)
12543#endif
12544
12545/** The instruction requires a 186 or later. */
12546#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12547# define IEMOP_HLP_MIN_186() do { } while (0)
12548#else
12549# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12550#endif
12551
12552/** The instruction requires a 286 or later. */
12553#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12554# define IEMOP_HLP_MIN_286() do { } while (0)
12555#else
12556# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12557#endif
12558
12559/** The instruction requires a 386 or later. */
12560#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12561# define IEMOP_HLP_MIN_386() do { } while (0)
12562#else
12563# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12564#endif
12565
12566/** The instruction requires a 386 or later if the given expression is true. */
12567#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12568# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12569#else
12570# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12571#endif
12572
12573/** The instruction requires a 486 or later. */
12574#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12575# define IEMOP_HLP_MIN_486() do { } while (0)
12576#else
12577# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12578#endif
12579
12580/** The instruction requires a Pentium (586) or later. */
12581#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12582# define IEMOP_HLP_MIN_586() do { } while (0)
12583#else
12584# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12585#endif
12586
12587/** The instruction requires a PentiumPro (686) or later. */
12588#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12589# define IEMOP_HLP_MIN_686() do { } while (0)
12590#else
12591# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12592#endif
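/* Usage note: a decoder function for, say, a 386+ only instruction simply starts
 * with IEMOP_HLP_MIN_386(); when the configured target CPU is older, the helper
 * raises #UD (and hits the DBGFSTOP above in ring-3 builds) instead of letting
 * the decode continue. */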
12593
12594
12595/** The instruction raises an \#UD in real and V8086 mode. */
12596#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12597 do \
12598 { \
12599 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12600 else return IEMOP_RAISE_INVALID_OPCODE(); \
12601 } while (0)
12602
12603#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12604/** The instruction raises an \#UD in real and V8086 mode, or when executed in
12605 * long mode without a 64-bit code segment (applicable to all VMX instructions
12606 * except VMCALL).
12607 */
12608#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12609 do \
12610 { \
12611 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12612 && ( !IEM_IS_LONG_MODE(pVCpu) \
12613 || IEM_IS_64BIT_CODE(pVCpu))) \
12614 { /* likely */ } \
12615 else \
12616 { \
12617 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12618 { \
12619 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12620 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12621 return IEMOP_RAISE_INVALID_OPCODE(); \
12622 } \
12623 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12624 { \
12625 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12626 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12627 return IEMOP_RAISE_INVALID_OPCODE(); \
12628 } \
12629 } \
12630 } while (0)
12631
12632/** The instruction can only be executed in VMX operation (VMX root mode and
12633 * non-root mode).
12634 *
12635 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12636 */
12637# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12638 do \
12639 { \
12640 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12641 else \
12642 { \
12643 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12644 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12645 return IEMOP_RAISE_INVALID_OPCODE(); \
12646 } \
12647 } while (0)
12648#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12649
12650/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12651 * 64-bit mode. */
12652#define IEMOP_HLP_NO_64BIT() \
12653 do \
12654 { \
12655 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12656 return IEMOP_RAISE_INVALID_OPCODE(); \
12657 } while (0)
12658
12659/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12660 * 64-bit mode. */
12661#define IEMOP_HLP_ONLY_64BIT() \
12662 do \
12663 { \
12664 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12665 return IEMOP_RAISE_INVALID_OPCODE(); \
12666 } while (0)
12667
12668/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12669#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12670 do \
12671 { \
12672 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12673 iemRecalEffOpSize64Default(pVCpu); \
12674 } while (0)
12675
12676/** The instruction has 64-bit operand size if 64-bit mode. */
12677#define IEMOP_HLP_64BIT_OP_SIZE() \
12678 do \
12679 { \
12680 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12681 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12682 } while (0)
12683
12684/** Only a REX prefix immediately preceding the first opcode byte takes
12685 * effect. This macro helps ensure this, as well as logging bad guest code. */
12686#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12687 do \
12688 { \
12689 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12690 { \
12691 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12692 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12693 pVCpu->iem.s.uRexB = 0; \
12694 pVCpu->iem.s.uRexIndex = 0; \
12695 pVCpu->iem.s.uRexReg = 0; \
12696 iemRecalEffOpSize(pVCpu); \
12697 } \
12698 } while (0)
12699
12700/**
12701 * Done decoding.
12702 */
12703#define IEMOP_HLP_DONE_DECODING() \
12704 do \
12705 { \
12706 /*nothing for now, maybe later... */ \
12707 } while (0)
12708
12709/**
12710 * Done decoding, raise \#UD exception if lock prefix present.
12711 */
12712#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12713 do \
12714 { \
12715 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12716 { /* likely */ } \
12717 else \
12718 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12719 } while (0)
12720
12721
12722/**
12723 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12724 * repnz or size prefixes are present, or if in real or v8086 mode.
12725 */
12726#define IEMOP_HLP_DONE_VEX_DECODING() \
12727 do \
12728 { \
12729 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12730 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12731 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12732 { /* likely */ } \
12733 else \
12734 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12735 } while (0)
12736
12737/**
12738 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12739 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12740 */
12741#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12742 do \
12743 { \
12744 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12745 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12746 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12747 && pVCpu->iem.s.uVexLength == 0)) \
12748 { /* likely */ } \
12749 else \
12750 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12751 } while (0)
12752
12753
12754/**
12755 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12756 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12757 * register 0, or if in real or v8086 mode.
12758 */
12759#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12760 do \
12761 { \
12762 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12763 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12764 && !pVCpu->iem.s.uVex3rdReg \
12765 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12766 { /* likely */ } \
12767 else \
12768 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12769 } while (0)
12770
12771/**
12772 * Done decoding VEX, no V, L=0.
12773 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12774 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12775 */
12776#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12777 do \
12778 { \
12779 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12780 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12781 && pVCpu->iem.s.uVexLength == 0 \
12782 && pVCpu->iem.s.uVex3rdReg == 0 \
12783 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12784 { /* likely */ } \
12785 else \
12786 return IEMOP_RAISE_INVALID_OPCODE(); \
12787 } while (0)
12788
12789#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12790 do \
12791 { \
12792 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12793 { /* likely */ } \
12794 else \
12795 { \
12796 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12797 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12798 } \
12799 } while (0)
12800#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12801 do \
12802 { \
12803 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12804 { /* likely */ } \
12805 else \
12806 { \
12807 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12808 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12809 } \
12810 } while (0)
12811
12812/**
12813 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12814 * are present.
12815 */
12816#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12817 do \
12818 { \
12819 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12820 { /* likely */ } \
12821 else \
12822 return IEMOP_RAISE_INVALID_OPCODE(); \
12823 } while (0)
12824
12825/**
12826 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12827 * prefixes are present.
12828 */
12829#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12830 do \
12831 { \
12832 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12833 { /* likely */ } \
12834 else \
12835 return IEMOP_RAISE_INVALID_OPCODE(); \
12836 } while (0)
12837
12838
12839/**
12840 * Calculates the effective address of a ModR/M memory operand.
12841 *
12842 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12843 *
12844 * @return Strict VBox status code.
12845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12846 * @param bRm The ModRM byte.
12847 * @param cbImm The size of any immediate following the
12848 * effective address opcode bytes. Important for
12849 * RIP relative addressing.
12850 * @param pGCPtrEff Where to return the effective address.
12851 */
12852IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12853{
12854 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12855# define SET_SS_DEF() \
12856 do \
12857 { \
12858 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12859 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12860 } while (0)
12861
12862 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12863 {
12864/** @todo Check the effective address size crap! */
12865 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12866 {
12867 uint16_t u16EffAddr;
12868
12869 /* Handle the disp16 form with no registers first. */
12870 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12871 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12872 else
12873 {
12874                /* Get the displacement. */
12875 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12876 {
12877 case 0: u16EffAddr = 0; break;
12878 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12879 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12880 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12881 }
12882
12883 /* Add the base and index registers to the disp. */
12884 switch (bRm & X86_MODRM_RM_MASK)
12885 {
12886 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12887 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12888 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12889 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12890 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12891 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12892 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12893 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12894 }
12895 }
12896
12897 *pGCPtrEff = u16EffAddr;
12898 }
12899 else
12900 {
12901 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12902 uint32_t u32EffAddr;
12903
12904 /* Handle the disp32 form with no registers first. */
12905 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12906 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12907 else
12908 {
12909 /* Get the register (or SIB) value. */
12910 switch ((bRm & X86_MODRM_RM_MASK))
12911 {
12912 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12913 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12914 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12915 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12916 case 4: /* SIB */
12917 {
12918 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12919
12920 /* Get the index and scale it. */
12921 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12922 {
12923 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12924 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12925 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12926 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12927 case 4: u32EffAddr = 0; /*none */ break;
12928 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12929 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12930 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12931 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12932 }
12933 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12934
12935 /* add base */
12936 switch (bSib & X86_SIB_BASE_MASK)
12937 {
12938 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12939 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12940 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12941 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12942 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12943 case 5:
12944 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12945 {
12946 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12947 SET_SS_DEF();
12948 }
12949 else
12950 {
12951 uint32_t u32Disp;
12952 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12953 u32EffAddr += u32Disp;
12954 }
12955 break;
12956 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12957 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12958 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12959 }
12960 break;
12961 }
12962 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12963 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12964 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12965 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12966 }
12967
12968 /* Get and add the displacement. */
12969 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12970 {
12971 case 0:
12972 break;
12973 case 1:
12974 {
12975 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12976 u32EffAddr += i8Disp;
12977 break;
12978 }
12979 case 2:
12980 {
12981 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12982 u32EffAddr += u32Disp;
12983 break;
12984 }
12985 default:
12986 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12987 }
12988
12989 }
12990 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12991 *pGCPtrEff = u32EffAddr;
12992 else
12993 {
12994 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12995 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12996 }
12997 }
12998 }
12999 else
13000 {
13001 uint64_t u64EffAddr;
13002
13003 /* Handle the rip+disp32 form with no registers first. */
13004 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13005 {
13006 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13007 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13008 }
13009 else
13010 {
13011 /* Get the register (or SIB) value. */
13012 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13013 {
13014 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13015 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13016 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13017 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13018 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13019 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13020 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13021 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13022 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13023 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13024 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13025 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13026 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13027 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13028 /* SIB */
13029 case 4:
13030 case 12:
13031 {
13032 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13033
13034 /* Get the index and scale it. */
13035 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13036 {
13037 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13038 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13039 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13040 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13041 case 4: u64EffAddr = 0; /*none */ break;
13042 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13043 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13044 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13045 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13046 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13047 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13048 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13049 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13050 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13051 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13052 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13053 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13054 }
13055 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13056
13057 /* add base */
13058 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13059 {
13060 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13061 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13062 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13063 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13064 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13065 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13066 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13067 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13068 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13069 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13070 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13071 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13072 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13073 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13074 /* complicated encodings */
13075 case 5:
13076 case 13:
13077 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13078 {
13079 if (!pVCpu->iem.s.uRexB)
13080 {
13081 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13082 SET_SS_DEF();
13083 }
13084 else
13085 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13086 }
13087 else
13088 {
13089 uint32_t u32Disp;
13090 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13091 u64EffAddr += (int32_t)u32Disp;
13092 }
13093 break;
13094 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13095 }
13096 break;
13097 }
13098 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13099 }
13100
13101 /* Get and add the displacement. */
13102 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13103 {
13104 case 0:
13105 break;
13106 case 1:
13107 {
13108 int8_t i8Disp;
13109 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13110 u64EffAddr += i8Disp;
13111 break;
13112 }
13113 case 2:
13114 {
13115 uint32_t u32Disp;
13116 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13117 u64EffAddr += (int32_t)u32Disp;
13118 break;
13119 }
13120 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13121 }
13122
13123 }
13124
13125 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13126 *pGCPtrEff = u64EffAddr;
13127 else
13128 {
13129 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13130 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13131 }
13132 }
13133
13134 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13135 return VINF_SUCCESS;
13136}
13137
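/* Worked example for the RIP-relative case above (numbers made up): for a 6 byte
 * instruction at RIP 0x1000 consisting of opcode + ModR/M (mod=0, r/m=5) + disp32
 * and no immediate, IEM_GET_INSTR_LEN() is 6 and cbImm is 0 once the displacement
 * has been fetched, so a disp32 of 0x10 gives an effective address of 0x1016,
 * i.e. the displacement is relative to the start of the *next* instruction. */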
13138
13139/**
13140 * Calculates the effective address of a ModR/M memory operand.
13141 *
13142 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13143 *
13144 * @return Strict VBox status code.
13145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13146 * @param bRm The ModRM byte.
13147 * @param cbImm The size of any immediate following the
13148 * effective address opcode bytes. Important for
13149 * RIP relative addressing.
13150 * @param pGCPtrEff Where to return the effective address.
13151 * @param offRsp RSP displacement.
13152 */
13153IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13154{
13155    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13156# define SET_SS_DEF() \
13157 do \
13158 { \
13159 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13160 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13161 } while (0)
13162
13163 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13164 {
13165/** @todo Check the effective address size crap! */
13166 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13167 {
13168 uint16_t u16EffAddr;
13169
13170 /* Handle the disp16 form with no registers first. */
13171 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13172 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13173 else
13174 {
13175                /* Get the displacement. */
13176 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13177 {
13178 case 0: u16EffAddr = 0; break;
13179 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13180 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13181 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13182 }
13183
13184 /* Add the base and index registers to the disp. */
13185 switch (bRm & X86_MODRM_RM_MASK)
13186 {
13187 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13188 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13189 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13190 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13191 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13192 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13193 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13194 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13195 }
13196 }
13197
13198 *pGCPtrEff = u16EffAddr;
13199 }
13200 else
13201 {
13202 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13203 uint32_t u32EffAddr;
13204
13205 /* Handle the disp32 form with no registers first. */
13206 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13207 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13208 else
13209 {
13210 /* Get the register (or SIB) value. */
13211 switch ((bRm & X86_MODRM_RM_MASK))
13212 {
13213 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13214 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13215 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13216 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13217 case 4: /* SIB */
13218 {
13219 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13220
13221 /* Get the index and scale it. */
13222 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13223 {
13224 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13225 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13226 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13227 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13228 case 4: u32EffAddr = 0; /*none */ break;
13229 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13230 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13231 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13232 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13233 }
13234 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13235
13236 /* add base */
13237 switch (bSib & X86_SIB_BASE_MASK)
13238 {
13239 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13240 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13241 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13242 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13243 case 4:
13244 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13245 SET_SS_DEF();
13246 break;
13247 case 5:
13248 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13249 {
13250 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13251 SET_SS_DEF();
13252 }
13253 else
13254 {
13255 uint32_t u32Disp;
13256 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13257 u32EffAddr += u32Disp;
13258 }
13259 break;
13260 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13261 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13262 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13263 }
13264 break;
13265 }
13266 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13267 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13268 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13269 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13270 }
13271
13272 /* Get and add the displacement. */
13273 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13274 {
13275 case 0:
13276 break;
13277 case 1:
13278 {
13279 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13280 u32EffAddr += i8Disp;
13281 break;
13282 }
13283 case 2:
13284 {
13285 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13286 u32EffAddr += u32Disp;
13287 break;
13288 }
13289 default:
13290 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13291 }
13292
13293 }
13294 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13295 *pGCPtrEff = u32EffAddr;
13296 else
13297 {
13298 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13299 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13300 }
13301 }
13302 }
13303 else
13304 {
13305 uint64_t u64EffAddr;
13306
13307 /* Handle the rip+disp32 form with no registers first. */
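        /* (Illustrative note, not from the original sources: RIP-relative
           displacements are relative to the address of the *next* instruction.
           The displacement is decoded before any immediate operand, so the
           immediate length still has to be added - that is what the cbImm
           parameter supplies, e.g. 4 for "cmp dword [rip+disp32], imm32".) */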
13308 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13309 {
13310 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13311 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13312 }
13313 else
13314 {
13315 /* Get the register (or SIB) value. */
13316 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13317 {
13318 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13319 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13320 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13321 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13322 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13323 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13324 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13325 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13326 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13327 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13328 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13329 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13330 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13331 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13332 /* SIB */
13333 case 4:
13334 case 12:
13335 {
13336 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13337
13338 /* Get the index and scale it. */
13339 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13340 {
13341 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13342 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13343 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13344 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13345 case 4: u64EffAddr = 0; /*none */ break;
13346 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13347 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13348 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13349 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13350 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13351 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13352 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13353 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13354 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13355 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13356 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13357 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13358 }
13359 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13360
13361 /* add base */
13362 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13363 {
13364 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13365 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13366 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13367 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13368 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13369 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13370 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13371 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13372 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13373 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13374 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13375 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13376 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13377 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13378 /* complicated encodings */
13379 case 5:
13380 case 13:
13381 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13382 {
13383 if (!pVCpu->iem.s.uRexB)
13384 {
13385 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13386 SET_SS_DEF();
13387 }
13388 else
13389 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13390 }
13391 else
13392 {
13393 uint32_t u32Disp;
13394 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13395 u64EffAddr += (int32_t)u32Disp;
13396 }
13397 break;
13398 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13399 }
13400 break;
13401 }
13402 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13403 }
13404
13405 /* Get and add the displacement. */
13406 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13407 {
13408 case 0:
13409 break;
13410 case 1:
13411 {
13412 int8_t i8Disp;
13413 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13414 u64EffAddr += i8Disp;
13415 break;
13416 }
13417 case 2:
13418 {
13419 uint32_t u32Disp;
13420 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13421 u64EffAddr += (int32_t)u32Disp;
13422 break;
13423 }
13424 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13425 }
13426
13427 }
13428
13429 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13430 *pGCPtrEff = u64EffAddr;
13431 else
13432 {
13433 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13434 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13435 }
13436 }
13437
13438    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13439 return VINF_SUCCESS;
13440}
13441
13442
13443#ifdef IEM_WITH_SETJMP
13444/**
13445 * Calculates the effective address of a ModR/M memory operand.
13446 *
13447 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13448 *
13449 * May longjmp on internal error.
13450 *
13451 * @return The effective address.
13452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13453 * @param bRm The ModRM byte.
13454 * @param cbImm The size of any immediate following the
13455 * effective address opcode bytes. Important for
13456 * RIP relative addressing.
13457 */
13458IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13459{
13460 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13461# define SET_SS_DEF() \
13462 do \
13463 { \
13464 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13465 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13466 } while (0)
13467
13468 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13469 {
13470/** @todo Check the effective address size crap! */
13471 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13472 {
13473 uint16_t u16EffAddr;
13474
13475 /* Handle the disp16 form with no registers first. */
13476 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13477 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13478 else
13479 {
13480                /* Get the displacement. */
13481 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13482 {
13483 case 0: u16EffAddr = 0; break;
13484 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13485 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13486 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13487 }
13488
13489 /* Add the base and index registers to the disp. */
13490 switch (bRm & X86_MODRM_RM_MASK)
13491 {
13492 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13493 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13494 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13495 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13496 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13497 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13498 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13499 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13500 }
13501 }
13502
13503 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13504 return u16EffAddr;
13505 }
13506
13507 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13508 uint32_t u32EffAddr;
13509
13510 /* Handle the disp32 form with no registers first. */
13511 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13512 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13513 else
13514 {
13515 /* Get the register (or SIB) value. */
13516 switch ((bRm & X86_MODRM_RM_MASK))
13517 {
13518 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13519 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13520 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13521 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13522 case 4: /* SIB */
13523 {
13524 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13525
13526 /* Get the index and scale it. */
13527 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13528 {
13529 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13530 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13531 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13532 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13533 case 4: u32EffAddr = 0; /*none */ break;
13534 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13535 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13536 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13537 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13538 }
13539 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13540
13541 /* add base */
13542 switch (bSib & X86_SIB_BASE_MASK)
13543 {
13544 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13545 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13546 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13547 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13548 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13549 case 5:
13550 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13551 {
13552 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13553 SET_SS_DEF();
13554 }
13555 else
13556 {
13557 uint32_t u32Disp;
13558 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13559 u32EffAddr += u32Disp;
13560 }
13561 break;
13562 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13563 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13564 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13565 }
13566 break;
13567 }
13568 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13569 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13570 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13571 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13572 }
13573
13574 /* Get and add the displacement. */
13575 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13576 {
13577 case 0:
13578 break;
13579 case 1:
13580 {
13581 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13582 u32EffAddr += i8Disp;
13583 break;
13584 }
13585 case 2:
13586 {
13587 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13588 u32EffAddr += u32Disp;
13589 break;
13590 }
13591 default:
13592 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13593 }
13594 }
13595
13596 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13597 {
13598 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13599 return u32EffAddr;
13600 }
13601 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13602 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13603 return u32EffAddr & UINT16_MAX;
13604 }
13605
13606 uint64_t u64EffAddr;
13607
13608 /* Handle the rip+disp32 form with no registers first. */
13609 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13610 {
13611 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13612 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13613 }
13614 else
13615 {
13616 /* Get the register (or SIB) value. */
13617 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13618 {
13619 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13620 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13621 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13622 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13623 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13624 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13625 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13626 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13627 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13628 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13629 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13630 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13631 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13632 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13633 /* SIB */
13634 case 4:
13635 case 12:
13636 {
13637 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13638
13639 /* Get the index and scale it. */
13640 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13641 {
13642 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13643 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13644 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13645 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13646 case 4: u64EffAddr = 0; /*none */ break;
13647 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13648 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13649 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13650 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13651 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13652 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13653 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13654 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13655 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13656 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13657 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13658 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13659 }
13660 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13661
13662 /* add base */
13663 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13664 {
13665 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13666 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13667 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13668 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13669 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13670 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13671 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13672 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13673 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13674 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13675 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13676 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13677 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13678 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13679 /* complicated encodings */
13680 case 5:
13681 case 13:
13682 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13683 {
13684 if (!pVCpu->iem.s.uRexB)
13685 {
13686 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13687 SET_SS_DEF();
13688 }
13689 else
13690 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13691 }
13692 else
13693 {
13694 uint32_t u32Disp;
13695 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13696 u64EffAddr += (int32_t)u32Disp;
13697 }
13698 break;
13699 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13700 }
13701 break;
13702 }
13703 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13704 }
13705
13706 /* Get and add the displacement. */
13707 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13708 {
13709 case 0:
13710 break;
13711 case 1:
13712 {
13713 int8_t i8Disp;
13714 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13715 u64EffAddr += i8Disp;
13716 break;
13717 }
13718 case 2:
13719 {
13720 uint32_t u32Disp;
13721 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13722 u64EffAddr += (int32_t)u32Disp;
13723 break;
13724 }
13725 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13726 }
13727
13728 }
13729
13730 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13731 {
13732 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13733 return u64EffAddr;
13734 }
13735 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13736 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13737 return u64EffAddr & UINT32_MAX;
13738}
13739#endif /* IEM_WITH_SETJMP */
13740
13741/** @} */
13742
13743
13744
13745/*
13746 * Include the instructions
13747 */
13748#include "IEMAllInstructions.cpp.h"
13749
13750
13751
13752#ifdef LOG_ENABLED
13753/**
13754 * Logs the current instruction.
13755 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13756 * @param fSameCtx Set if we have the same context information as the VMM,
13757 * clear if we may have already executed an instruction in
13758 * our debug context. When clear, we assume IEMCPU holds
13759 * valid CPU mode info.
13760 *
13761 * The @a fSameCtx parameter is now misleading and obsolete.
13762 * @param pszFunction The IEM function doing the execution.
13763 */
13764IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13765{
13766# ifdef IN_RING3
13767 if (LogIs2Enabled())
13768 {
13769 char szInstr[256];
13770 uint32_t cbInstr = 0;
13771 if (fSameCtx)
13772 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13773 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13774 szInstr, sizeof(szInstr), &cbInstr);
13775 else
13776 {
13777 uint32_t fFlags = 0;
13778 switch (pVCpu->iem.s.enmCpuMode)
13779 {
13780 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13781 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13782 case IEMMODE_16BIT:
13783 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13784 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13785 else
13786 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13787 break;
13788 }
13789 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13790 szInstr, sizeof(szInstr), &cbInstr);
13791 }
13792
13793 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13794 Log2(("**** %s\n"
13795 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13796 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13797 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13798 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13799 " %s\n"
13800 , pszFunction,
13801 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13802 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13803 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13804 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13805 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13806 szInstr));
13807
13808 if (LogIs3Enabled())
13809 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13810 }
13811 else
13812# endif
13813 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13814 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13815 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13816}
13817#endif /* LOG_ENABLED */
13818
13819
13820/**
13821 * Makes status code adjustments (pass up from I/O and access handlers)
13822 * as well as maintaining statistics.
13823 *
13824 * @returns Strict VBox status code to pass up.
13825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13826 * @param rcStrict The status from executing an instruction.
13827 */
13828DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13829{
13830 if (rcStrict != VINF_SUCCESS)
13831 {
13832 if (RT_SUCCESS(rcStrict))
13833 {
13834 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13835 || rcStrict == VINF_IOM_R3_IOPORT_READ
13836 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13837 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13838 || rcStrict == VINF_IOM_R3_MMIO_READ
13839 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13840 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13841 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13842 || rcStrict == VINF_CPUM_R3_MSR_READ
13843 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13844 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13845 || rcStrict == VINF_EM_RAW_TO_R3
13846 || rcStrict == VINF_EM_TRIPLE_FAULT
13847 || rcStrict == VINF_GIM_R3_HYPERCALL
13848 /* raw-mode / virt handlers only: */
13849 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13850 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13851 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13852 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13853 || rcStrict == VINF_SELM_SYNC_GDT
13854 || rcStrict == VINF_CSAM_PENDING_ACTION
13855 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13856 /* nested hw.virt codes: */
13857 || rcStrict == VINF_SVM_VMEXIT
13858 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13859/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13860 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13861#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13862 if ( rcStrict == VINF_SVM_VMEXIT
13863 && rcPassUp == VINF_SUCCESS)
13864 rcStrict = VINF_SUCCESS;
13865 else
13866#endif
13867 if (rcPassUp == VINF_SUCCESS)
13868 pVCpu->iem.s.cRetInfStatuses++;
13869 else if ( rcPassUp < VINF_EM_FIRST
13870 || rcPassUp > VINF_EM_LAST
13871 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13872 {
13873 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13874 pVCpu->iem.s.cRetPassUpStatus++;
13875 rcStrict = rcPassUp;
13876 }
13877 else
13878 {
13879 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13880 pVCpu->iem.s.cRetInfStatuses++;
13881 }
13882 }
13883 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13884 pVCpu->iem.s.cRetAspectNotImplemented++;
13885 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13886 pVCpu->iem.s.cRetInstrNotImplemented++;
13887 else
13888 pVCpu->iem.s.cRetErrStatuses++;
13889 }
13890 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13891 {
13892 pVCpu->iem.s.cRetPassUpStatus++;
13893 rcStrict = pVCpu->iem.s.rcPassUp;
13894 }
13895
13896 return rcStrict;
13897}
13898
13899
13900/**
13901 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13902 * IEMExecOneWithPrefetchedByPC.
13903 *
13904 * Similar code is found in IEMExecLots.
13905 *
13906 * @return Strict VBox status code.
13907 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13908 * @param fExecuteInhibit If set, execute the instruction following STI,
13909 * POP SS and MOV SS,Gr while interrupts are inhibited.
13910 * @param pszFunction The calling function name.
13911 */
13912DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13913{
13914 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13915 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13916 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13917 RT_NOREF_PV(pszFunction);
13918
13919#ifdef IEM_WITH_SETJMP
13920 VBOXSTRICTRC rcStrict;
13921 jmp_buf JmpBuf;
13922 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13923 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13924 if ((rcStrict = setjmp(JmpBuf)) == 0)
13925 {
13926 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13927 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13928 }
13929 else
13930 pVCpu->iem.s.cLongJumps++;
13931 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13932#else
13933 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13934 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13935#endif
13936 if (rcStrict == VINF_SUCCESS)
13937 pVCpu->iem.s.cInstructions++;
13938 if (pVCpu->iem.s.cActiveMappings > 0)
13939 {
13940 Assert(rcStrict != VINF_SUCCESS);
13941 iemMemRollback(pVCpu);
13942 }
13943 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13944 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13945 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13946
13947//#ifdef DEBUG
13948// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13949//#endif
13950
13951    /* Execute the next instruction as well if a sti, pop ss or
13952       mov ss, Gr has just completed successfully. */
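    /* (Illustrative, not from the original sources: real-mode guests frequently do
       "mov ss, ax" immediately followed by "mov sp, bx"; the one-instruction
       interrupt shadow guarantees no interrupt is taken between the two stack
       register updates, so both instructions have to be executed here before
       returning to the caller.) */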
13953 if ( fExecuteInhibit
13954 && rcStrict == VINF_SUCCESS
13955 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13956 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13957 {
13958 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13959 if (rcStrict == VINF_SUCCESS)
13960 {
13961#ifdef LOG_ENABLED
13962 iemLogCurInstr(pVCpu, false, pszFunction);
13963#endif
13964#ifdef IEM_WITH_SETJMP
13965 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13966 if ((rcStrict = setjmp(JmpBuf)) == 0)
13967 {
13968 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13969 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13970 }
13971 else
13972 pVCpu->iem.s.cLongJumps++;
13973 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13974#else
13975 IEM_OPCODE_GET_NEXT_U8(&b);
13976 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13977#endif
13978 if (rcStrict == VINF_SUCCESS)
13979 pVCpu->iem.s.cInstructions++;
13980 if (pVCpu->iem.s.cActiveMappings > 0)
13981 {
13982 Assert(rcStrict != VINF_SUCCESS);
13983 iemMemRollback(pVCpu);
13984 }
13985 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13986 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13987 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13988 }
13989 else if (pVCpu->iem.s.cActiveMappings > 0)
13990 iemMemRollback(pVCpu);
13991 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13992 }
13993
13994 /*
13995 * Return value fiddling, statistics and sanity assertions.
13996 */
13997 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13998
13999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14001 return rcStrict;
14002}
14003
14004
14005#ifdef IN_RC
14006/**
14007 * Re-enters raw-mode or ensure we return to ring-3.
14008 *
14009 * @returns rcStrict, maybe modified.
14010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14011 * @param rcStrict The status code returned by the interpreter.
14012 */
14013DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14014{
14015 if ( !pVCpu->iem.s.fInPatchCode
14016 && ( rcStrict == VINF_SUCCESS
14017 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14018 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14019 {
14020 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14021 CPUMRawEnter(pVCpu);
14022 else
14023 {
14024 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14025 rcStrict = VINF_EM_RESCHEDULE;
14026 }
14027 }
14028 return rcStrict;
14029}
14030#endif
14031
14032
14033/**
14034 * Execute one instruction.
14035 *
14036 * @return Strict VBox status code.
14037 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14038 */
14039VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14040{
14041#ifdef LOG_ENABLED
14042 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14043#endif
14044
14045 /*
14046 * Do the decoding and emulation.
14047 */
14048 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14049 if (rcStrict == VINF_SUCCESS)
14050 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14051 else if (pVCpu->iem.s.cActiveMappings > 0)
14052 iemMemRollback(pVCpu);
14053
14054#ifdef IN_RC
14055 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14056#endif
14057 if (rcStrict != VINF_SUCCESS)
14058 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14059 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14060 return rcStrict;
14061}
14062
14063
14064VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14065{
14066 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14067
14068 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14069 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14070 if (rcStrict == VINF_SUCCESS)
14071 {
14072 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14073 if (pcbWritten)
14074 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14075 }
14076 else if (pVCpu->iem.s.cActiveMappings > 0)
14077 iemMemRollback(pVCpu);
14078
14079#ifdef IN_RC
14080 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14081#endif
14082 return rcStrict;
14083}
14084
14085
14086VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14087 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14088{
14089 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14090
14091 VBOXSTRICTRC rcStrict;
14092 if ( cbOpcodeBytes
14093 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14094 {
14095 iemInitDecoder(pVCpu, false);
14096#ifdef IEM_WITH_CODE_TLB
14097 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14098 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14099 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14100 pVCpu->iem.s.offCurInstrStart = 0;
14101 pVCpu->iem.s.offInstrNextByte = 0;
14102#else
14103 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14104 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14105#endif
14106 rcStrict = VINF_SUCCESS;
14107 }
14108 else
14109 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14110 if (rcStrict == VINF_SUCCESS)
14111 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14112 else if (pVCpu->iem.s.cActiveMappings > 0)
14113 iemMemRollback(pVCpu);
14114
14115#ifdef IN_RC
14116 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14117#endif
14118 return rcStrict;
14119}
14120
14121
14122VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14123{
14124 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14125
14126 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14127 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14128 if (rcStrict == VINF_SUCCESS)
14129 {
14130 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14131 if (pcbWritten)
14132 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14133 }
14134 else if (pVCpu->iem.s.cActiveMappings > 0)
14135 iemMemRollback(pVCpu);
14136
14137#ifdef IN_RC
14138 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14139#endif
14140 return rcStrict;
14141}
14142
14143
14144VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14145 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14146{
14147 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14148
14149 VBOXSTRICTRC rcStrict;
14150 if ( cbOpcodeBytes
14151 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14152 {
14153 iemInitDecoder(pVCpu, true);
14154#ifdef IEM_WITH_CODE_TLB
14155 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14156 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14157 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14158 pVCpu->iem.s.offCurInstrStart = 0;
14159 pVCpu->iem.s.offInstrNextByte = 0;
14160#else
14161 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14162 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14163#endif
14164 rcStrict = VINF_SUCCESS;
14165 }
14166 else
14167 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14168 if (rcStrict == VINF_SUCCESS)
14169 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14170 else if (pVCpu->iem.s.cActiveMappings > 0)
14171 iemMemRollback(pVCpu);
14172
14173#ifdef IN_RC
14174 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14175#endif
14176 return rcStrict;
14177}
14178
14179
14180/**
14181 * For debugging DISGetParamSize; may come in handy.
14182 *
14183 * @returns Strict VBox status code.
14184 * @param pVCpu The cross context virtual CPU structure of the
14185 * calling EMT.
14186 * @param pCtxCore The context core structure.
14187 * @param OpcodeBytesPC The PC of the opcode bytes.
14188 * @param pvOpcodeBytes Prefetched opcode bytes.
14189 * @param cbOpcodeBytes Number of prefetched bytes.
14190 * @param pcbWritten Where to return the number of bytes written.
14191 * Optional.
14192 */
14193VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14194 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14195 uint32_t *pcbWritten)
14196{
14197 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14198
14199 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14200 VBOXSTRICTRC rcStrict;
14201 if ( cbOpcodeBytes
14202 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14203 {
14204 iemInitDecoder(pVCpu, true);
14205#ifdef IEM_WITH_CODE_TLB
14206 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14207 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14208 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14209 pVCpu->iem.s.offCurInstrStart = 0;
14210 pVCpu->iem.s.offInstrNextByte = 0;
14211#else
14212 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14213 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14214#endif
14215 rcStrict = VINF_SUCCESS;
14216 }
14217 else
14218 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14219 if (rcStrict == VINF_SUCCESS)
14220 {
14221 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14222 if (pcbWritten)
14223 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14224 }
14225 else if (pVCpu->iem.s.cActiveMappings > 0)
14226 iemMemRollback(pVCpu);
14227
14228#ifdef IN_RC
14229 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14230#endif
14231 return rcStrict;
14232}
14233
14234
14235VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14236{
14237 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14238
14239 /*
14240 * See if there is an interrupt pending in TRPM, inject it if we can.
14241 */
14242 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14243#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14244 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14245 if (fIntrEnabled)
14246 {
14247 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14248 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14249 else
14250 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14251 }
14252#else
14253 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14254#endif
14255 if ( fIntrEnabled
14256 && TRPMHasTrap(pVCpu)
14257 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14258 {
14259 uint8_t u8TrapNo;
14260 TRPMEVENT enmType;
14261 RTGCUINT uErrCode;
14262 RTGCPTR uCr2;
14263 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14264 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14265 TRPMResetTrap(pVCpu);
14266 }
14267
14268 /*
14269 * Initial decoder init w/ prefetch, then setup setjmp.
14270 */
14271 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14272 if (rcStrict == VINF_SUCCESS)
14273 {
14274#ifdef IEM_WITH_SETJMP
14275 jmp_buf JmpBuf;
14276 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14277 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14278 pVCpu->iem.s.cActiveMappings = 0;
14279 if ((rcStrict = setjmp(JmpBuf)) == 0)
14280#endif
14281 {
14282 /*
14283 * The run loop. We limit ourselves to 4096 instructions right now.
14284 */
14285 PVM pVM = pVCpu->CTX_SUFF(pVM);
14286 uint32_t cInstr = 4096;
14287 for (;;)
14288 {
14289 /*
14290 * Log the state.
14291 */
14292#ifdef LOG_ENABLED
14293 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14294#endif
14295
14296 /*
14297 * Do the decoding and emulation.
14298 */
14299 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14300 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14301 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14302 {
14303 Assert(pVCpu->iem.s.cActiveMappings == 0);
14304 pVCpu->iem.s.cInstructions++;
14305 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14306 {
14307 uint32_t fCpu = pVCpu->fLocalForcedActions
14308 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14309 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14310 | VMCPU_FF_TLB_FLUSH
14311#ifdef VBOX_WITH_RAW_MODE
14312 | VMCPU_FF_TRPM_SYNC_IDT
14313 | VMCPU_FF_SELM_SYNC_TSS
14314 | VMCPU_FF_SELM_SYNC_GDT
14315 | VMCPU_FF_SELM_SYNC_LDT
14316#endif
14317 | VMCPU_FF_INHIBIT_INTERRUPTS
14318 | VMCPU_FF_BLOCK_NMIS
14319 | VMCPU_FF_UNHALT ));
14320
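                    /* (Descriptive note, not from the original sources: keep looping
                       only while no relevant VMCPU force flags are pending, or while
                       the only pending ones are PIC/APIC interrupts that cannot be
                       delivered anyway because IF is clear, and no VM-wide force
                       flags are pending either.) */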
14321 if (RT_LIKELY( ( !fCpu
14322 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14323 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14324 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14325 {
14326 if (cInstr-- > 0)
14327 {
14328 Assert(pVCpu->iem.s.cActiveMappings == 0);
14329 iemReInitDecoder(pVCpu);
14330 continue;
14331 }
14332 }
14333 }
14334 Assert(pVCpu->iem.s.cActiveMappings == 0);
14335 }
14336 else if (pVCpu->iem.s.cActiveMappings > 0)
14337 iemMemRollback(pVCpu);
14338 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14339 break;
14340 }
14341 }
14342#ifdef IEM_WITH_SETJMP
14343 else
14344 {
14345 if (pVCpu->iem.s.cActiveMappings > 0)
14346 iemMemRollback(pVCpu);
14347 pVCpu->iem.s.cLongJumps++;
14348 }
14349 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14350#endif
14351
14352 /*
14353 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14354 */
14355 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14357 }
14358 else
14359 {
14360 if (pVCpu->iem.s.cActiveMappings > 0)
14361 iemMemRollback(pVCpu);
14362
14363#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14364 /*
14365 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14366 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14367 */
14368 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14369#endif
14370 }
14371
14372 /*
14373 * Maybe re-enter raw-mode and log.
14374 */
14375#ifdef IN_RC
14376 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14377#endif
14378 if (rcStrict != VINF_SUCCESS)
14379 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14380 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14381 if (pcInstructions)
14382 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14383 return rcStrict;
14384}
14385
14386
14387/**
14388 * Interface used by EMExecuteExec; handles exit statistics and limits.
14389 *
14390 * @returns Strict VBox status code.
14391 * @param pVCpu The cross context virtual CPU structure.
14392 * @param fWillExit To be defined.
14393 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14394 * @param cMaxInstructions Maximum number of instructions to execute.
14395 * @param cMaxInstructionsWithoutExits
14396 * The max number of instructions without exits.
14397 * @param pStats Where to return statistics.
14398 */
14399VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14400 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14401{
14402 NOREF(fWillExit); /** @todo define flexible exit crits */
14403
14404 /*
14405 * Initialize return stats.
14406 */
14407 pStats->cInstructions = 0;
14408 pStats->cExits = 0;
14409 pStats->cMaxExitDistance = 0;
14410 pStats->cReserved = 0;
14411
14412 /*
14413 * Initial decoder init w/ prefetch, then setup setjmp.
14414 */
14415 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14416 if (rcStrict == VINF_SUCCESS)
14417 {
14418#ifdef IEM_WITH_SETJMP
14419 jmp_buf JmpBuf;
14420 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14421 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14422 pVCpu->iem.s.cActiveMappings = 0;
14423 if ((rcStrict = setjmp(JmpBuf)) == 0)
14424#endif
14425 {
14426#ifdef IN_RING0
14427 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
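            /* (Descriptive note, not from the original sources: when the current
               context cannot simply be preempted by the host, the loop below polls
               RTThreadPreemptIsPending and bails out with VINF_EM_RAW_INTERRUPT so
               the host scheduler is not starved.) */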
14428#endif
14429 uint32_t cInstructionSinceLastExit = 0;
14430
14431 /*
14432 * The run loop. We limit ourselves to 4096 instructions right now.
14433 */
14434 PVM pVM = pVCpu->CTX_SUFF(pVM);
14435 for (;;)
14436 {
14437 /*
14438 * Log the state.
14439 */
14440#ifdef LOG_ENABLED
14441 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14442#endif
14443
14444 /*
14445 * Do the decoding and emulation.
14446 */
14447 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14448
14449 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14450 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14451
14452 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14453 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14454 {
14455 pStats->cExits += 1;
14456 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14457 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14458 cInstructionSinceLastExit = 0;
14459 }
14460
14461 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14462 {
14463 Assert(pVCpu->iem.s.cActiveMappings == 0);
14464 pVCpu->iem.s.cInstructions++;
14465 pStats->cInstructions++;
14466 cInstructionSinceLastExit++;
14467 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14468 {
14469 uint32_t fCpu = pVCpu->fLocalForcedActions
14470 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14471 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14472 | VMCPU_FF_TLB_FLUSH
14473#ifdef VBOX_WITH_RAW_MODE
14474 | VMCPU_FF_TRPM_SYNC_IDT
14475 | VMCPU_FF_SELM_SYNC_TSS
14476 | VMCPU_FF_SELM_SYNC_GDT
14477 | VMCPU_FF_SELM_SYNC_LDT
14478#endif
14479 | VMCPU_FF_INHIBIT_INTERRUPTS
14480 | VMCPU_FF_BLOCK_NMIS
14481 | VMCPU_FF_UNHALT ));
14482
14483 if (RT_LIKELY( ( ( !fCpu
14484 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14485 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14486 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14487 || pStats->cInstructions < cMinInstructions))
14488 {
14489 if (pStats->cInstructions < cMaxInstructions)
14490 {
14491 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14492 {
14493#ifdef IN_RING0
14494 if ( !fCheckPreemptionPending
14495 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14496#endif
14497 {
14498 Assert(pVCpu->iem.s.cActiveMappings == 0);
14499 iemReInitDecoder(pVCpu);
14500 continue;
14501 }
14502#ifdef IN_RING0
14503 rcStrict = VINF_EM_RAW_INTERRUPT;
14504 break;
14505#endif
14506 }
14507 }
14508 }
14509 Assert(!(fCpu & VMCPU_FF_IEM));
14510 }
14511 Assert(pVCpu->iem.s.cActiveMappings == 0);
14512 }
14513 else if (pVCpu->iem.s.cActiveMappings > 0)
14514 iemMemRollback(pVCpu);
14515 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14516 break;
14517 }
14518 }
14519#ifdef IEM_WITH_SETJMP
14520 else
14521 {
14522 if (pVCpu->iem.s.cActiveMappings > 0)
14523 iemMemRollback(pVCpu);
14524 pVCpu->iem.s.cLongJumps++;
14525 }
14526 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14527#endif
14528
14529 /*
14530 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14531 */
14532 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14533 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14534 }
14535 else
14536 {
14537 if (pVCpu->iem.s.cActiveMappings > 0)
14538 iemMemRollback(pVCpu);
14539
14540#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14541 /*
14542 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14543 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14544 */
14545 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14546#endif
14547 }
14548
14549 /*
14550 * Maybe re-enter raw-mode and log.
14551 */
14552#ifdef IN_RC
14553 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14554#endif
14555 if (rcStrict != VINF_SUCCESS)
14556 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14557 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14558 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14559 return rcStrict;
14560}
14561
14562
14563/**
14564 * Injects a trap, fault, abort, software interrupt or external interrupt.
14565 *
14566 * The parameter list matches TRPMQueryTrapAll pretty closely.
14567 *
14568 * @returns Strict VBox status code.
14569 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14570 * @param u8TrapNo The trap number.
14571 * @param enmType What type is it (trap/fault/abort), software
14572 * interrupt or hardware interrupt.
14573 * @param uErrCode The error code if applicable.
14574 * @param uCr2 The CR2 value if applicable.
14575 * @param cbInstr The instruction length (only relevant for
14576 * software interrupts).
14577 */
14578VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14579 uint8_t cbInstr)
14580{
14581 iemInitDecoder(pVCpu, false);
14582#ifdef DBGFTRACE_ENABLED
14583 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14584 u8TrapNo, enmType, uErrCode, uCr2);
14585#endif
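    /* (Illustrative, not from the original sources: a guest page fault would be
       injected as IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErr, uCr2, 0);
       the TRPM_TRAP case below then adds IEM_XCPT_FLAGS_ERR and
       IEM_XCPT_FLAGS_CR2 before calling iemRaiseXcptOrInt.) */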
14586
14587 uint32_t fFlags;
14588 switch (enmType)
14589 {
14590 case TRPM_HARDWARE_INT:
14591 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14592 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14593 uErrCode = uCr2 = 0;
14594 break;
14595
14596 case TRPM_SOFTWARE_INT:
14597 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14598 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14599 uErrCode = uCr2 = 0;
14600 break;
14601
14602 case TRPM_TRAP:
14603 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14604 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14605 if (u8TrapNo == X86_XCPT_PF)
14606 fFlags |= IEM_XCPT_FLAGS_CR2;
14607 switch (u8TrapNo)
14608 {
14609 case X86_XCPT_DF:
14610 case X86_XCPT_TS:
14611 case X86_XCPT_NP:
14612 case X86_XCPT_SS:
14613 case X86_XCPT_PF:
14614 case X86_XCPT_AC:
14615 fFlags |= IEM_XCPT_FLAGS_ERR;
14616 break;
14617
14618 case X86_XCPT_NMI:
14619 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14620 break;
14621 }
14622 break;
14623
14624 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14625 }
14626
14627 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14628
14629 if (pVCpu->iem.s.cActiveMappings > 0)
14630 iemMemRollback(pVCpu);
14631
14632 return rcStrict;
14633}
14634
14635
14636/**
14637 * Injects the active TRPM event.
14638 *
14639 * @returns Strict VBox status code.
14640 * @param pVCpu The cross context virtual CPU structure.
14641 */
14642VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14643{
14644#ifndef IEM_IMPLEMENTS_TASKSWITCH
14645 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14646#else
14647 uint8_t u8TrapNo;
14648 TRPMEVENT enmType;
14649 RTGCUINT uErrCode;
14650 RTGCUINTPTR uCr2;
14651 uint8_t cbInstr;
14652 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14653 if (RT_FAILURE(rc))
14654 return rc;
14655
14656 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14657# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14658 if (rcStrict == VINF_SVM_VMEXIT)
14659 rcStrict = VINF_SUCCESS;
14660# endif
14661
14662 /** @todo Are there any other codes that imply the event was successfully
14663 * delivered to the guest? See @bugref{6607}. */
14664 if ( rcStrict == VINF_SUCCESS
14665 || rcStrict == VINF_IEM_RAISED_XCPT)
14666 TRPMResetTrap(pVCpu);
14667
14668 return rcStrict;
14669#endif
14670}
14671
14672
14673VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14674{
14675 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14676 return VERR_NOT_IMPLEMENTED;
14677}
14678
14679
14680VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14681{
14682 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14683 return VERR_NOT_IMPLEMENTED;
14684}
14685
14686
14687#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14688/**
14689 * Executes a IRET instruction with default operand size.
14690 *
14691 * This is for PATM.
14692 *
14693 * @returns VBox status code.
14694 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14695 * @param pCtxCore The register frame.
14696 */
14697VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14698{
14699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14700
14701 iemCtxCoreToCtx(pCtx, pCtxCore);
14702 iemInitDecoder(pVCpu);
14703 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14704 if (rcStrict == VINF_SUCCESS)
14705 iemCtxToCtxCore(pCtxCore, pCtx);
14706 else
14707 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14708 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14709 return rcStrict;
14710}
14711#endif
14712
14713
14714/**
14715 * Macro used by the IEMExec* method to check the given instruction length.
14716 *
14717 * Will return on failure!
14718 *
14719 * @param a_cbInstr The given instruction length.
14720 * @param a_cbMin The minimum length.
14721 */
14722#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14723 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14724 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14725
14726
14727/**
14728 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14729 *
14730 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14731 *
14732 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14734 * @param rcStrict The status code to fiddle.
14735 */
14736DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14737{
14738 iemUninitExec(pVCpu);
14739#ifdef IN_RC
14740 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14741#else
14742 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14743#endif
14744}
14745
14746
14747/**
14748 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14749 *
14750 * This API ASSUMES that the caller has already verified that the guest code is
14751 * allowed to access the I/O port. (The I/O port is in the DX register in the
14752 * guest state.)
14753 *
14754 * @returns Strict VBox status code.
14755 * @param pVCpu The cross context virtual CPU structure.
14756 * @param cbValue The size of the I/O port access (1, 2, or 4).
14757 * @param enmAddrMode The addressing mode.
14758 * @param fRepPrefix Indicates whether a repeat prefix is used
14759 * (doesn't matter which for this instruction).
14760 * @param cbInstr The instruction length in bytes.
14761 * @param iEffSeg The effective segment register (X86_SREG_XXX).
14762 * @param fIoChecked Whether the access to the I/O port has been
14763 * checked or not. It's typically checked in the
14764 * HM scenario.
14765 */
14766VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14767 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14768{
14769 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14770 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14771
14772 /*
14773 * State init.
14774 */
14775 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14776
14777 /*
14778 * Switch orgy for getting to the right handler.
14779 */
14780 VBOXSTRICTRC rcStrict;
14781 if (fRepPrefix)
14782 {
14783 switch (enmAddrMode)
14784 {
14785 case IEMMODE_16BIT:
14786 switch (cbValue)
14787 {
14788 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14789 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14790 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14791 default:
14792 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14793 }
14794 break;
14795
14796 case IEMMODE_32BIT:
14797 switch (cbValue)
14798 {
14799 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14800 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14801 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14802 default:
14803 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14804 }
14805 break;
14806
14807 case IEMMODE_64BIT:
14808 switch (cbValue)
14809 {
14810 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14811 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14812 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14813 default:
14814 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14815 }
14816 break;
14817
14818 default:
14819 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14820 }
14821 }
14822 else
14823 {
14824 switch (enmAddrMode)
14825 {
14826 case IEMMODE_16BIT:
14827 switch (cbValue)
14828 {
14829 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14830 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14831 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14832 default:
14833 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14834 }
14835 break;
14836
14837 case IEMMODE_32BIT:
14838 switch (cbValue)
14839 {
14840 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14841 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14842 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14843 default:
14844 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14845 }
14846 break;
14847
14848 case IEMMODE_64BIT:
14849 switch (cbValue)
14850 {
14851 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14852 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14853 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14854 default:
14855 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14856 }
14857 break;
14858
14859 default:
14860 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14861 }
14862 }
14863
14864 if (pVCpu->iem.s.cActiveMappings)
14865 iemMemRollback(pVCpu);
14866
14867 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14868}
14869
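/*
 * Illustrative only: a minimal sketch of how an HM exit handler might forward
 * a "rep outsb" intercept to IEM.  hmExampleHandleOutsExit is a hypothetical
 * helper, not part of VirtualBox; the operand size, address mode and
 * instruction length would normally come from the decoded exit information.
 */
#if 0 /* usage sketch, not built */
static VBOXSTRICTRC hmExampleHandleOutsExit(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1             /* cbValue: byte-sized OUTSB */,
                                IEMMODE_32BIT /* enmAddrMode */,
                                true          /* fRepPrefix */,
                                2             /* cbInstr: 0xf3 0x6e */,
                                X86_SREG_DS   /* iEffSeg: default segment */,
                                false         /* fIoChecked: let IEM check the port */);
}
#endif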
14870
14871/**
14872 * Interface for HM and EM for executing string I/O IN (read) instructions.
14873 *
14874 * This API ASSUMES that the caller has already verified that the guest code is
14875 * allowed to access the I/O port. (The I/O port is in the DX register in the
14876 * guest state.)
14877 *
14878 * @returns Strict VBox status code.
14879 * @param pVCpu The cross context virtual CPU structure.
14880 * @param cbValue The size of the I/O port access (1, 2, or 4).
14881 * @param enmAddrMode The addressing mode.
14882 * @param fRepPrefix Indicates whether a repeat prefix is used
14883 * (doesn't matter which for this instruction).
14884 * @param cbInstr The instruction length in bytes.
14885 * @param fIoChecked Whether the access to the I/O port has been
14886 * checked or not. It's typically checked in the
14887 * HM scenario.
14888 */
14889VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14890 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14891{
14892 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14893
14894 /*
14895 * State init.
14896 */
14897 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14898
14899 /*
14900 * Switch orgy for getting to the right handler.
14901 */
14902 VBOXSTRICTRC rcStrict;
14903 if (fRepPrefix)
14904 {
14905 switch (enmAddrMode)
14906 {
14907 case IEMMODE_16BIT:
14908 switch (cbValue)
14909 {
14910 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14911 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14912 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14913 default:
14914 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14915 }
14916 break;
14917
14918 case IEMMODE_32BIT:
14919 switch (cbValue)
14920 {
14921 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14922 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14923 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14924 default:
14925 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14926 }
14927 break;
14928
14929 case IEMMODE_64BIT:
14930 switch (cbValue)
14931 {
14932 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14933 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14934 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14935 default:
14936 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14937 }
14938 break;
14939
14940 default:
14941 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14942 }
14943 }
14944 else
14945 {
14946 switch (enmAddrMode)
14947 {
14948 case IEMMODE_16BIT:
14949 switch (cbValue)
14950 {
14951 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14952 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14953 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14954 default:
14955 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14956 }
14957 break;
14958
14959 case IEMMODE_32BIT:
14960 switch (cbValue)
14961 {
14962 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14963 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14964 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14965 default:
14966 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14967 }
14968 break;
14969
14970 case IEMMODE_64BIT:
14971 switch (cbValue)
14972 {
14973 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14974 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14975 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14976 default:
14977 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14978 }
14979 break;
14980
14981 default:
14982 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14983 }
14984 }
14985
14986 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14987 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14988}
14989
14990
14991/**
14992 * Interface for raw-mode to execute an OUT (write) instruction.
14993 *
14994 * @returns Strict VBox status code.
14995 * @param pVCpu The cross context virtual CPU structure.
14996 * @param cbInstr The instruction length in bytes.
14997 * @param u16Port The port to write to.
14998 * @param fImm Whether the port is specified using an immediate operand or
14999 * using the implicit DX register.
15000 * @param cbReg The register size.
15001 *
15002 * @remarks In ring-0 not all of the state needs to be synced in.
15003 */
15004VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15005{
15006 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15007 Assert(cbReg <= 4 && cbReg != 3);
15008
15009 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15010 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15011 Assert(!pVCpu->iem.s.cActiveMappings);
15012 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15013}
15014
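/*
 * Illustrative only: a sketch of forwarding a decoded "out 80h, al" to IEM.
 * rawExampleEmulateOutImm8 is a hypothetical helper, not part of VirtualBox;
 * in a real caller the port, immediate flag and instruction length come from
 * the decoder.
 */
#if 0 /* usage sketch, not built */
static VBOXSTRICTRC rawExampleEmulateOutImm8(PVMCPU pVCpu)
{
    /* "out 80h, al" encodes as 0xe6 0x80: two bytes, immediate port, 8-bit register. */
    return IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif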
15015
15016/**
15017 * Interface for raw-mode to execute an IN (read) instruction.
15018 *
15019 * @returns Strict VBox status code.
15020 * @param pVCpu The cross context virtual CPU structure.
15021 * @param cbInstr The instruction length in bytes.
15022 * @param u16Port The port to read from.
15023 * @param fImm Whether the port is specified using an immediate operand or
15024 * using the implicit DX register.
15025 * @param cbReg The register size.
15026 */
15027VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15028{
15029 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15030 Assert(cbReg <= 4 && cbReg != 3);
15031
15032 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15033 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15034 Assert(!pVCpu->iem.s.cActiveMappings);
15035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15036}
15037
15038
15039/**
15040 * Interface for HM and EM to write to a CRx register.
15041 *
15042 * @returns Strict VBox status code.
15043 * @param pVCpu The cross context virtual CPU structure.
15044 * @param cbInstr The instruction length in bytes.
15045 * @param iCrReg The control register number (destination).
15046 * @param iGReg The general purpose register number (source).
15047 *
15048 * @remarks In ring-0 not all of the state needs to be synced in.
15049 */
15050VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15051{
15052 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15053 Assert(iCrReg < 16);
15054 Assert(iGReg < 16);
15055
15056 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15057 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15058 Assert(!pVCpu->iem.s.cActiveMappings);
15059 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15060}
15061
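/*
 * Illustrative only: a sketch of emulating a decoded "mov cr3, rax" via IEM.
 * hmExampleEmulateMovToCr3 is a hypothetical helper, not part of VirtualBox;
 * the general purpose register index 0 is assumed to correspond to RAX.
 */
#if 0 /* usage sketch, not built */
static VBOXSTRICTRC hmExampleEmulateMovToCr3(PVMCPU pVCpu)
{
    /* "mov cr3, rax" encodes as 0x0f 0x22 0xd8: three bytes. */
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg: RAX*/);
}
#endif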
15062
15063/**
15064 * Interface for HM and EM to read from a CRx register.
15065 *
15066 * @returns Strict VBox status code.
15067 * @param pVCpu The cross context virtual CPU structure.
15068 * @param cbInstr The instruction length in bytes.
15069 * @param iGReg The general purpose register number (destination).
15070 * @param iCrReg The control register number (source).
15071 *
15072 * @remarks In ring-0 not all of the state needs to be synced in.
15073 */
15074VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15075{
15076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15077 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15078 | CPUMCTX_EXTRN_APIC_TPR);
15079 Assert(iCrReg < 16);
15080 Assert(iGReg < 16);
15081
15082 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15083 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15084 Assert(!pVCpu->iem.s.cActiveMappings);
15085 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15086}
15087
15088
15089/**
15090 * Interface for HM and EM to clear the CR0[TS] bit.
15091 *
15092 * @returns Strict VBox status code.
15093 * @param pVCpu The cross context virtual CPU structure.
15094 * @param cbInstr The instruction length in bytes.
15095 *
15096 * @remarks In ring-0 not all of the state needs to be synced in.
15097 */
15098VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15099{
15100 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15101
15102 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15103 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15104 Assert(!pVCpu->iem.s.cActiveMappings);
15105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15106}
15107
15108
15109/**
15110 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15111 *
15112 * @returns Strict VBox status code.
15113 * @param pVCpu The cross context virtual CPU structure.
15114 * @param cbInstr The instruction length in bytes.
15115 * @param uValue The value to load into CR0.
15116 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15117 * memory operand. Otherwise pass NIL_RTGCPTR.
15118 *
15119 * @remarks In ring-0 not all of the state needs to be synced in.
15120 */
15121VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15122{
15123 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15124
15125 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15126 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15127 Assert(!pVCpu->iem.s.cActiveMappings);
15128 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15129}
15130
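/*
 * Illustrative only: a sketch of emulating the register form "lmsw ax" via
 * IEM.  hmExampleEmulateLmswReg is a hypothetical helper, not part of
 * VirtualBox; uMsw stands in for the value the caller fetched from guest AX.
 */
#if 0 /* usage sketch, not built */
static VBOXSTRICTRC hmExampleEmulateLmswReg(PVMCPU pVCpu, uint16_t uMsw)
{
    /* "lmsw ax" encodes as 0x0f 0x01 0xf0: three bytes, no memory operand,
       so NIL_RTGCPTR is passed for the effective address. */
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uMsw, NIL_RTGCPTR);
}
#endif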
15131
15132/**
15133 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15134 *
15135 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15136 *
15137 * @returns Strict VBox status code.
15138 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15139 * @param cbInstr The instruction length in bytes.
15140 * @remarks In ring-0 not all of the state needs to be synced in.
15141 * @thread EMT(pVCpu)
15142 */
15143VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15144{
15145 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15146
15147 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15148 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15149 Assert(!pVCpu->iem.s.cActiveMappings);
15150 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15151}
15152
15153
15154/**
15155 * Interface for HM and EM to emulate the WBINVD instruction.
15156 *
15157 * @returns Strict VBox status code.
15158 * @param pVCpu The cross context virtual CPU structure.
15159 * @param cbInstr The instruction length in bytes.
15160 *
15161 * @remarks In ring-0 not all of the state needs to be synced in.
15162 */
15163VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15164{
15165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15166
15167 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15168 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15169 Assert(!pVCpu->iem.s.cActiveMappings);
15170 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15171}
15172
15173
15174/**
15175 * Interface for HM and EM to emulate the INVD instruction.
15176 *
15177 * @returns Strict VBox status code.
15178 * @param pVCpu The cross context virtual CPU structure.
15179 * @param cbInstr The instruction length in bytes.
15180 *
15181 * @remarks In ring-0 not all of the state needs to be synced in.
15182 */
15183VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15184{
15185 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15186
15187 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15188 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15189 Assert(!pVCpu->iem.s.cActiveMappings);
15190 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15191}
15192
15193
15194/**
15195 * Interface for HM and EM to emulate the INVLPG instruction.
15196 *
15197 * @returns Strict VBox status code.
15198 * @retval VINF_PGM_SYNC_CR3
15199 *
15200 * @param pVCpu The cross context virtual CPU structure.
15201 * @param cbInstr The instruction length in bytes.
15202 * @param GCPtrPage The effective address of the page to invalidate.
15203 *
15204 * @remarks In ring-0 not all of the state needs to be synced in.
15205 */
15206VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15207{
15208 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15209
15210 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15211 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15212 Assert(!pVCpu->iem.s.cActiveMappings);
15213 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15214}
15215
15216
15217/**
15218 * Interface for HM and EM to emulate the CPUID instruction.
15219 *
15220 * @returns Strict VBox status code.
15221 *
15222 * @param pVCpu The cross context virtual CPU structure.
15223 * @param cbInstr The instruction length in bytes.
15224 *
15225 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15226 */
15227VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15228{
15229 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15230 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15231
15232 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15233 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15234 Assert(!pVCpu->iem.s.cActiveMappings);
15235 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15236}
15237
15238
15239/**
15240 * Interface for HM and EM to emulate the RDPMC instruction.
15241 *
15242 * @returns Strict VBox status code.
15243 *
15244 * @param pVCpu The cross context virtual CPU structure.
15245 * @param cbInstr The instruction length in bytes.
15246 *
15247 * @remarks Not all of the state needs to be synced in.
15248 */
15249VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15250{
15251 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15252 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15253
15254 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15255 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15256 Assert(!pVCpu->iem.s.cActiveMappings);
15257 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15258}
15259
15260
15261/**
15262 * Interface for HM and EM to emulate the RDTSC instruction.
15263 *
15264 * @returns Strict VBox status code.
15265 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15266 *
15267 * @param pVCpu The cross context virtual CPU structure.
15268 * @param cbInstr The instruction length in bytes.
15269 *
15270 * @remarks Not all of the state needs to be synced in.
15271 */
15272VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15273{
15274 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15275 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15276
15277 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15278 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15279 Assert(!pVCpu->iem.s.cActiveMappings);
15280 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15281}
15282
15283
15284/**
15285 * Interface for HM and EM to emulate the RDTSCP instruction.
15286 *
15287 * @returns Strict VBox status code.
15288 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15289 *
15290 * @param pVCpu The cross context virtual CPU structure.
15291 * @param cbInstr The instruction length in bytes.
15292 *
15293 * @remarks Not all of the state needs to be synced in. Recommended
15294 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15295 */
15296VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15297{
15298 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15299 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15300
15301 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15302 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15303 Assert(!pVCpu->iem.s.cActiveMappings);
15304 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15305}
15306
15307
15308/**
15309 * Interface for HM and EM to emulate the RDMSR instruction.
15310 *
15311 * @returns Strict VBox status code.
15312 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15313 *
15314 * @param pVCpu The cross context virtual CPU structure.
15315 * @param cbInstr The instruction length in bytes.
15316 *
15317 * @remarks Not all of the state needs to be synced in. Requires RCX and
15318 * (currently) all MSRs.
15319 */
15320VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15321{
15322 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15323 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15324
15325 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15327 Assert(!pVCpu->iem.s.cActiveMappings);
15328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15329}
15330
15331
15332/**
15333 * Interface for HM and EM to emulate the WRMSR instruction.
15334 *
15335 * @returns Strict VBox status code.
15336 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15337 *
15338 * @param pVCpu The cross context virtual CPU structure.
15339 * @param cbInstr The instruction length in bytes.
15340 *
15341 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15342 * and (currently) all MSRs.
15343 */
15344VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15345{
15346 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15347 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15348 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Interface for HM and EM to emulate the MONITOR instruction.
15359 *
15360 * @returns Strict VBox status code.
15361 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15362 *
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 *
15366 * @remarks Not all of the state needs to be synced in.
15367 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15368 * are used.
15369 */
15370VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15371{
15372 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15373 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15374
15375 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15376 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15377 Assert(!pVCpu->iem.s.cActiveMappings);
15378 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15379}
15380
15381
15382/**
15383 * Interface for HM and EM to emulate the MWAIT instruction.
15384 *
15385 * @returns Strict VBox status code.
15386 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15387 *
15388 * @param pVCpu The cross context virtual CPU structure.
15389 * @param cbInstr The instruction length in bytes.
15390 *
15391 * @remarks Not all of the state needs to be synced in.
15392 */
15393VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15394{
15395 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15396
15397 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15398 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15399 Assert(!pVCpu->iem.s.cActiveMappings);
15400 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15401}
15402
15403
15404/**
15405 * Interface for HM and EM to emulate the HLT instruction.
15406 *
15407 * @returns Strict VBox status code.
15408 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15409 *
15410 * @param pVCpu The cross context virtual CPU structure.
15411 * @param cbInstr The instruction length in bytes.
15412 *
15413 * @remarks Not all of the state needs to be synced in.
15414 */
15415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15416{
15417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15418
15419 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15420 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15421 Assert(!pVCpu->iem.s.cActiveMappings);
15422 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15423}
15424
15425
15426/**
15427 * Checks if IEM is in the process of delivering an event (interrupt or
15428 * exception).
15429 *
15430 * @returns true if we're in the process of raising an interrupt or exception,
15431 * false otherwise.
15432 * @param pVCpu The cross context virtual CPU structure.
15433 * @param puVector Where to store the vector associated with the
15434 * currently delivered event, optional.
15435 * @param pfFlags Where to store the event delivery flags (see
15436 * IEM_XCPT_FLAGS_XXX), optional.
15437 * @param puErr Where to store the error code associated with the
15438 * event, optional.
15439 * @param puCr2 Where to store the CR2 associated with the event,
15440 * optional.
15441 * @remarks The caller should check the flags to determine if the error code and
15442 * CR2 are valid for the event.
15443 */
15444VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15445{
15446 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15447 if (fRaisingXcpt)
15448 {
15449 if (puVector)
15450 *puVector = pVCpu->iem.s.uCurXcpt;
15451 if (pfFlags)
15452 *pfFlags = pVCpu->iem.s.fCurXcpt;
15453 if (puErr)
15454 *puErr = pVCpu->iem.s.uCurXcptErr;
15455 if (puCr2)
15456 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15457 }
15458 return fRaisingXcpt;
15459}
15460
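/*
 * Illustrative only: a sketch of querying the event IEM is currently
 * delivering, e.g. for building exit interruption information.  The helper
 * name hmExampleLogCurrentXcpt is hypothetical and not part of VirtualBox.
 */
#if 0 /* usage sketch, not built */
static void hmExampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Example: delivering vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n",
             uVector, fFlags, uErr, uCr2));
}
#endif
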
15461#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15462
15463/**
15464 * Interface for HM and EM to emulate the CLGI instruction.
15465 *
15466 * @returns Strict VBox status code.
15467 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15468 * @param cbInstr The instruction length in bytes.
15469 * @thread EMT(pVCpu)
15470 */
15471VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15472{
15473 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15474
15475 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15476 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15477 Assert(!pVCpu->iem.s.cActiveMappings);
15478 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15479}
15480
15481
15482/**
15483 * Interface for HM and EM to emulate the STGI instruction.
15484 *
15485 * @returns Strict VBox status code.
15486 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15487 * @param cbInstr The instruction length in bytes.
15488 * @thread EMT(pVCpu)
15489 */
15490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15491{
15492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15493
15494 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15495 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15496 Assert(!pVCpu->iem.s.cActiveMappings);
15497 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15498}
15499
15500
15501/**
15502 * Interface for HM and EM to emulate the VMLOAD instruction.
15503 *
15504 * @returns Strict VBox status code.
15505 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15506 * @param cbInstr The instruction length in bytes.
15507 * @thread EMT(pVCpu)
15508 */
15509VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15510{
15511 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15512
15513 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15514 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15515 Assert(!pVCpu->iem.s.cActiveMappings);
15516 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15517}
15518
15519
15520/**
15521 * Interface for HM and EM to emulate the VMSAVE instruction.
15522 *
15523 * @returns Strict VBox status code.
15524 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15525 * @param cbInstr The instruction length in bytes.
15526 * @thread EMT(pVCpu)
15527 */
15528VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15529{
15530 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15531
15532 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15533 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15534 Assert(!pVCpu->iem.s.cActiveMappings);
15535 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15536}
15537
15538
15539/**
15540 * Interface for HM and EM to emulate the INVLPGA instruction.
15541 *
15542 * @returns Strict VBox status code.
15543 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15544 * @param cbInstr The instruction length in bytes.
15545 * @thread EMT(pVCpu)
15546 */
15547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15548{
15549 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15550
15551 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15552 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15553 Assert(!pVCpu->iem.s.cActiveMappings);
15554 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15555}
15556
15557
15558/**
15559 * Interface for HM and EM to emulate the VMRUN instruction.
15560 *
15561 * @returns Strict VBox status code.
15562 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15563 * @param cbInstr The instruction length in bytes.
15564 * @thread EMT(pVCpu)
15565 */
15566VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15567{
15568 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15569 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15570
15571 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15572 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15573 Assert(!pVCpu->iem.s.cActiveMappings);
15574 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15575}
15576
15577
15578/**
15579 * Interface for HM and EM to emulate \#VMEXIT.
15580 *
15581 * @returns Strict VBox status code.
15582 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15583 * @param uExitCode The exit code.
15584 * @param uExitInfo1 The exit info. 1 field.
15585 * @param uExitInfo2 The exit info. 2 field.
15586 * @thread EMT(pVCpu)
15587 */
15588VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15589{
15590 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15591 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15592 if (pVCpu->iem.s.cActiveMappings)
15593 iemMemRollback(pVCpu);
15594 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15595}
15596
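/*
 * Illustrative only: a sketch of how a nested-SVM intercept handler might
 * trigger the #VMEXIT emulation.  hmExampleSvmNstGstVmexit is a hypothetical
 * helper, not part of VirtualBox; the exit code and exit info fields are
 * simply passed through from whatever intercept was detected.
 */
#if 0 /* usage sketch, not built */
static VBOXSTRICTRC hmExampleSvmNstGstVmexit(PVMCPU pVCpu, uint64_t uExitCode,
                                             uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    /* IEMExecSvmVmexit performs the world switch back to the outer guest and
       fiddles the status code for the caller. */
    return IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
}
#endif
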
15597#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15598
15599#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15600
15601/**
15602 * Interface for HM and EM to emulate the VMREAD instruction.
15603 *
15604 * @returns Strict VBox status code.
15605 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15606 * @param pExitInfo Pointer to the VM-exit information struct.
15607 * @thread EMT(pVCpu)
15608 */
15609VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15610{
15611 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15612 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15613 Assert(pExitInfo);
15614
15615 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15616
15617 VBOXSTRICTRC rcStrict;
15618 uint8_t const cbInstr = pExitInfo->cbInstr;
15619 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15620 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15621 {
15622 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15623 {
15624 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15625 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15626 }
15627 else
15628 {
15629 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15630 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15631 }
15632 }
15633 else
15634 {
15635 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15636 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15637 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15638 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15639 }
15640 if (pVCpu->iem.s.cActiveMappings)
15641 iemMemRollback(pVCpu);
15642 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15643}
15644
15645
15646/**
15647 * Interface for HM and EM to emulate the VMWRITE instruction.
15648 *
15649 * @returns Strict VBox status code.
15650 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15651 * @param pExitInfo Pointer to the VM-exit information struct.
15652 * @thread EMT(pVCpu)
15653 */
15654VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15655{
15656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15657 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15658 Assert(pExitInfo);
15659
15660 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15661
15662 uint64_t u64Val;
15663 uint8_t iEffSeg;
15664 IEMMODE enmEffAddrMode;
15665 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15666 {
15667 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15668 iEffSeg = UINT8_MAX;
15669        enmEffAddrMode = (IEMMODE)UINT8_MAX; /* not used for the register operand form */
15670 }
15671 else
15672 {
15673 u64Val = pExitInfo->GCPtrEffAddr;
15674 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15675 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15676 }
15677 uint8_t const cbInstr = pExitInfo->cbInstr;
15678 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15679 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15680 if (pVCpu->iem.s.cActiveMappings)
15681 iemMemRollback(pVCpu);
15682 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15683}
15684
15685
15686/**
15687 * Interface for HM and EM to emulate the VMPTRLD instruction.
15688 *
15689 * @returns Strict VBox status code.
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param pExitInfo Pointer to the VM-exit information struct.
15692 * @thread EMT(pVCpu)
15693 */
15694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15695{
15696 Assert(pExitInfo);
15697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15698 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15699
15700 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15701
15702 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15703 uint8_t const cbInstr = pExitInfo->cbInstr;
15704 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15705 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15706 if (pVCpu->iem.s.cActiveMappings)
15707 iemMemRollback(pVCpu);
15708 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15709}
15710
15711
15712/**
15713 * Interface for HM and EM to emulate the VMPTRST instruction.
15714 *
15715 * @returns Strict VBox status code.
15716 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15717 * @param pExitInfo Pointer to the VM-exit information struct.
15718 * @thread EMT(pVCpu)
15719 */
15720VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15721{
15722 Assert(pExitInfo);
15723 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15724 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15725
15726 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15727
15728 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15729 uint8_t const cbInstr = pExitInfo->cbInstr;
15730 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15731 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15732 if (pVCpu->iem.s.cActiveMappings)
15733 iemMemRollback(pVCpu);
15734 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15735}
15736
15737
15738/**
15739 * Interface for HM and EM to emulate the VMCLEAR instruction.
15740 *
15741 * @returns Strict VBox status code.
15742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15743 * @param pExitInfo Pointer to the VM-exit information struct.
15744 * @thread EMT(pVCpu)
15745 */
15746VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15747{
15748 Assert(pExitInfo);
15749 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15750 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15751
15752 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15753
15754 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15755 uint8_t const cbInstr = pExitInfo->cbInstr;
15756 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15757 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15758 if (pVCpu->iem.s.cActiveMappings)
15759 iemMemRollback(pVCpu);
15760 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15761}
15762
15763
15764/**
15765 * Interface for HM and EM to emulate the VMXON instruction.
15766 *
15767 * @returns Strict VBox status code.
15768 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15769 * @param pExitInfo Pointer to the VM-exit information struct.
15770 * @thread EMT(pVCpu)
15771 */
15772VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15773{
15774 Assert(pExitInfo);
15775 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15776 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15777
15778 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15779
15780 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15781 uint8_t const cbInstr = pExitInfo->cbInstr;
15782 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15783 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15784 if (pVCpu->iem.s.cActiveMappings)
15785 iemMemRollback(pVCpu);
15786 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15787}
15788
15789
15790/**
15791 * Interface for HM and EM to emulate the VMXOFF instruction.
15792 *
15793 * @returns Strict VBox status code.
15794 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15795 * @param cbInstr The instruction length in bytes.
15796 * @thread EMT(pVCpu)
15797 */
15798VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15799{
15800 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15801
15802 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15803 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15804 Assert(!pVCpu->iem.s.cActiveMappings);
15805 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15806}
15807
15808#endif
15809
15810#ifdef IN_RING3
15811
15812/**
15813 * Handles the unlikely and probably fatal merge cases.
15814 *
15815 * @returns Merged status code.
15816 * @param rcStrict Current EM status code.
15817 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15818 * with @a rcStrict.
15819 * @param iMemMap The memory mapping index. For error reporting only.
15820 * @param pVCpu The cross context virtual CPU structure of the calling
15821 * thread, for error reporting only.
15822 */
15823DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15824 unsigned iMemMap, PVMCPU pVCpu)
15825{
15826 if (RT_FAILURE_NP(rcStrict))
15827 return rcStrict;
15828
15829 if (RT_FAILURE_NP(rcStrictCommit))
15830 return rcStrictCommit;
15831
15832 if (rcStrict == rcStrictCommit)
15833 return rcStrictCommit;
15834
15835 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15836 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15837 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15838 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15839 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15840 return VERR_IOM_FF_STATUS_IPE;
15841}
15842
15843
15844/**
15845 * Helper for IOMR3ProcessForceFlag.
15846 *
15847 * @returns Merged status code.
15848 * @param rcStrict Current EM status code.
15849 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15850 * with @a rcStrict.
15851 * @param iMemMap The memory mapping index. For error reporting only.
15852 * @param pVCpu The cross context virtual CPU structure of the calling
15853 * thread, for error reporting only.
15854 */
15855DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15856{
15857 /* Simple. */
15858 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15859 return rcStrictCommit;
15860
15861 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15862 return rcStrict;
15863
15864 /* EM scheduling status codes. */
15865 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15866 && rcStrict <= VINF_EM_LAST))
15867 {
15868 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15869 && rcStrictCommit <= VINF_EM_LAST))
15870 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15871 }
15872
15873 /* Unlikely */
15874 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15875}
15876
15877
15878/**
15879 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15880 *
15881 * @returns Merge between @a rcStrict and what the commit operation returned.
15882 * @param pVM The cross context VM structure.
15883 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15884 * @param rcStrict The status code returned by ring-0 or raw-mode.
15885 */
15886VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15887{
15888 /*
15889 * Reset the pending commit.
15890 */
15891 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15892 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15893 ("%#x %#x %#x\n",
15894 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15895 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15896
15897 /*
15898 * Commit the pending bounce buffers (usually just one).
15899 */
15900 unsigned cBufs = 0;
15901 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15902 while (iMemMap-- > 0)
15903 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15904 {
15905 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15906 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15907 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15908
15909 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15910 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15911 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15912
15913 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15914 {
15915 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15916 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15917 pbBuf,
15918 cbFirst,
15919 PGMACCESSORIGIN_IEM);
15920 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15921 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15922 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15923 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15924 }
15925
15926 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15927 {
15928 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15929 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15930 pbBuf + cbFirst,
15931 cbSecond,
15932 PGMACCESSORIGIN_IEM);
15933 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15934 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15935 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15936 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15937 }
15938 cBufs++;
15939 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15940 }
15941
15942 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15943 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15944 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15945 pVCpu->iem.s.cActiveMappings = 0;
15946 return rcStrict;
15947}
15948
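/*
 * Illustrative only: a sketch of the ring-3 force-flag handling that invokes
 * IEMR3ProcessForceFlag.  emR3ExampleHandleIemForceFlag is a hypothetical
 * helper, not part of VirtualBox; a real caller sits in the EM run loop and
 * merges the commit status into the status code it already holds.
 */
#if 0 /* usage sketch, not built */
static VBOXSTRICTRC emR3ExampleHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call when IEM has left pending bounce buffer writes behind. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
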
15949#endif /* IN_RING3 */
15950