VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 75135

Last change on this file since 75135 was 75135, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Setup VMX preemption timer, remove verbose comment later if needed.

1/* $Id: IEMAll.cpp 75135 2018-10-29 04:27:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
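/* Illustrative sketch only (not from the original file): how the log levels
 * listed above are typically used inside IEM code. All of these compile to
 * nothing unless the "IEM" log group and the respective level are enabled. */
#if 0
    Log(("IEM: #GP(0) raised at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));  /* level 1: major events */
    LogFlow(("IEMExecOne: enter\n"));                                                                  /* flow: enter/exit state info */
    Log4(("decode - %04x:%08RX64 xor eax, eax\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); /* level 4: mnemonics w/ EIP */
#endif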
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/asm-math.h>
121#include <iprt/assert.h>
122#include <iprt/string.h>
123#include <iprt/x86.h>
124
125
126/*********************************************************************************************************************************
127* Structures and Typedefs *
128*********************************************************************************************************************************/
129/** @typedef PFNIEMOP
130 * Pointer to an opcode decoder function.
131 */
132
133/** @def FNIEMOP_DEF
134 * Define an opcode decoder function.
135 *
136 * We're using macros for this so that adding and removing parameters as well as
137 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
138 *
139 * @param a_Name The function name.
140 */
141
142/** @typedef PFNIEMOPRM
143 * Pointer to an opcode decoder function with RM byte.
144 */
145
146/** @def FNIEMOPRM_DEF
147 * Define an opcode decoder function with RM byte.
148 *
149 * We're using macros for this so that adding and removing parameters as well as
150 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
151 *
152 * @param a_Name The function name.
153 */
154
155#if defined(__GNUC__) && defined(RT_ARCH_X86)
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
157typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
158# define FNIEMOP_DEF(a_Name) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
160# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
162# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
163 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
164
165#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
167typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#elif defined(__GNUC__)
176typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
177typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
178# define FNIEMOP_DEF(a_Name) \
179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
180# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
181 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
182# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
183 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
184
185#else
186typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
187typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
188# define FNIEMOP_DEF(a_Name) \
189 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
190# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
191 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
192# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
193 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
194
195#endif
196#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
197
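/* Illustrative sketch only (hypothetical handlers, not wired into any opcode
 * table): decoder functions are declared through FNIEMOP_DEF / FNIEMOPRM_DEF
 * so the calling convention and attributes stay in one place, and are always
 * invoked through the FNIEMOP_CALL* macros below. */
#if 0
FNIEMOP_DEF(iemOp_ExampleNotImplemented)
{
    RT_NOREF(pVCpu);
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}

FNIEMOPRM_DEF(iemOp_ExampleGrpMember_NotImplemented)
{
    RT_NOREF2(pVCpu, bRm);
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
/* A dispatch site would then do: return FNIEMOP_CALL(iemOp_ExampleNotImplemented); */
#endif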
198
199/**
200 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
201 */
202typedef union IEMSELDESC
203{
204 /** The legacy view. */
205 X86DESC Legacy;
206 /** The long mode view. */
207 X86DESC64 Long;
208} IEMSELDESC;
209/** Pointer to a selector descriptor table entry. */
210typedef IEMSELDESC *PIEMSELDESC;
211
212/**
213 * CPU exception classes.
214 */
215typedef enum IEMXCPTCLASS
216{
217 IEMXCPTCLASS_BENIGN,
218 IEMXCPTCLASS_CONTRIBUTORY,
219 IEMXCPTCLASS_PAGE_FAULT,
220 IEMXCPTCLASS_DOUBLE_FAULT
221} IEMXCPTCLASS;
222
223
224/*********************************************************************************************************************************
225* Defined Constants And Macros *
226*********************************************************************************************************************************/
227/** @def IEM_WITH_SETJMP
228 * Enables alternative status code handling using setjmps.
229 *
230 * This adds a bit of expense via the setjmp() call since it saves all the
231 * non-volatile registers. However, it eliminates return code checks and allows
232 * for more optimal return value passing (return regs instead of stack buffer).
233 */
234#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
235# define IEM_WITH_SETJMP
236#endif
237
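/* Rough sketch of the pattern IEM_WITH_SETJMP enables (the names here are
 * illustrative, not the actual IEM plumbing): the caller arms a jump buffer
 * once, and the memory fetch / exception raising helpers longjmp back to it
 * on failure instead of returning a status code through every layer. */
#if 0
    jmp_buf      JmpBuf;                                /* needs <setjmp.h> */
    VBOXSTRICTRC rcStrict;
    int rc = setjmp(JmpBuf);
    if (rc == 0)
        rcStrict = iemExecOneInstructionInner(pVCpu);   /* raisers do longjmp(JmpBuf, rc) on faults */
    else
        rcStrict = rc;                                  /* the status code passed to longjmp */
#endif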
238/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
239 * due to GCC lacking knowledge about the value range of a switch. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
241
242/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
244
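/* Example use (sketch): closes a switch over an enum so GCC does not warn
 * about falling off the end, while still asserting on impossible values. */
#if 0
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: cbValue = 2; break;
        case IEMMODE_32BIT: cbValue = 4; break;
        case IEMMODE_64BIT: cbValue = 8; break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
#endif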
245/**
246 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
247 * occasion.
248 */
249#ifdef LOG_ENABLED
250# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
251 do { \
252 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
254 } while (0)
255#else
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
257 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
258#endif
259
260/**
261 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
262 * occasion using the supplied logger statement.
263 *
264 * @param a_LoggerArgs What to log on failure.
265 */
266#ifdef LOG_ENABLED
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
268 do { \
269 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
270 /*LogFunc(a_LoggerArgs);*/ \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
272 } while (0)
273#else
274# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
275 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
276#endif
277
278/**
279 * Call an opcode decoder function.
280 *
281 * We're using macros for this so that adding and removing parameters can be
282 * done as we please. See FNIEMOP_DEF.
283 */
284#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
285
286/**
287 * Call a common opcode decoder function taking one extra argument.
288 *
289 * We're using macros for this so that adding and removing parameters can be
290 * done as we please. See FNIEMOP_DEF_1.
291 */
292#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
293
294/**
295 * Call a common opcode decoder function taking two extra arguments.
296 *
297 * We're using macros for this so that adding and removing parameters can be
298 * done as we please. See FNIEMOP_DEF_2.
299 */
300#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
301
302/**
303 * Check if we're currently executing in real or virtual 8086 mode.
304 *
305 * @returns @c true if it is, @c false if not.
306 * @param a_pVCpu The IEM state of the current CPU.
307 */
308#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
309
310/**
311 * Check if we're currently executing in virtual 8086 mode.
312 *
313 * @returns @c true if it is, @c false if not.
314 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
315 */
316#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
317
318/**
319 * Check if we're currently executing in long mode.
320 *
321 * @returns @c true if it is, @c false if not.
322 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
323 */
324#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
325
326/**
327 * Check if we're currently executing in a 64-bit code segment.
328 *
329 * @returns @c true if it is, @c false if not.
330 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
331 */
332#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
333
334/**
335 * Check if we're currently executing in real mode.
336 *
337 * @returns @c true if it is, @c false if not.
338 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
339 */
340#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
341
342/**
343 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
344 * @returns PCCPUMFEATURES
345 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
346 */
347#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
348
349/**
350 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
351 * @returns PCCPUMFEATURES
352 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
353 */
354#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
355
356/**
357 * Evaluates to true if we're presenting an Intel CPU to the guest.
358 */
359#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
360
361/**
362 * Evaluates to true if we're presenting an AMD CPU to the guest.
363 */
364#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
365
366/**
367 * Check if the address is canonical.
368 */
369#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
370
371/**
372 * Gets the effective VEX.VVVV value.
373 *
374 * The 4th bit is ignored when not in 64-bit code.
375 * @returns effective V-register value.
376 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
377 */
378#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
379 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
380
381/** @def IEM_USE_UNALIGNED_DATA_ACCESS
382 * Use unaligned accesses instead of elaborate byte assembly. */
383#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
384# define IEM_USE_UNALIGNED_DATA_ACCESS
385#endif
386
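/* What IEM_USE_UNALIGNED_DATA_ACCESS selects between, sketched for a 16-bit
 * fetch from a byte buffer (pbBuf is a hypothetical pointer, not a real IEM
 * variable): */
#if 0
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    uint16_t const u16 = *(uint16_t const *)pbBuf;            /* x86/AMD64 handle unaligned loads fine */
# else
    uint16_t const u16 = RT_MAKE_U16(pbBuf[0], pbBuf[1]);     /* explicit little-endian byte assembly */
# endif
#endif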
387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
388
389/**
390 * Check if the guest has entered VMX root operation.
391 */
392# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
393
394/**
395 * Check if the guest has entered VMX non-root operation.
396 */
397# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
398
399/**
400 * Check if the nested-guest has the given Pin-based VM-execution control set.
401 */
402# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
403 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
404
405/**
406 * Check if the nested-guest has the given Processor-based VM-execution control set.
407 */
408#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
409 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
410
411/**
412 * Check if the nested-guest has the given Secondary Processor-based VM-execution
413 * control set.
414 */
415#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
416 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
417
418/**
419 * Invokes the VMX VM-exit handler for an instruction intercept.
420 */
421# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
422 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
423
424/**
425 * Invokes the VMX VM-exit handler for an instruction intercept where the
426 * instruction provides additional VM-exit information.
427 */
428# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
429 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
430
431/**
432 * Invokes the VMX VM-exit handler for a task switch.
433 */
434# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
435 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
436
437/**
438 * Invokes the VMX VM-exit handler for MWAIT.
439 */
440# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
441 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
442
443/**
444 * Invokes the VMX VM-exit handler for triple faults.
445 */
446# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
447 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
448
449#else
450# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
451# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
452# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
453# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
454# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
455# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
457# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
458# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
460
461#endif
462
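/* Illustrative use of the VMX helpers above (the control-flag and exit-reason
 * names below are placeholders/assumptions; the real ones come from the VMX
 * headers): */
#if 0
    if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
        && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
        IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
#endif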
463#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
464/**
465 * Check if an SVM control/instruction intercept is set.
466 */
467# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
468 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
469
470/**
471 * Check if an SVM read CRx intercept is set.
472 */
473# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
474 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
475
476/**
477 * Check if an SVM write CRx intercept is set.
478 */
479# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
480 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
481
482/**
483 * Check if an SVM read DRx intercept is set.
484 */
485# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
486 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
487
488/**
489 * Check if an SVM write DRx intercept is set.
490 */
491# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
492 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
493
494/**
495 * Check if an SVM exception intercept is set.
496 */
497# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
498 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
499
500/**
501 * Invokes the SVM \#VMEXIT handler for the nested-guest.
502 */
503# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
504 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
505
506/**
507 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
508 * corresponding decode assist information.
509 */
510# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
511 do \
512 { \
513 uint64_t uExitInfo1; \
514 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
515 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
516 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
517 else \
518 uExitInfo1 = 0; \
519 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
520 } while (0)
521
522/** Checks and handles the SVM nested-guest instruction intercept and updates
523 * the NRIP if needed.
524 */
525# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
526 do \
527 { \
528 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
529 { \
530 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
531 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
532 } \
533 } while (0)
534
535/** Checks and handles SVM nested-guest CR0 read intercept. */
536# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
537 do \
538 { \
539 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
540 { /* probably likely */ } \
541 else \
542 { \
543 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
544 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
545 } \
546 } while (0)
547
548/**
549 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
550 */
551# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
552 do { \
553 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
554 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
555 } while (0)
556
557#else
558# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
559# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
560# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
561# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
562# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
563# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
564# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
565# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
566# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
567# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
568# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
569
570#endif
571
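/* Illustrative use of the SVM helpers above, e.g. from the CPUID emulation
 * (the intercept bit and exit code names are taken to be the hm_svm.h ones;
 * treat the exact identifiers here as an assumption): */
#if 0
    IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_CPUID, SVM_EXIT_CPUID,
                                  0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif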
572
573/*********************************************************************************************************************************
574* Global Variables *
575*********************************************************************************************************************************/
576extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
577
578
579/** Function table for the ADD instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
581{
582 iemAImpl_add_u8, iemAImpl_add_u8_locked,
583 iemAImpl_add_u16, iemAImpl_add_u16_locked,
584 iemAImpl_add_u32, iemAImpl_add_u32_locked,
585 iemAImpl_add_u64, iemAImpl_add_u64_locked
586};
587
588/** Function table for the ADC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
590{
591 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
592 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
593 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
594 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
595};
596
597/** Function table for the SUB instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
599{
600 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
601 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
602 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
603 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
604};
605
606/** Function table for the SBB instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
608{
609 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
610 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
611 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
612 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
613};
614
615/** Function table for the OR instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
617{
618 iemAImpl_or_u8, iemAImpl_or_u8_locked,
619 iemAImpl_or_u16, iemAImpl_or_u16_locked,
620 iemAImpl_or_u32, iemAImpl_or_u32_locked,
621 iemAImpl_or_u64, iemAImpl_or_u64_locked
622};
623
624/** Function table for the XOR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
626{
627 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
628 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
629 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
630 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
631};
632
633/** Function table for the AND instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
635{
636 iemAImpl_and_u8, iemAImpl_and_u8_locked,
637 iemAImpl_and_u16, iemAImpl_and_u16_locked,
638 iemAImpl_and_u32, iemAImpl_and_u32_locked,
639 iemAImpl_and_u64, iemAImpl_and_u64_locked
640};
641
642/** Function table for the CMP instruction.
643 * @remarks Making operand order ASSUMPTIONS.
644 */
645IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
646{
647 iemAImpl_cmp_u8, NULL,
648 iemAImpl_cmp_u16, NULL,
649 iemAImpl_cmp_u32, NULL,
650 iemAImpl_cmp_u64, NULL
651};
652
653/** Function table for the TEST instruction.
654 * @remarks Making operand order ASSUMPTIONS.
655 */
656IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
657{
658 iemAImpl_test_u8, NULL,
659 iemAImpl_test_u16, NULL,
660 iemAImpl_test_u32, NULL,
661 iemAImpl_test_u64, NULL
662};
663
664/** Function table for the BT instruction. */
665IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
666{
667 NULL, NULL,
668 iemAImpl_bt_u16, NULL,
669 iemAImpl_bt_u32, NULL,
670 iemAImpl_bt_u64, NULL
671};
672
673/** Function table for the BTC instruction. */
674IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
675{
676 NULL, NULL,
677 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
678 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
679 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
680};
681
682/** Function table for the BTR instruction. */
683IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
684{
685 NULL, NULL,
686 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
687 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
688 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
689};
690
691/** Function table for the BTS instruction. */
692IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
693{
694 NULL, NULL,
695 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
696 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
697 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
698};
699
700/** Function table for the BSF instruction. */
701IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
702{
703 NULL, NULL,
704 iemAImpl_bsf_u16, NULL,
705 iemAImpl_bsf_u32, NULL,
706 iemAImpl_bsf_u64, NULL
707};
708
709/** Function table for the BSR instruction. */
710IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
711{
712 NULL, NULL,
713 iemAImpl_bsr_u16, NULL,
714 iemAImpl_bsr_u32, NULL,
715 iemAImpl_bsr_u64, NULL
716};
717
718/** Function table for the IMUL instruction. */
719IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
720{
721 NULL, NULL,
722 iemAImpl_imul_two_u16, NULL,
723 iemAImpl_imul_two_u32, NULL,
724 iemAImpl_imul_two_u64, NULL
725};
726
727/** Group 1 /r lookup table. */
728IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
729{
730 &g_iemAImpl_add,
731 &g_iemAImpl_or,
732 &g_iemAImpl_adc,
733 &g_iemAImpl_sbb,
734 &g_iemAImpl_and,
735 &g_iemAImpl_sub,
736 &g_iemAImpl_xor,
737 &g_iemAImpl_cmp
738};
739
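/* Sketch of how the group 1 table is meant to be indexed: the /r (reg) field
 * of the ModR/M byte selects the operation (the shift/mask macro names are
 * assumed to be the iprt/x86.h ones): */
#if 0
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
#endif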
740/** Function table for the INC instruction. */
741IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
742{
743 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
744 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
745 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
746 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
747};
748
749/** Function table for the DEC instruction. */
750IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
751{
752 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
753 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
754 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
755 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
756};
757
758/** Function table for the NEG instruction. */
759IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
760{
761 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
762 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
763 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
764 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
765};
766
767/** Function table for the NOT instruction. */
768IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
769{
770 iemAImpl_not_u8, iemAImpl_not_u8_locked,
771 iemAImpl_not_u16, iemAImpl_not_u16_locked,
772 iemAImpl_not_u32, iemAImpl_not_u32_locked,
773 iemAImpl_not_u64, iemAImpl_not_u64_locked
774};
775
776
777/** Function table for the ROL instruction. */
778IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
779{
780 iemAImpl_rol_u8,
781 iemAImpl_rol_u16,
782 iemAImpl_rol_u32,
783 iemAImpl_rol_u64
784};
785
786/** Function table for the ROR instruction. */
787IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
788{
789 iemAImpl_ror_u8,
790 iemAImpl_ror_u16,
791 iemAImpl_ror_u32,
792 iemAImpl_ror_u64
793};
794
795/** Function table for the RCL instruction. */
796IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
797{
798 iemAImpl_rcl_u8,
799 iemAImpl_rcl_u16,
800 iemAImpl_rcl_u32,
801 iemAImpl_rcl_u64
802};
803
804/** Function table for the RCR instruction. */
805IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
806{
807 iemAImpl_rcr_u8,
808 iemAImpl_rcr_u16,
809 iemAImpl_rcr_u32,
810 iemAImpl_rcr_u64
811};
812
813/** Function table for the SHL instruction. */
814IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
815{
816 iemAImpl_shl_u8,
817 iemAImpl_shl_u16,
818 iemAImpl_shl_u32,
819 iemAImpl_shl_u64
820};
821
822/** Function table for the SHR instruction. */
823IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
824{
825 iemAImpl_shr_u8,
826 iemAImpl_shr_u16,
827 iemAImpl_shr_u32,
828 iemAImpl_shr_u64
829};
830
831/** Function table for the SAR instruction. */
832IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
833{
834 iemAImpl_sar_u8,
835 iemAImpl_sar_u16,
836 iemAImpl_sar_u32,
837 iemAImpl_sar_u64
838};
839
840
841/** Function table for the MUL instruction. */
842IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
843{
844 iemAImpl_mul_u8,
845 iemAImpl_mul_u16,
846 iemAImpl_mul_u32,
847 iemAImpl_mul_u64
848};
849
850/** Function table for the IMUL instruction working implicitly on rAX. */
851IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
852{
853 iemAImpl_imul_u8,
854 iemAImpl_imul_u16,
855 iemAImpl_imul_u32,
856 iemAImpl_imul_u64
857};
858
859/** Function table for the DIV instruction. */
860IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
861{
862 iemAImpl_div_u8,
863 iemAImpl_div_u16,
864 iemAImpl_div_u32,
865 iemAImpl_div_u64
866};
867
868/** Function table for the IDIV instruction. */
869IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
870{
871 iemAImpl_idiv_u8,
872 iemAImpl_idiv_u16,
873 iemAImpl_idiv_u32,
874 iemAImpl_idiv_u64
875};
876
877/** Function table for the SHLD instruction */
878IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
879{
880 iemAImpl_shld_u16,
881 iemAImpl_shld_u32,
882 iemAImpl_shld_u64,
883};
884
885/** Function table for the SHRD instruction */
886IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
887{
888 iemAImpl_shrd_u16,
889 iemAImpl_shrd_u32,
890 iemAImpl_shrd_u64,
891};
892
893
894/** Function table for the PUNPCKLBW instruction */
895IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
897/** Function table for the PUNPCKLWD instruction */
897IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
898/** Function table for the PUNPCKLDQ instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
900/** Function table for the PUNPCKLQDQ instruction */
901IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
902
903/** Function table for the PUNPCKHBW instruction */
904IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
906/** Function table for the PUNPCKHWD instruction */
906IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
907/** Function table for the PUNPCKHDQ instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
909/** Function table for the PUNPCKHQDQ instruction */
910IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
911
912/** Function table for the PXOR instruction */
913IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
914/** Function table for the PCMPEQB instruction */
915IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
916/** Function table for the PCMPEQW instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
918/** Function table for the PCMPEQD instruction */
919IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
920
921
922#if defined(IEM_LOG_MEMORY_WRITES)
923/** What IEM just wrote. */
924uint8_t g_abIemWrote[256];
925/** How much IEM just wrote. */
926size_t g_cbIemWrote;
927#endif
928
929
930/*********************************************************************************************************************************
931* Internal Functions *
932*********************************************************************************************************************************/
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
934IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
935IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
936IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
937/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
938IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
939IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
941IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
942IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
943IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
944IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
945IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
946IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
947IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
948IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
950#ifdef IEM_WITH_SETJMP
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
953DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
956#endif
957
958IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
959IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
960IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
970IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
971IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
972IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
973IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
974IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
975
976#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
979 uint8_t cbInstr);
980IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
983#endif
984
985#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
986IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
987IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr,
988 uint64_t uCr2);
989#endif
990
991
992/**
993 * Sets the pass up status.
994 *
995 * @returns VINF_SUCCESS.
996 * @param pVCpu The cross context virtual CPU structure of the
997 * calling thread.
998 * @param rcPassUp The pass up status. Must be informational.
999 * VINF_SUCCESS is not allowed.
1000 */
1001IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1002{
1003 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1004
1005 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1006 if (rcOldPassUp == VINF_SUCCESS)
1007 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1008 /* If both are EM scheduling codes, use EM priority rules. */
1009 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1010 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1011 {
1012 if (rcPassUp < rcOldPassUp)
1013 {
1014 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1015 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1016 }
1017 else
1018 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1019 }
1020 /* Override EM scheduling with specific status code. */
1021 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1022 {
1023 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1024 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1025 }
1026 /* Don't override specific status code, first come first served. */
1027 else
1028 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1029 return VINF_SUCCESS;
1030}
1031
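/* Typical use (sketch): an informational status from a physical access is
 * folded into the pass-up status and the access itself reports success, so
 * the instruction can complete before the status is surfaced to EM: */
#if 0
    if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif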
1032
1033/**
1034 * Calculates the CPU mode.
1035 *
1036 * This is mainly for updating IEMCPU::enmCpuMode.
1037 *
1038 * @returns CPU mode.
1039 * @param pVCpu The cross context virtual CPU structure of the
1040 * calling thread.
1041 */
1042DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1043{
1044 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1045 return IEMMODE_64BIT;
1046 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1047 return IEMMODE_32BIT;
1048 return IEMMODE_16BIT;
1049}
1050
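/* In other words (per the x86 architecture): CS.L=1 with EFER.LMA set gives
 * IEMMODE_64BIT; otherwise CS.D/B=1 gives IEMMODE_32BIT and CS.D/B=0 gives
 * IEMMODE_16BIT (real mode and virtual-8086 mode land in the latter). */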
1051
1052/**
1053 * Initializes the execution state.
1054 *
1055 * @param pVCpu The cross context virtual CPU structure of the
1056 * calling thread.
1057 * @param fBypassHandlers Whether to bypass access handlers.
1058 *
1059 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1060 * side-effects in strict builds.
1061 */
1062DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1063{
1064 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1065 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1066
1067#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1076#endif
1077
1078#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1079 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1080#endif
1081 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1082 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1083#ifdef VBOX_STRICT
1084 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1085 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1086 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1087 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1088 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1089 pVCpu->iem.s.uRexReg = 127;
1090 pVCpu->iem.s.uRexB = 127;
1091 pVCpu->iem.s.offModRm = 127;
1092 pVCpu->iem.s.uRexIndex = 127;
1093 pVCpu->iem.s.iEffSeg = 127;
1094 pVCpu->iem.s.idxPrefix = 127;
1095 pVCpu->iem.s.uVex3rdReg = 127;
1096 pVCpu->iem.s.uVexLength = 127;
1097 pVCpu->iem.s.fEvexStuff = 127;
1098 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1099# ifdef IEM_WITH_CODE_TLB
1100 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1101 pVCpu->iem.s.pbInstrBuf = NULL;
1102 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1103 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1104 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1105 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1106# else
1107 pVCpu->iem.s.offOpcode = 127;
1108 pVCpu->iem.s.cbOpcode = 127;
1109# endif
1110#endif
1111
1112 pVCpu->iem.s.cActiveMappings = 0;
1113 pVCpu->iem.s.iNextMapping = 0;
1114 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1115 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1116#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1117 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1118 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1119 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1120 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1121 if (!pVCpu->iem.s.fInPatchCode)
1122 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1123#endif
1124}
1125
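/* Typical pairing (sketch): iemInitExec() and iemUninitExec() bracket the
 * actual interpretation so the strict-build poisoning above is undone again
 * (the worker name is hypothetical): */
#if 0
    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = iemExecOneWorker(pVCpu);
    iemUninitExec(pVCpu);
#endif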
1126#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1127/**
1128 * Performs a minimal reinitialization of the execution state.
1129 *
1130 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1131 * 'world-switch' type operations on the CPU. Currently only nested
1132 * hardware-virtualization uses it.
1133 *
1134 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1135 */
1136IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1137{
1138 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1139 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1140
1141 pVCpu->iem.s.uCpl = uCpl;
1142 pVCpu->iem.s.enmCpuMode = enmMode;
1143 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1144 pVCpu->iem.s.enmEffAddrMode = enmMode;
1145 if (enmMode != IEMMODE_64BIT)
1146 {
1147 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1148 pVCpu->iem.s.enmEffOpSize = enmMode;
1149 }
1150 else
1151 {
1152 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1153 pVCpu->iem.s.enmEffOpSize = enmMode;
1154 }
1155 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1156#ifndef IEM_WITH_CODE_TLB
1157 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1158 pVCpu->iem.s.offOpcode = 0;
1159 pVCpu->iem.s.cbOpcode = 0;
1160#endif
1161 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1162}
1163#endif
1164
1165/**
1166 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1167 *
1168 * @param pVCpu The cross context virtual CPU structure of the
1169 * calling thread.
1170 */
1171DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1172{
1173 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1174#ifdef VBOX_STRICT
1175# ifdef IEM_WITH_CODE_TLB
1176 NOREF(pVCpu);
1177# else
1178 pVCpu->iem.s.cbOpcode = 0;
1179# endif
1180#else
1181 NOREF(pVCpu);
1182#endif
1183}
1184
1185
1186/**
1187 * Initializes the decoder state.
1188 *
1189 * iemReInitDecoder is mostly a copy of this function.
1190 *
1191 * @param pVCpu The cross context virtual CPU structure of the
1192 * calling thread.
1193 * @param fBypassHandlers Whether to bypass access handlers.
1194 */
1195DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1196{
1197 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1198 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1199
1200#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1209#endif
1210
1211#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1212 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1213#endif
1214 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1215 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1216 pVCpu->iem.s.enmCpuMode = enmMode;
1217 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1218 pVCpu->iem.s.enmEffAddrMode = enmMode;
1219 if (enmMode != IEMMODE_64BIT)
1220 {
1221 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1222 pVCpu->iem.s.enmEffOpSize = enmMode;
1223 }
1224 else
1225 {
1226 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1227 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1228 }
1229 pVCpu->iem.s.fPrefixes = 0;
1230 pVCpu->iem.s.uRexReg = 0;
1231 pVCpu->iem.s.uRexB = 0;
1232 pVCpu->iem.s.uRexIndex = 0;
1233 pVCpu->iem.s.idxPrefix = 0;
1234 pVCpu->iem.s.uVex3rdReg = 0;
1235 pVCpu->iem.s.uVexLength = 0;
1236 pVCpu->iem.s.fEvexStuff = 0;
1237 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1238#ifdef IEM_WITH_CODE_TLB
1239 pVCpu->iem.s.pbInstrBuf = NULL;
1240 pVCpu->iem.s.offInstrNextByte = 0;
1241 pVCpu->iem.s.offCurInstrStart = 0;
1242# ifdef VBOX_STRICT
1243 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1244 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1245 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1246# endif
1247#else
1248 pVCpu->iem.s.offOpcode = 0;
1249 pVCpu->iem.s.cbOpcode = 0;
1250#endif
1251 pVCpu->iem.s.offModRm = 0;
1252 pVCpu->iem.s.cActiveMappings = 0;
1253 pVCpu->iem.s.iNextMapping = 0;
1254 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1255 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1256#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1257 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1258 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1259 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1260 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1261 if (!pVCpu->iem.s.fInPatchCode)
1262 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1263#endif
1264
1265#ifdef DBGFTRACE_ENABLED
1266 switch (enmMode)
1267 {
1268 case IEMMODE_64BIT:
1269 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1270 break;
1271 case IEMMODE_32BIT:
1272 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1273 break;
1274 case IEMMODE_16BIT:
1275 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1276 break;
1277 }
1278#endif
1279}
1280
1281
1282/**
1283 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1284 *
1285 * This is mostly a copy of iemInitDecoder.
1286 *
1287 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1288 */
1289DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1290{
1291 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1292
1293#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1294 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1302#endif
1303
1304 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1305 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1306 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1307 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1308 pVCpu->iem.s.enmEffAddrMode = enmMode;
1309 if (enmMode != IEMMODE_64BIT)
1310 {
1311 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1312 pVCpu->iem.s.enmEffOpSize = enmMode;
1313 }
1314 else
1315 {
1316 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1317 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1318 }
1319 pVCpu->iem.s.fPrefixes = 0;
1320 pVCpu->iem.s.uRexReg = 0;
1321 pVCpu->iem.s.uRexB = 0;
1322 pVCpu->iem.s.uRexIndex = 0;
1323 pVCpu->iem.s.idxPrefix = 0;
1324 pVCpu->iem.s.uVex3rdReg = 0;
1325 pVCpu->iem.s.uVexLength = 0;
1326 pVCpu->iem.s.fEvexStuff = 0;
1327 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1328#ifdef IEM_WITH_CODE_TLB
1329 if (pVCpu->iem.s.pbInstrBuf)
1330 {
1331 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1332 - pVCpu->iem.s.uInstrBufPc;
1333 if (off < pVCpu->iem.s.cbInstrBufTotal)
1334 {
1335 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1336 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1337 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1338 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1339 else
1340 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1341 }
1342 else
1343 {
1344 pVCpu->iem.s.pbInstrBuf = NULL;
1345 pVCpu->iem.s.offInstrNextByte = 0;
1346 pVCpu->iem.s.offCurInstrStart = 0;
1347 pVCpu->iem.s.cbInstrBuf = 0;
1348 pVCpu->iem.s.cbInstrBufTotal = 0;
1349 }
1350 }
1351 else
1352 {
1353 pVCpu->iem.s.offInstrNextByte = 0;
1354 pVCpu->iem.s.offCurInstrStart = 0;
1355 pVCpu->iem.s.cbInstrBuf = 0;
1356 pVCpu->iem.s.cbInstrBufTotal = 0;
1357 }
1358#else
1359 pVCpu->iem.s.cbOpcode = 0;
1360 pVCpu->iem.s.offOpcode = 0;
1361#endif
1362 pVCpu->iem.s.offModRm = 0;
1363 Assert(pVCpu->iem.s.cActiveMappings == 0);
1364 pVCpu->iem.s.iNextMapping = 0;
1365 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1366 Assert(pVCpu->iem.s.fBypassHandlers == false);
1367#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1368 if (!pVCpu->iem.s.fInPatchCode)
1369 { /* likely */ }
1370 else
1371 {
1372 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1373 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1374 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1375 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1376 if (!pVCpu->iem.s.fInPatchCode)
1377 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1378 }
1379#endif
1380
1381#ifdef DBGFTRACE_ENABLED
1382 switch (enmMode)
1383 {
1384 case IEMMODE_64BIT:
1385 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1386 break;
1387 case IEMMODE_32BIT:
1388 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1389 break;
1390 case IEMMODE_16BIT:
1391 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1392 break;
1393 }
1394#endif
1395}
1396
1397
1398
1399/**
1400 * Prefetches opcodes the first time when starting execution.
1401 *
1402 * @returns Strict VBox status code.
1403 * @param pVCpu The cross context virtual CPU structure of the
1404 * calling thread.
1405 * @param fBypassHandlers Whether to bypass access handlers.
1406 */
1407IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1408{
1409 iemInitDecoder(pVCpu, fBypassHandlers);
1410
1411#ifdef IEM_WITH_CODE_TLB
1412 /** @todo Do ITLB lookup here. */
1413
1414#else /* !IEM_WITH_CODE_TLB */
1415
1416 /*
1417 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1418 *
1419 * First translate CS:rIP to a physical address.
1420 */
1421 uint32_t cbToTryRead;
1422 RTGCPTR GCPtrPC;
1423 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1424 {
1425 cbToTryRead = PAGE_SIZE;
1426 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1427 if (IEM_IS_CANONICAL(GCPtrPC))
1428 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1429 else
1430 return iemRaiseGeneralProtectionFault0(pVCpu);
1431 }
1432 else
1433 {
1434 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1435 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1436 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1437 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1438 else
1439 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1440 if (cbToTryRead) { /* likely */ }
1441 else /* overflowed */
1442 {
1443 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1444 cbToTryRead = UINT32_MAX;
1445 }
1446 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1447 Assert(GCPtrPC <= UINT32_MAX);
1448 }
1449
1450# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1451 /* Allow interpretation of patch manager code blocks since they can for
1452 instance throw #PFs for perfectly good reasons. */
1453 if (pVCpu->iem.s.fInPatchCode)
1454 {
1455 size_t cbRead = 0;
1456 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1457 AssertRCReturn(rc, rc);
1458 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1459 return VINF_SUCCESS;
1460 }
1461# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1462
1463 RTGCPHYS GCPhys;
1464 uint64_t fFlags;
1465 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1466 if (RT_SUCCESS(rc)) { /* probable */ }
1467 else
1468 {
1469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1470 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1471 }
1472 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1473 else
1474 {
1475 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1476 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1477 }
1478 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1479 else
1480 {
1481 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1482 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1483 }
1484 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1485 /** @todo Check reserved bits and such stuff. PGM is better at doing
1486 * that, so do it when implementing the guest virtual address
1487 * TLB... */
1488
1489 /*
1490 * Read the bytes at this address.
1491 */
1492 PVM pVM = pVCpu->CTX_SUFF(pVM);
1493# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1494 size_t cbActual;
1495 if ( PATMIsEnabled(pVM)
1496 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1497 {
1498 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1499 Assert(cbActual > 0);
1500 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1501 }
1502 else
1503# endif
1504 {
1505 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1506 if (cbToTryRead > cbLeftOnPage)
1507 cbToTryRead = cbLeftOnPage;
1508 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1509 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1510
1511 if (!pVCpu->iem.s.fBypassHandlers)
1512 {
1513 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1514 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1515 { /* likely */ }
1516 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1517 {
1518 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1519 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1520 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1521 }
1522 else
1523 {
1524 Log((RT_SUCCESS(rcStrict)
1525 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1526 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1527 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1528 return rcStrict;
1529 }
1530 }
1531 else
1532 {
1533 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1534 if (RT_SUCCESS(rc))
1535 { /* likely */ }
1536 else
1537 {
1538 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1539 GCPtrPC, GCPhys, cbToTryRead, rc));
1540 return rc;
1541 }
1542 }
1543 pVCpu->iem.s.cbOpcode = cbToTryRead;
1544 }
1545#endif /* !IEM_WITH_CODE_TLB */
1546 return VINF_SUCCESS;
1547}
1548
1549
1550/**
1551 * Invalidates the IEM TLBs.
1552 *
1553 * This is called internally as well as by PGM when moving GC mappings.
1554 *
1556 * @param pVCpu The cross context virtual CPU structure of the calling
1557 * thread.
1558 * @param fVmm Set when PGM calls us with a remapping.
1559 */
1560VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1561{
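 /* Invalidation works by bumping the TLB revision rather than touching all 256
    entries: a lookup only hits when the entry tag carries the current revision,
    so stale entries simply miss.  Only when the revision counter wraps back to
    zero do the tags actually have to be scrubbed. */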
1562#ifdef IEM_WITH_CODE_TLB
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1565 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1566 { /* very likely */ }
1567 else
1568 {
1569 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1570 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1571 while (i-- > 0)
1572 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1573 }
1574#endif
1575
1576#ifdef IEM_WITH_DATA_TLB
1577 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1578 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1579 { /* very likely */ }
1580 else
1581 {
1582 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1583 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1584 while (i-- > 0)
1585 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1586 }
1587#endif
1588 NOREF(pVCpu); NOREF(fVmm);
1589}
1590
1591
1592/**
1593 * Invalidates a page in the TLBs.
1594 *
1595 * @param pVCpu The cross context virtual CPU structure of the calling
1596 * thread.
1597 * @param GCPtr The address of the page to invalidate.
1598 */
1599VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1600{
1601#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1602 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1603 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1604 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1605 uintptr_t idx = (uint8_t)GCPtr;
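 /* The TLBs are direct mapped with 256 entries: the low 8 bits of the page number
    select the entry, and a hit additionally requires the tag to carry the current
    TLB revision. */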
1606
1607# ifdef IEM_WITH_CODE_TLB
1608 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1609 {
1610 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1611 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1612 pVCpu->iem.s.cbInstrBufTotal = 0;
1613 }
1614# endif
1615
1616# ifdef IEM_WITH_DATA_TLB
1617 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1618 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1619# endif
1620#else
1621 NOREF(pVCpu); NOREF(GCPtr);
1622#endif
1623}
1624
1625
1626/**
1627 * Invalidates the host physical aspects of the IEM TLBs.
1628 *
1629 * This is called internally as well as by PGM when moving GC mappings.
1630 *
1631 * @param pVCpu The cross context virtual CPU structure of the calling
1632 * thread.
1633 */
1634VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1635{
1636#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1637 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1638
1639# ifdef IEM_WITH_CODE_TLB
1640 pVCpu->iem.s.cbInstrBufTotal = 0;
1641# endif
1642 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
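 /* Same revision trick as for the virtual tags: bumping uTlbPhysRev makes the
    cached physical info (pbMappingR3 and the PG_NO_* flags) of every entry stale
    at once; only on wraparound must the entries be cleared one by one. */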
1643 if (uTlbPhysRev != 0)
1644 {
1645 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1646 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1647 }
1648 else
1649 {
1650 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1651 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1652
1653 unsigned i;
1654# ifdef IEM_WITH_CODE_TLB
1655 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1656 while (i-- > 0)
1657 {
1658 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1659 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1660 }
1661# endif
1662# ifdef IEM_WITH_DATA_TLB
1663 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1664 while (i-- > 0)
1665 {
1666 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1667 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1668 }
1669# endif
1670 }
1671#else
1672 NOREF(pVCpu);
1673#endif
1674}
1675
1676
1677/**
1678 * Invalidates the host physical aspects of the IEM TLBs.
1679 *
1680 * This is called internally as well as by PGM when moving GC mappings.
1681 *
1682 * @param pVM The cross context VM structure.
1683 *
1684 * @remarks Caller holds the PGM lock.
1685 */
1686VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1687{
1688 RT_NOREF_PV(pVM);
1689}
1690
1691#ifdef IEM_WITH_CODE_TLB
1692
1693/**
1694 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1695 * failure and jumps.
1696 *
1697 * We end up here for a number of reasons:
1698 * - pbInstrBuf isn't yet initialized.
1699 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1700 * - Advancing beyond the CS segment limit.
1701 * - Fetching from non-mappable page (e.g. MMIO).
1702 *
1703 * @param pVCpu The cross context virtual CPU structure of the
1704 * calling thread.
1705 * @param pvDst Where to return the bytes.
1706 * @param cbDst Number of bytes to read.
1707 *
1708 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1709 */
1710IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1711{
1712#ifdef IN_RING3
1713 for (;;)
1714 {
1715 Assert(cbDst <= 8);
1716 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1717
1718 /*
1719 * We might have a partial buffer match, deal with that first to make the
1720 * rest simpler. This is the first part of the cross page/buffer case.
1721 */
1722 if (pVCpu->iem.s.pbInstrBuf != NULL)
1723 {
1724 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1725 {
1726 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1727 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1728 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1729
1730 cbDst -= cbCopy;
1731 pvDst = (uint8_t *)pvDst + cbCopy;
1732 offBuf += cbCopy;
1733 pVCpu->iem.s.offInstrNextByte = offBuf;
1734 }
1735 }
1736
1737 /*
1738 * Check segment limit, figuring how much we're allowed to access at this point.
1739 *
1740 * We will fault immediately if RIP is past the segment limit / in non-canonical
1741 * territory. If we do continue, there are one or more bytes to read before we
1742 * end up in trouble and we need to do that first before faulting.
1743 */
1744 RTGCPTR GCPtrFirst;
1745 uint32_t cbMaxRead;
1746 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1747 {
1748 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1749 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1750 { /* likely */ }
1751 else
1752 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1753 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1754 }
1755 else
1756 {
1757 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1758 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1759 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1760 { /* likely */ }
1761 else
1762 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1763 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1764 if (cbMaxRead != 0)
1765 { /* likely */ }
1766 else
1767 {
1768 /* Overflowed because address is 0 and limit is max. */
1769 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1770 cbMaxRead = X86_PAGE_SIZE;
1771 }
1772 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1773 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1774 if (cbMaxRead2 < cbMaxRead)
1775 cbMaxRead = cbMaxRead2;
1776 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1777 }
1778
1779 /*
1780 * Get the TLB entry for this piece of code.
1781 */
1782 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1783 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1784 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1785 if (pTlbe->uTag == uTag)
1786 {
1787 /* likely when executing lots of code, otherwise unlikely */
1788# ifdef VBOX_WITH_STATISTICS
1789 pVCpu->iem.s.CodeTlb.cTlbHits++;
1790# endif
1791 }
1792 else
1793 {
1794 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1795# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1796 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1797 {
1798 pTlbe->uTag = uTag;
1799 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1800 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1801 pTlbe->GCPhys = NIL_RTGCPHYS;
1802 pTlbe->pbMappingR3 = NULL;
1803 }
1804 else
1805# endif
1806 {
1807 RTGCPHYS GCPhys;
1808 uint64_t fFlags;
1809 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1810 if (RT_FAILURE(rc))
1811 {
1812 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1813 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1814 }
1815
1816 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1817 pTlbe->uTag = uTag;
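 /* The page-table bits are stored inverted: US/RW/D bits that are clear in the
    PTE end up set as the corresponding IEMTLBE_F_PT_NO_* flags, while the NX bit
    is shifted down into bit 0, i.e. IEMTLBE_F_PT_NO_EXEC (see the AssertCompile
    above). */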
1818 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1819 pTlbe->GCPhys = GCPhys;
1820 pTlbe->pbMappingR3 = NULL;
1821 }
1822 }
1823
1824 /*
1825 * Check TLB page table level access flags.
1826 */
1827 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1828 {
1829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1830 {
1831 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1832 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1833 }
1834 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1835 {
1836 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1837 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1838 }
1839 }
1840
1841# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1842 /*
1843 * Allow interpretation of patch manager code blocks since they can for
1844 * instance throw #PFs for perfectly good reasons.
1845 */
1846 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1847 { /* likely */ }
1848 else
1849 {
1850 /** @todo Could optimize this a little in ring-3 if we liked. */
1851 size_t cbRead = 0;
1852 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1853 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1854 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1855 return;
1856 }
1857# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1858
1859 /*
1860 * Look up the physical page info if necessary.
1861 */
1862 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1863 { /* not necessary */ }
1864 else
1865 {
1866 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1867 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1868 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1869 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1870 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1871 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1872 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1873 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1874 }
1875
1876# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1877 /*
1878 * Try do a direct read using the pbMappingR3 pointer.
1879 */
1880 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1881 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1882 {
1883 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1884 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
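 /* Publish the new instruction buffer window: cbInstrBufTotal covers everything
    directly readable on this page, while cbInstrBuf is clipped so that no more
    than 15 bytes (the architectural maximum instruction length) are decoded from
    the current instruction start. */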
1885 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1886 {
1887 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1888 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1889 }
1890 else
1891 {
1892 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1893 Assert(cbInstr < cbMaxRead);
1894 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1895 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1896 }
1897 if (cbDst <= cbMaxRead)
1898 {
1899 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1900 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1901 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1902 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1903 return;
1904 }
1905 pVCpu->iem.s.pbInstrBuf = NULL;
1906
1907 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1908 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1909 }
1910 else
1911# endif
1912#if 0
1913 /*
1914 * If there is no special read handling, we can read a bit more and
1915 * put it in the prefetch buffer.
1916 */
1917 if ( cbDst < cbMaxRead
1918 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1919 {
1920 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1921 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1922 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1923 { /* likely */ }
1924 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1925 {
1926 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1927 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1928 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1929 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1930 }
1931 else
1932 {
1933 Log((RT_SUCCESS(rcStrict)
1934 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1935 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1936 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1937 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1938 }
1939 }
1940 /*
1941 * Special read handling, so only read exactly what's needed.
1942 * This is a highly unlikely scenario.
1943 */
1944 else
1945#endif
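 /* Slow read path: fetch only what the caller actually needs via PGMPhysRead,
    honouring any access handlers on the page. */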
1946 {
1947 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1948 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1949 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1950 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1951 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1952 { /* likely */ }
1953 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1954 {
1955 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1956 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1957 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1958 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1959 }
1960 else
1961 {
1962 Log((RT_SUCCESS(rcStrict)
1963 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1964 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1965 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1966 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1967 }
1968 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1969 if (cbToRead == cbDst)
1970 return;
1971 }
1972
1973 /*
1974 * More to read, loop.
1975 */
1976 cbDst -= cbMaxRead;
1977 pvDst = (uint8_t *)pvDst + cbMaxRead;
1978 }
1979#else
1980 RT_NOREF(pvDst, cbDst);
1981 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1982#endif
1983}
1984
1985#else
1986
1987/**
1988 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1989 * exception if it fails.
1990 *
1991 * @returns Strict VBox status code.
1992 * @param pVCpu The cross context virtual CPU structure of the
1993 * calling thread.
1994 * @param cbMin The minimum number of bytes relative to offOpcode
1995 * that must be read.
1996 */
1997IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1998{
1999 /*
2000 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2001 *
2002 * First translate CS:rIP to a physical address.
2003 */
2004 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
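 /* cbLeft is the number of opcode bytes already buffered but not yet consumed;
    we only get called when that falls short of what the caller needs (cbMin). */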
2005 uint32_t cbToTryRead;
2006 RTGCPTR GCPtrNext;
2007 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2008 {
2009 cbToTryRead = PAGE_SIZE;
2010 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2011 if (!IEM_IS_CANONICAL(GCPtrNext))
2012 return iemRaiseGeneralProtectionFault0(pVCpu);
2013 }
2014 else
2015 {
2016 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2017 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2018 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2019 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2020 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2021 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2022 if (!cbToTryRead) /* overflowed */
2023 {
2024 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2025 cbToTryRead = UINT32_MAX;
2026 /** @todo check out wrapping around the code segment. */
2027 }
2028 if (cbToTryRead < cbMin - cbLeft)
2029 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2030 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2031 }
2032
2033 /* Only read up to the end of the page, and make sure we don't read more
2034 than the opcode buffer can hold. */
2035 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2036 if (cbToTryRead > cbLeftOnPage)
2037 cbToTryRead = cbLeftOnPage;
2038 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2039 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2040/** @todo r=bird: Convert assertion into undefined opcode exception? */
2041 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2042
2043# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2044 /* Allow interpretation of patch manager code blocks since they can for
2045 instance throw #PFs for perfectly good reasons. */
2046 if (pVCpu->iem.s.fInPatchCode)
2047 {
2048 size_t cbRead = 0;
2049 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2050 AssertRCReturn(rc, rc);
2051 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2052 return VINF_SUCCESS;
2053 }
2054# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2055
2056 RTGCPHYS GCPhys;
2057 uint64_t fFlags;
2058 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2059 if (RT_FAILURE(rc))
2060 {
2061 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2062 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2063 }
2064 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2065 {
2066 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2067 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2068 }
2069 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2070 {
2071 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2072 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2073 }
2074 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2075 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2076 /** @todo Check reserved bits and such stuff. PGM is better at doing
2077 * that, so do it when implementing the guest virtual address
2078 * TLB... */
2079
2080 /*
2081 * Read the bytes at this address.
2082 *
2083 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2084 * and since PATM should only patch the start of an instruction there
2085 * should be no need to check again here.
2086 */
2087 if (!pVCpu->iem.s.fBypassHandlers)
2088 {
2089 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2090 cbToTryRead, PGMACCESSORIGIN_IEM);
2091 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2092 { /* likely */ }
2093 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2094 {
2095 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2096 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2097 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2098 }
2099 else
2100 {
2101 Log((RT_SUCCESS(rcStrict)
2102 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2103 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2104 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2105 return rcStrict;
2106 }
2107 }
2108 else
2109 {
2110 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2111 if (RT_SUCCESS(rc))
2112 { /* likely */ }
2113 else
2114 {
2115 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2116 return rc;
2117 }
2118 }
2119 pVCpu->iem.s.cbOpcode += cbToTryRead;
2120 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2121
2122 return VINF_SUCCESS;
2123}
2124
2125#endif /* !IEM_WITH_CODE_TLB */
2126#ifndef IEM_WITH_SETJMP
2127
2128/**
2129 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2130 *
2131 * @returns Strict VBox status code.
2132 * @param pVCpu The cross context virtual CPU structure of the
2133 * calling thread.
2134 * @param pb Where to return the opcode byte.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2137{
2138 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2139 if (rcStrict == VINF_SUCCESS)
2140 {
2141 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2142 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2143 pVCpu->iem.s.offOpcode = offOpcode + 1;
2144 }
2145 else
2146 *pb = 0;
2147 return rcStrict;
2148}
2149
2150
2151/**
2152 * Fetches the next opcode byte.
2153 *
2154 * @returns Strict VBox status code.
2155 * @param pVCpu The cross context virtual CPU structure of the
2156 * calling thread.
2157 * @param pu8 Where to return the opcode byte.
2158 */
2159DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2160{
2161 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2162 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2163 {
2164 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2165 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2166 return VINF_SUCCESS;
2167 }
2168 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2169}
2170
2171#else /* IEM_WITH_SETJMP */
2172
2173/**
2174 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2175 *
2176 * @returns The opcode byte.
2177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2178 */
2179DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2180{
2181# ifdef IEM_WITH_CODE_TLB
2182 uint8_t u8;
2183 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2184 return u8;
2185# else
2186 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2187 if (rcStrict == VINF_SUCCESS)
2188 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2189 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2190# endif
2191}
2192
2193
2194/**
2195 * Fetches the next opcode byte, longjmp on error.
2196 *
2197 * @returns The opcode byte.
2198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2199 */
2200DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2201{
2202# ifdef IEM_WITH_CODE_TLB
2203 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2204 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2205 if (RT_LIKELY( pbBuf != NULL
2206 && offBuf < pVCpu->iem.s.cbInstrBuf))
2207 {
2208 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2209 return pbBuf[offBuf];
2210 }
2211# else
2212 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2213 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2214 {
2215 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2216 return pVCpu->iem.s.abOpcode[offOpcode];
2217 }
2218# endif
2219 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2220}
2221
2222#endif /* IEM_WITH_SETJMP */
2223
2224/**
2225 * Fetches the next opcode byte, returns automatically on failure.
2226 *
2227 * @param a_pu8 Where to return the opcode byte.
2228 * @remark Implicitly references pVCpu.
2229 */
2230#ifndef IEM_WITH_SETJMP
2231# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2232 do \
2233 { \
2234 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2235 if (rcStrict2 == VINF_SUCCESS) \
2236 { /* likely */ } \
2237 else \
2238 return rcStrict2; \
2239 } while (0)
2240#else
2241# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2242#endif /* IEM_WITH_SETJMP */
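/* Usage sketch (illustrative only, not code from this file): a decoder routine
   that returns a strict status code fetches an immediate byte like this; in the
   non-setjmp build the macro returns from the caller on failure, in the setjmp
   build it longjmps from the slow fetch path instead:

       uint8_t bImm8;
       IEM_OPCODE_GET_NEXT_U8(&bImm8);
       // ... decode using bImm8 ...
*/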
2243
2244
2245#ifndef IEM_WITH_SETJMP
2246/**
2247 * Fetches the next signed byte from the opcode stream.
2248 *
2249 * @returns Strict VBox status code.
2250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2251 * @param pi8 Where to return the signed byte.
2252 */
2253DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2254{
2255 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2256}
2257#endif /* !IEM_WITH_SETJMP */
2258
2259
2260/**
2261 * Fetches the next signed byte from the opcode stream, returning automatically
2262 * on failure.
2263 *
2264 * @param a_pi8 Where to return the signed byte.
2265 * @remark Implicitly references pVCpu.
2266 */
2267#ifndef IEM_WITH_SETJMP
2268# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2269 do \
2270 { \
2271 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2272 if (rcStrict2 != VINF_SUCCESS) \
2273 return rcStrict2; \
2274 } while (0)
2275#else /* IEM_WITH_SETJMP */
2276# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2277
2278#endif /* IEM_WITH_SETJMP */
2279
2280#ifndef IEM_WITH_SETJMP
2281
2282/**
2283 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2284 *
2285 * @returns Strict VBox status code.
2286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2287 * @param pu16 Where to return the opcode word.
2288 */
2289DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2290{
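 /* Fetch one more byte and sign-extend it: casting to int8_t before assigning to
    the wider unsigned type widens with the sign bit. */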
2291 uint8_t u8;
2292 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2293 if (rcStrict == VINF_SUCCESS)
2294 *pu16 = (int8_t)u8;
2295 return rcStrict;
2296}
2297
2298
2299/**
2300 * Fetches the next signed byte from the opcode stream, extending it to
2301 * unsigned 16-bit.
2302 *
2303 * @returns Strict VBox status code.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 * @param pu16 Where to return the unsigned word.
2306 */
2307DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2308{
2309 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2310 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2311 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2312
2313 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2314 pVCpu->iem.s.offOpcode = offOpcode + 1;
2315 return VINF_SUCCESS;
2316}
2317
2318#endif /* !IEM_WITH_SETJMP */
2319
2320/**
2321 * Fetches the next signed byte from the opcode stream, sign-extending it to
2322 * a word, and returns automatically on failure.
2323 *
2324 * @param a_pu16 Where to return the word.
2325 * @remark Implicitly references pVCpu.
2326 */
2327#ifndef IEM_WITH_SETJMP
2328# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2329 do \
2330 { \
2331 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2332 if (rcStrict2 != VINF_SUCCESS) \
2333 return rcStrict2; \
2334 } while (0)
2335#else
2336# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2337#endif
2338
2339#ifndef IEM_WITH_SETJMP
2340
2341/**
2342 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2343 *
2344 * @returns Strict VBox status code.
2345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2346 * @param pu32 Where to return the opcode dword.
2347 */
2348DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2349{
2350 uint8_t u8;
2351 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2352 if (rcStrict == VINF_SUCCESS)
2353 *pu32 = (int8_t)u8;
2354 return rcStrict;
2355}
2356
2357
2358/**
2359 * Fetches the next signed byte from the opcode stream, extending it to
2360 * unsigned 32-bit.
2361 *
2362 * @returns Strict VBox status code.
2363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2364 * @param pu32 Where to return the unsigned dword.
2365 */
2366DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2367{
2368 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2369 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2370 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2371
2372 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2373 pVCpu->iem.s.offOpcode = offOpcode + 1;
2374 return VINF_SUCCESS;
2375}
2376
2377#endif /* !IEM_WITH_SETJMP */
2378
2379/**
2380 * Fetches the next signed byte from the opcode stream, sign-extending it to
2381 * a double word, and returns automatically on failure.
2382 *
2383 * @param a_pu32 Where to return the double word.
2384 * @remark Implicitly references pVCpu.
2385 */
2386#ifndef IEM_WITH_SETJMP
2387# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2388 do \
2389 { \
2390 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2391 if (rcStrict2 != VINF_SUCCESS) \
2392 return rcStrict2; \
2393 } while (0)
2394#else
2395# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2396#endif
2397
2398#ifndef IEM_WITH_SETJMP
2399
2400/**
2401 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2402 *
2403 * @returns Strict VBox status code.
2404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2405 * @param pu64 Where to return the opcode qword.
2406 */
2407DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2408{
2409 uint8_t u8;
2410 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2411 if (rcStrict == VINF_SUCCESS)
2412 *pu64 = (int8_t)u8;
2413 return rcStrict;
2414}
2415
2416
2417/**
2418 * Fetches the next signed byte from the opcode stream, extending it to
2419 * unsigned 64-bit.
2420 *
2421 * @returns Strict VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2423 * @param pu64 Where to return the unsigned qword.
2424 */
2425DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2426{
2427 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2428 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2429 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2430
2431 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2432 pVCpu->iem.s.offOpcode = offOpcode + 1;
2433 return VINF_SUCCESS;
2434}
2435
2436#endif /* !IEM_WITH_SETJMP */
2437
2438
2439/**
2440 * Fetches the next signed byte from the opcode stream, sign-extending it to
2441 * a quad word, and returns automatically on failure.
2442 *
2443 * @param a_pu64 Where to return the quad word.
2444 * @remark Implicitly references pVCpu.
2445 */
2446#ifndef IEM_WITH_SETJMP
2447# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2448 do \
2449 { \
2450 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2451 if (rcStrict2 != VINF_SUCCESS) \
2452 return rcStrict2; \
2453 } while (0)
2454#else
2455# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2456#endif
2457
2458
2459#ifndef IEM_WITH_SETJMP
2460/**
2461 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2462 *
2463 * @returns Strict VBox status code.
2464 * @param pVCpu The cross context virtual CPU structure of the
2465 * calling thread.
2466 * @param pu8 Where to return the opcode byte.
2467 */
2468DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2469{
2470 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2471 pVCpu->iem.s.offModRm = offOpcode;
2472 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2473 {
2474 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2475 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2476 return VINF_SUCCESS;
2477 }
2478 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2479}
2480#else /* IEM_WITH_SETJMP */
2481/**
2482 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset; longjmp on error.
2483 *
2484 * @returns The opcode byte.
2485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2486 */
2487DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2488{
2489# ifdef IEM_WITH_CODE_TLB
2490 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2491 pVCpu->iem.s.offModRm = offBuf;
2492 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2493 if (RT_LIKELY( pbBuf != NULL
2494 && offBuf < pVCpu->iem.s.cbInstrBuf))
2495 {
2496 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2497 return pbBuf[offBuf];
2498 }
2499# else
2500 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2501 pVCpu->iem.s.offModRm = offOpcode;
2502 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2503 {
2504 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2505 return pVCpu->iem.s.abOpcode[offOpcode];
2506 }
2507# endif
2508 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2509}
2510#endif /* IEM_WITH_SETJMP */
2511
2512/**
2513 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2514 * on failure.
2515 *
2516 * Will note down the position of the ModR/M byte for VT-x exits.
2517 *
2518 * @param a_pbRm Where to return the RM opcode byte.
2519 * @remark Implicitly references pVCpu.
2520 */
2521#ifndef IEM_WITH_SETJMP
2522# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2523 do \
2524 { \
2525 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2526 if (rcStrict2 == VINF_SUCCESS) \
2527 { /* likely */ } \
2528 else \
2529 return rcStrict2; \
2530 } while (0)
2531#else
2532# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2533#endif /* IEM_WITH_SETJMP */
2534
2535
2536#ifndef IEM_WITH_SETJMP
2537
2538/**
2539 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2540 *
2541 * @returns Strict VBox status code.
2542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2543 * @param pu16 Where to return the opcode word.
2544 */
2545DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2546{
2547 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2548 if (rcStrict == VINF_SUCCESS)
2549 {
2550 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
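 /* Assemble the little-endian opcode word: hosts that tolerate unaligned accesses
    read it directly, others build it byte by byte with RT_MAKE_U16. */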
2551# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2552 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2553# else
2554 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2555# endif
2556 pVCpu->iem.s.offOpcode = offOpcode + 2;
2557 }
2558 else
2559 *pu16 = 0;
2560 return rcStrict;
2561}
2562
2563
2564/**
2565 * Fetches the next opcode word.
2566 *
2567 * @returns Strict VBox status code.
2568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2569 * @param pu16 Where to return the opcode word.
2570 */
2571DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2572{
2573 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2574 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2575 {
2576 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2577# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2578 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2579# else
2580 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2581# endif
2582 return VINF_SUCCESS;
2583 }
2584 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2585}
2586
2587#else /* IEM_WITH_SETJMP */
2588
2589/**
2590 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2591 *
2592 * @returns The opcode word.
2593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2594 */
2595DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2596{
2597# ifdef IEM_WITH_CODE_TLB
2598 uint16_t u16;
2599 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2600 return u16;
2601# else
2602 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2603 if (rcStrict == VINF_SUCCESS)
2604 {
2605 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2606 pVCpu->iem.s.offOpcode += 2;
2607# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2608 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2609# else
2610 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2611# endif
2612 }
2613 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2614# endif
2615}
2616
2617
2618/**
2619 * Fetches the next opcode word, longjmp on error.
2620 *
2621 * @returns The opcode word.
2622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2623 */
2624DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2625{
2626# ifdef IEM_WITH_CODE_TLB
2627 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2628 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2629 if (RT_LIKELY( pbBuf != NULL
2630 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2631 {
2632 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2633# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2634 return *(uint16_t const *)&pbBuf[offBuf];
2635# else
2636 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2637# endif
2638 }
2639# else
2640 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2641 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2642 {
2643 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2644# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2645 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2646# else
2647 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2648# endif
2649 }
2650# endif
2651 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2652}
2653
2654#endif /* IEM_WITH_SETJMP */
2655
2656
2657/**
2658 * Fetches the next opcode word, returns automatically on failure.
2659 *
2660 * @param a_pu16 Where to return the opcode word.
2661 * @remark Implicitly references pVCpu.
2662 */
2663#ifndef IEM_WITH_SETJMP
2664# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2665 do \
2666 { \
2667 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2668 if (rcStrict2 != VINF_SUCCESS) \
2669 return rcStrict2; \
2670 } while (0)
2671#else
2672# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2673#endif
2674
2675#ifndef IEM_WITH_SETJMP
2676
2677/**
2678 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2679 *
2680 * @returns Strict VBox status code.
2681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2682 * @param pu32 Where to return the opcode double word.
2683 */
2684DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2685{
2686 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2687 if (rcStrict == VINF_SUCCESS)
2688 {
2689 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2690 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2691 pVCpu->iem.s.offOpcode = offOpcode + 2;
2692 }
2693 else
2694 *pu32 = 0;
2695 return rcStrict;
2696}
2697
2698
2699/**
2700 * Fetches the next opcode word, zero extending it to a double word.
2701 *
2702 * @returns Strict VBox status code.
2703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2704 * @param pu32 Where to return the opcode double word.
2705 */
2706DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2707{
2708 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2709 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2710 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2711
2712 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2713 pVCpu->iem.s.offOpcode = offOpcode + 2;
2714 return VINF_SUCCESS;
2715}
2716
2717#endif /* !IEM_WITH_SETJMP */
2718
2719
2720/**
2721 * Fetches the next opcode word and zero extends it to a double word, returns
2722 * automatically on failure.
2723 *
2724 * @param a_pu32 Where to return the opcode double word.
2725 * @remark Implicitly references pVCpu.
2726 */
2727#ifndef IEM_WITH_SETJMP
2728# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2729 do \
2730 { \
2731 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2732 if (rcStrict2 != VINF_SUCCESS) \
2733 return rcStrict2; \
2734 } while (0)
2735#else
2736# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2737#endif
2738
2739#ifndef IEM_WITH_SETJMP
2740
2741/**
2742 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2743 *
2744 * @returns Strict VBox status code.
2745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2746 * @param pu64 Where to return the opcode quad word.
2747 */
2748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2749{
2750 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2751 if (rcStrict == VINF_SUCCESS)
2752 {
2753 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2754 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2755 pVCpu->iem.s.offOpcode = offOpcode + 2;
2756 }
2757 else
2758 *pu64 = 0;
2759 return rcStrict;
2760}
2761
2762
2763/**
2764 * Fetches the next opcode word, zero extending it to a quad word.
2765 *
2766 * @returns Strict VBox status code.
2767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2768 * @param pu64 Where to return the opcode quad word.
2769 */
2770DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2771{
2772 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2773 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2774 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2775
2776 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2777 pVCpu->iem.s.offOpcode = offOpcode + 2;
2778 return VINF_SUCCESS;
2779}
2780
2781#endif /* !IEM_WITH_SETJMP */
2782
2783/**
2784 * Fetches the next opcode word and zero extends it to a quad word, returns
2785 * automatically on failure.
2786 *
2787 * @param a_pu64 Where to return the opcode quad word.
2788 * @remark Implicitly references pVCpu.
2789 */
2790#ifndef IEM_WITH_SETJMP
2791# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2792 do \
2793 { \
2794 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2795 if (rcStrict2 != VINF_SUCCESS) \
2796 return rcStrict2; \
2797 } while (0)
2798#else
2799# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2800#endif
2801
2802
2803#ifndef IEM_WITH_SETJMP
2804/**
2805 * Fetches the next signed word from the opcode stream.
2806 *
2807 * @returns Strict VBox status code.
2808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2809 * @param pi16 Where to return the signed word.
2810 */
2811DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2812{
2813 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2814}
2815#endif /* !IEM_WITH_SETJMP */
2816
2817
2818/**
2819 * Fetches the next signed word from the opcode stream, returning automatically
2820 * on failure.
2821 *
2822 * @param a_pi16 Where to return the signed word.
2823 * @remark Implicitly references pVCpu.
2824 */
2825#ifndef IEM_WITH_SETJMP
2826# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2827 do \
2828 { \
2829 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2830 if (rcStrict2 != VINF_SUCCESS) \
2831 return rcStrict2; \
2832 } while (0)
2833#else
2834# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2835#endif
2836
2837#ifndef IEM_WITH_SETJMP
2838
2839/**
2840 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2841 *
2842 * @returns Strict VBox status code.
2843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2844 * @param pu32 Where to return the opcode dword.
2845 */
2846DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2847{
2848 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2849 if (rcStrict == VINF_SUCCESS)
2850 {
2851 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2852# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2853 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2854# else
2855 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2856 pVCpu->iem.s.abOpcode[offOpcode + 1],
2857 pVCpu->iem.s.abOpcode[offOpcode + 2],
2858 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2859# endif
2860 pVCpu->iem.s.offOpcode = offOpcode + 4;
2861 }
2862 else
2863 *pu32 = 0;
2864 return rcStrict;
2865}
2866
2867
2868/**
2869 * Fetches the next opcode dword.
2870 *
2871 * @returns Strict VBox status code.
2872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2873 * @param pu32 Where to return the opcode double word.
2874 */
2875DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2876{
2877 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2878 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2879 {
2880 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2881# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2882 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2883# else
2884 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2885 pVCpu->iem.s.abOpcode[offOpcode + 1],
2886 pVCpu->iem.s.abOpcode[offOpcode + 2],
2887 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2888# endif
2889 return VINF_SUCCESS;
2890 }
2891 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2892}
2893
2894#else /* IEM_WITH_SETJMP */
2895
2896/**
2897 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2898 *
2899 * @returns The opcode dword.
2900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2901 */
2902DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2903{
2904# ifdef IEM_WITH_CODE_TLB
2905 uint32_t u32;
2906 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2907 return u32;
2908# else
2909 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2910 if (rcStrict == VINF_SUCCESS)
2911 {
2912 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2913 pVCpu->iem.s.offOpcode = offOpcode + 4;
2914# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2915 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2916# else
2917 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2918 pVCpu->iem.s.abOpcode[offOpcode + 1],
2919 pVCpu->iem.s.abOpcode[offOpcode + 2],
2920 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2921# endif
2922 }
2923 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2924# endif
2925}
2926
2927
2928/**
2929 * Fetches the next opcode dword, longjmp on error.
2930 *
2931 * @returns The opcode dword.
2932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2933 */
2934DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2935{
2936# ifdef IEM_WITH_CODE_TLB
2937 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2938 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2939 if (RT_LIKELY( pbBuf != NULL
2940 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2941 {
2942 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2943# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2944 return *(uint32_t const *)&pbBuf[offBuf];
2945# else
2946 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2947 pbBuf[offBuf + 1],
2948 pbBuf[offBuf + 2],
2949 pbBuf[offBuf + 3]);
2950# endif
2951 }
2952# else
2953 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2954 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2955 {
2956 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2957# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2958 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2959# else
2960 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2961 pVCpu->iem.s.abOpcode[offOpcode + 1],
2962 pVCpu->iem.s.abOpcode[offOpcode + 2],
2963 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2964# endif
2965 }
2966# endif
2967 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2968}
2969
2970#endif /* !IEM_WITH_SETJMP */
2971
2972
2973/**
2974 * Fetches the next opcode dword, returns automatically on failure.
2975 *
2976 * @param a_pu32 Where to return the opcode dword.
2977 * @remark Implicitly references pVCpu.
2978 */
2979#ifndef IEM_WITH_SETJMP
2980# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2981 do \
2982 { \
2983 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2984 if (rcStrict2 != VINF_SUCCESS) \
2985 return rcStrict2; \
2986 } while (0)
2987#else
2988# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2989#endif
2990
2991#ifndef IEM_WITH_SETJMP
2992
2993/**
2994 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2995 *
2996 * @returns Strict VBox status code.
2997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2998 * @param pu64 Where to return the opcode dword.
2999 */
3000DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3001{
3002 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3003 if (rcStrict == VINF_SUCCESS)
3004 {
3005 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3006 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3007 pVCpu->iem.s.abOpcode[offOpcode + 1],
3008 pVCpu->iem.s.abOpcode[offOpcode + 2],
3009 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3010 pVCpu->iem.s.offOpcode = offOpcode + 4;
3011 }
3012 else
3013 *pu64 = 0;
3014 return rcStrict;
3015}
3016
3017
3018/**
3019 * Fetches the next opcode dword, zero extending it to a quad word.
3020 *
3021 * @returns Strict VBox status code.
3022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3023 * @param pu64 Where to return the opcode quad word.
3024 */
3025DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3026{
3027 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3028 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3029 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3030
3031 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3032 pVCpu->iem.s.abOpcode[offOpcode + 1],
3033 pVCpu->iem.s.abOpcode[offOpcode + 2],
3034 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3035 pVCpu->iem.s.offOpcode = offOpcode + 4;
3036 return VINF_SUCCESS;
3037}
3038
3039#endif /* !IEM_WITH_SETJMP */
3040
3041
3042/**
3043 * Fetches the next opcode dword and zero extends it to a quad word, returns
3044 * automatically on failure.
3045 *
3046 * @param a_pu64 Where to return the opcode quad word.
3047 * @remark Implicitly references pVCpu.
3048 */
3049#ifndef IEM_WITH_SETJMP
3050# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3051 do \
3052 { \
3053 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3054 if (rcStrict2 != VINF_SUCCESS) \
3055 return rcStrict2; \
3056 } while (0)
3057#else
3058# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3059#endif
3060
3061
3062#ifndef IEM_WITH_SETJMP
3063/**
3064 * Fetches the next signed double word from the opcode stream.
3065 *
3066 * @returns Strict VBox status code.
3067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3068 * @param pi32 Where to return the signed double word.
3069 */
3070DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3071{
3072 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3073}
3074#endif
3075
3076/**
3077 * Fetches the next signed double word from the opcode stream, returning
3078 * automatically on failure.
3079 *
3080 * @param a_pi32 Where to return the signed double word.
3081 * @remark Implicitly references pVCpu.
3082 */
3083#ifndef IEM_WITH_SETJMP
3084# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3085 do \
3086 { \
3087 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3088 if (rcStrict2 != VINF_SUCCESS) \
3089 return rcStrict2; \
3090 } while (0)
3091#else
3092# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3093#endif
3094
3095#ifndef IEM_WITH_SETJMP
3096
3097/**
3098 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3099 *
3100 * @returns Strict VBox status code.
3101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3102 * @param pu64 Where to return the opcode qword.
3103 */
3104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3105{
3106 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3107 if (rcStrict == VINF_SUCCESS)
3108 {
3109 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3110 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3111 pVCpu->iem.s.abOpcode[offOpcode + 1],
3112 pVCpu->iem.s.abOpcode[offOpcode + 2],
3113 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3114 pVCpu->iem.s.offOpcode = offOpcode + 4;
3115 }
3116 else
3117 *pu64 = 0;
3118 return rcStrict;
3119}
3120
3121
3122/**
3123 * Fetches the next opcode dword, sign extending it into a quad word.
3124 *
3125 * @returns Strict VBox status code.
3126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3127 * @param pu64 Where to return the opcode quad word.
3128 */
3129DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3130{
3131 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3132 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3133 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3134
3135 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3136 pVCpu->iem.s.abOpcode[offOpcode + 1],
3137 pVCpu->iem.s.abOpcode[offOpcode + 2],
3138 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3139 *pu64 = i32;
3140 pVCpu->iem.s.offOpcode = offOpcode + 4;
3141 return VINF_SUCCESS;
3142}
3143
3144#endif /* !IEM_WITH_SETJMP */
3145
3146
3147/**
3148 * Fetches the next opcode double word and sign extends it to a quad word,
3149 * returns automatically on failure.
3150 *
3151 * @param a_pu64 Where to return the opcode quad word.
3152 * @remark Implicitly references pVCpu.
3153 */
3154#ifndef IEM_WITH_SETJMP
3155# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3156 do \
3157 { \
3158 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3159 if (rcStrict2 != VINF_SUCCESS) \
3160 return rcStrict2; \
3161 } while (0)
3162#else
3163# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3164#endif
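/* Worked example (illustrative): for the opcode bytes FF FF FF FF the
 * zero-extending IEM_OPCODE_GET_NEXT_U32_ZX_U64 variant yields
 * 0x00000000ffffffff, while the sign-extending IEM_OPCODE_GET_NEXT_S32_SX_U64
 * variant yields 0xffffffffffffffff; the int32_t intermediate (or cast, in the
 * setjmp path) is what provides the sign extension when widening to uint64_t. */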
3165
3166#ifndef IEM_WITH_SETJMP
3167
3168/**
3169 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3170 *
3171 * @returns Strict VBox status code.
3172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3173 * @param pu64 Where to return the opcode qword.
3174 */
3175DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3176{
3177 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3178 if (rcStrict == VINF_SUCCESS)
3179 {
3180 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3181# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3182 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3183# else
3184 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3185 pVCpu->iem.s.abOpcode[offOpcode + 1],
3186 pVCpu->iem.s.abOpcode[offOpcode + 2],
3187 pVCpu->iem.s.abOpcode[offOpcode + 3],
3188 pVCpu->iem.s.abOpcode[offOpcode + 4],
3189 pVCpu->iem.s.abOpcode[offOpcode + 5],
3190 pVCpu->iem.s.abOpcode[offOpcode + 6],
3191 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3192# endif
3193 pVCpu->iem.s.offOpcode = offOpcode + 8;
3194 }
3195 else
3196 *pu64 = 0;
3197 return rcStrict;
3198}
3199
3200
3201/**
3202 * Fetches the next opcode qword.
3203 *
3204 * @returns Strict VBox status code.
3205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3206 * @param pu64 Where to return the opcode qword.
3207 */
3208DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3209{
3210 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3211 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3212 {
3213# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3214 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3215# else
3216 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3217 pVCpu->iem.s.abOpcode[offOpcode + 1],
3218 pVCpu->iem.s.abOpcode[offOpcode + 2],
3219 pVCpu->iem.s.abOpcode[offOpcode + 3],
3220 pVCpu->iem.s.abOpcode[offOpcode + 4],
3221 pVCpu->iem.s.abOpcode[offOpcode + 5],
3222 pVCpu->iem.s.abOpcode[offOpcode + 6],
3223 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3224# endif
3225 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3226 return VINF_SUCCESS;
3227 }
3228 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3229}
3230
3231#else /* IEM_WITH_SETJMP */
3232
3233/**
3234 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3235 *
3236 * @returns The opcode qword.
3237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3238 */
3239DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3240{
3241# ifdef IEM_WITH_CODE_TLB
3242 uint64_t u64;
3243 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3244 return u64;
3245# else
3246 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3247 if (rcStrict == VINF_SUCCESS)
3248 {
3249 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3250 pVCpu->iem.s.offOpcode = offOpcode + 8;
3251# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3252 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3253# else
3254 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3255 pVCpu->iem.s.abOpcode[offOpcode + 1],
3256 pVCpu->iem.s.abOpcode[offOpcode + 2],
3257 pVCpu->iem.s.abOpcode[offOpcode + 3],
3258 pVCpu->iem.s.abOpcode[offOpcode + 4],
3259 pVCpu->iem.s.abOpcode[offOpcode + 5],
3260 pVCpu->iem.s.abOpcode[offOpcode + 6],
3261 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3262# endif
3263 }
3264 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3265# endif
3266}
3267
3268
3269/**
3270 * Fetches the next opcode qword, longjmp on error.
3271 *
3272 * @returns The opcode qword.
3273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3274 */
3275DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3276{
3277# ifdef IEM_WITH_CODE_TLB
3278 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3279 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3280 if (RT_LIKELY( pbBuf != NULL
3281 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3282 {
3283 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3284# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3285 return *(uint64_t const *)&pbBuf[offBuf];
3286# else
3287 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3288 pbBuf[offBuf + 1],
3289 pbBuf[offBuf + 2],
3290 pbBuf[offBuf + 3],
3291 pbBuf[offBuf + 4],
3292 pbBuf[offBuf + 5],
3293 pbBuf[offBuf + 6],
3294 pbBuf[offBuf + 7]);
3295# endif
3296 }
3297# else
3298 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3299 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3300 {
3301 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3302# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3303 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3304# else
3305 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3306 pVCpu->iem.s.abOpcode[offOpcode + 1],
3307 pVCpu->iem.s.abOpcode[offOpcode + 2],
3308 pVCpu->iem.s.abOpcode[offOpcode + 3],
3309 pVCpu->iem.s.abOpcode[offOpcode + 4],
3310 pVCpu->iem.s.abOpcode[offOpcode + 5],
3311 pVCpu->iem.s.abOpcode[offOpcode + 6],
3312 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3313# endif
3314 }
3315# endif
3316 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3317}
3318
3319#endif /* IEM_WITH_SETJMP */
3320
3321/**
3322 * Fetches the next opcode quad word, returns automatically on failure.
3323 *
3324 * @param a_pu64 Where to return the opcode quad word.
3325 * @remark Implicitly references pVCpu.
3326 */
3327#ifndef IEM_WITH_SETJMP
3328# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3329 do \
3330 { \
3331 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3332 if (rcStrict2 != VINF_SUCCESS) \
3333 return rcStrict2; \
3334 } while (0)
3335#else
3336# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3337#endif
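/* Usage sketch (illustrative): a decoder function returning VBOXSTRICTRC
 * fetches a 64-bit immediate, such as the one taken by REX.W + B8
 * (MOV r64, imm64), like this:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *
 * In the non-setjmp build a fetch failure makes the macro return the strict
 * status code to the caller; in the setjmp build it longjmps instead, so the
 * plain assignment form above suffices. */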
3338
3339
3340/** @name Misc Worker Functions.
3341 * @{
3342 */
3343
3344/**
3345 * Gets the exception class for the specified exception vector.
3346 *
3347 * @returns The class of the specified exception.
3348 * @param uVector The exception vector.
3349 */
3350IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3351{
3352 Assert(uVector <= X86_XCPT_LAST);
3353 switch (uVector)
3354 {
3355 case X86_XCPT_DE:
3356 case X86_XCPT_TS:
3357 case X86_XCPT_NP:
3358 case X86_XCPT_SS:
3359 case X86_XCPT_GP:
3360 case X86_XCPT_SX: /* AMD only */
3361 return IEMXCPTCLASS_CONTRIBUTORY;
3362
3363 case X86_XCPT_PF:
3364 case X86_XCPT_VE: /* Intel only */
3365 return IEMXCPTCLASS_PAGE_FAULT;
3366
3367 case X86_XCPT_DF:
3368 return IEMXCPTCLASS_DOUBLE_FAULT;
3369 }
3370 return IEMXCPTCLASS_BENIGN;
3371}
3372
3373
3374/**
3375 * Evaluates how to handle an exception caused during delivery of another event
3376 * (exception / interrupt).
3377 *
3378 * @returns How to handle the recursive exception.
3379 * @param pVCpu The cross context virtual CPU structure of the
3380 * calling thread.
3381 * @param fPrevFlags The flags of the previous event.
3382 * @param uPrevVector The vector of the previous event.
3383 * @param fCurFlags The flags of the current exception.
3384 * @param uCurVector The vector of the current exception.
3385 * @param pfXcptRaiseInfo Where to store additional information about the
3386 * exception condition. Optional.
3387 */
3388VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3389 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3390{
3391 /*
3392 * Only CPU exceptions can be raised while delivering other events; software-interrupt
3393 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3394 */
3395 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3396 Assert(pVCpu); RT_NOREF(pVCpu);
3397 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3398
3399 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3400 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3401 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3402 {
3403 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3404 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3405 {
3406 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3407 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3408 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3409 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3410 {
3411 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3412 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3413 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3414 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3415 uCurVector, pVCpu->cpum.GstCtx.cr2));
3416 }
3417 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3418 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3419 {
3420 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3421 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3422 }
3423 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3424 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3425 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3426 {
3427 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3428 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3429 }
3430 }
3431 else
3432 {
3433 if (uPrevVector == X86_XCPT_NMI)
3434 {
3435 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3436 if (uCurVector == X86_XCPT_PF)
3437 {
3438 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3439 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3440 }
3441 }
3442 else if ( uPrevVector == X86_XCPT_AC
3443 && uCurVector == X86_XCPT_AC)
3444 {
3445 enmRaise = IEMXCPTRAISE_CPU_HANG;
3446 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3447 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3448 }
3449 }
3450 }
3451 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3452 {
3453 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3454 if (uCurVector == X86_XCPT_PF)
3455 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3456 }
3457 else
3458 {
3459 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3460 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3461 }
3462
3463 if (pfXcptRaiseInfo)
3464 *pfXcptRaiseInfo = fRaiseInfo;
3465 return enmRaise;
3466}
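/* A few concrete outcomes of the rules above (illustrative): a #GP raised
 * while delivering a #NP (both contributory) yields IEMXCPTRAISE_DOUBLE_FAULT;
 * a contributory fault or #PF raised while delivering that #DF escalates to
 * IEMXCPTRAISE_TRIPLE_FAULT; and a #DE raised while delivering a benign #UD
 * simply remains IEMXCPTRAISE_CURRENT_XCPT. */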
3467
3468
3469/**
3470 * Enters the CPU shutdown state initiated by a triple fault or other
3471 * unrecoverable conditions.
3472 *
3473 * @returns Strict VBox status code.
3474 * @param pVCpu The cross context virtual CPU structure of the
3475 * calling thread.
3476 */
3477IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3478{
3479 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3480 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3481
3482 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3483 {
3484 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3485 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3486 }
3487
3488 RT_NOREF(pVCpu);
3489 return VINF_EM_TRIPLE_FAULT;
3490}
3491
3492
3493/**
3494 * Validates a new SS segment.
3495 *
3496 * @returns VBox strict status code.
3497 * @param pVCpu The cross context virtual CPU structure of the
3498 * calling thread.
3499 * @param NewSS The new SS selector.
3500 * @param uCpl The CPL to load the stack for.
3501 * @param pDesc Where to return the descriptor.
3502 */
3503IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3504{
3505 /* Null selectors are not allowed (we're not called for dispatching
3506 interrupts with SS=0 in long mode). */
3507 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3508 {
3509 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3510 return iemRaiseTaskSwitchFault0(pVCpu);
3511 }
3512
3513 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3514 if ((NewSS & X86_SEL_RPL) != uCpl)
3515 {
3516 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3517 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3518 }
3519
3520 /*
3521 * Read the descriptor.
3522 */
3523 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3524 if (rcStrict != VINF_SUCCESS)
3525 return rcStrict;
3526
3527 /*
3528 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3529 */
3530 if (!pDesc->Legacy.Gen.u1DescType)
3531 {
3532 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3533 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3534 }
3535
3536 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3537 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3538 {
3539 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3540 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3541 }
3542 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3543 {
3544 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3545 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3546 }
3547
3548 /* Is it there? */
3549 /** @todo testcase: Is this checked before the canonical / limit check below? */
3550 if (!pDesc->Legacy.Gen.u1Present)
3551 {
3552 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3553 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3554 }
3555
3556 return VINF_SUCCESS;
3557}
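/* Example (illustrative): with uCpl=0, a NewSS such as 0x0010 referring to a
 * present, writable, DPL=0 data segment passes all of the checks above; a code
 * segment, a read-only data segment or a DPL/RPL mismatch raises #TS with the
 * selector as error code, and a non-present segment raises #NP instead. */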
3558
3559
3560/**
3561 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3562 * not.
3563 *
3564 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3565 */
3566#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3567# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3568#else
3569# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3570#endif
3571
3572/**
3573 * Updates the EFLAGS in the correct manner wrt. PATM.
3574 *
3575 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3576 * @param a_fEfl The new EFLAGS.
3577 */
3578#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3579# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3580#else
3581# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3582#endif
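/* Usage sketch (illustrative): EFLAGS read-modify-write sequences go through
 * these wrappers rather than poking cpum.GstCtx.eflags directly, e.g.
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~X86_EFL_TF;
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 *
 * so the raw-mode (PATM) build sees and maintains a consistent view of the
 * flags; in other builds both macros collapse to plain guest-context access. */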
3583
3584
3585/** @} */
3586
3587/** @name Raising Exceptions.
3588 *
3589 * @{
3590 */
3591
3592
3593/**
3594 * Loads the specified stack far pointer from the TSS.
3595 *
3596 * @returns VBox strict status code.
3597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3598 * @param uCpl The CPL to load the stack for.
3599 * @param pSelSS Where to return the new stack segment.
3600 * @param puEsp Where to return the new stack pointer.
3601 */
3602IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3603{
3604 VBOXSTRICTRC rcStrict;
3605 Assert(uCpl < 4);
3606
3607 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3608 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3609 {
3610 /*
3611 * 16-bit TSS (X86TSS16).
3612 */
3613 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3614 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3615 {
3616 uint32_t off = uCpl * 4 + 2;
3617 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3618 {
3619 /** @todo check actual access pattern here. */
3620 uint32_t u32Tmp = 0; /* gcc maybe... */
3621 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3622 if (rcStrict == VINF_SUCCESS)
3623 {
3624 *puEsp = RT_LOWORD(u32Tmp);
3625 *pSelSS = RT_HIWORD(u32Tmp);
3626 return VINF_SUCCESS;
3627 }
3628 }
3629 else
3630 {
3631 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3632 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3633 }
3634 break;
3635 }
3636
3637 /*
3638 * 32-bit TSS (X86TSS32).
3639 */
3640 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3641 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3642 {
3643 uint32_t off = uCpl * 8 + 4;
3644 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3645 {
3646 /** @todo check actual access pattern here. */
3647 uint64_t u64Tmp;
3648 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3649 if (rcStrict == VINF_SUCCESS)
3650 {
3651 *puEsp = u64Tmp & UINT32_MAX;
3652 *pSelSS = (RTSEL)(u64Tmp >> 32);
3653 return VINF_SUCCESS;
3654 }
3655 }
3656 else
3657 {
3658 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3659 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3660 }
3661 break;
3662 }
3663
3664 default:
3665 AssertFailed();
3666 rcStrict = VERR_IEM_IPE_4;
3667 break;
3668 }
3669
3670 *puEsp = 0; /* make gcc happy */
3671 *pSelSS = 0; /* make gcc happy */
3672 return rcStrict;
3673}
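/* Offset sanity check (illustrative): for uCpl=1 the 32-bit path above reads
 * the 8-byte {esp1, ss1} pair at offset 1*8 + 4 = 12, while the 16-bit path
 * reads the 4-byte {sp1, ss1} pair at offset 1*4 + 2 = 6, matching the
 * X86TSS32 / X86TSS16 layouts. */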
3674
3675
3676/**
3677 * Loads the specified stack pointer from the 64-bit TSS.
3678 *
3679 * @returns VBox strict status code.
3680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3681 * @param uCpl The CPL to load the stack for.
3682 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3683 * @param puRsp Where to return the new stack pointer.
3684 */
3685IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3686{
3687 Assert(uCpl < 4);
3688 Assert(uIst < 8);
3689 *puRsp = 0; /* make gcc happy */
3690
3691 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3692 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3693
3694 uint32_t off;
3695 if (uIst)
3696 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3697 else
3698 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3699 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3700 {
3701 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3702 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3703 }
3704
3705 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3706}
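/* Offset sanity check (illustrative): uIst=0 with uCpl=2 reads rsp2 at
 * RT_UOFFSETOF(X86TSS64, rsp0) + 2*8, whereas uIst=3 reads ist3 at
 * RT_UOFFSETOF(X86TSS64, ist1) + 2*8, matching the X86TSS64 layout. */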
3707
3708
3709/**
3710 * Adjusts the CPU state according to the exception being raised.
3711 *
3712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3713 * @param u8Vector The exception that has been raised.
3714 */
3715DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3716{
3717 switch (u8Vector)
3718 {
3719 case X86_XCPT_DB:
3720 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3721 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3722 break;
3723 /** @todo Read the AMD and Intel exception reference... */
3724 }
3725}
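/* Note: clearing DR7.GD when delivering #DB mirrors real hardware behaviour,
 * allowing the debug exception handler to access the debug registers without
 * immediately raising another general-detect #DB. */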
3726
3727
3728/**
3729 * Implements exceptions and interrupts for real mode.
3730 *
3731 * @returns VBox strict status code.
3732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3733 * @param cbInstr The number of bytes to offset rIP by in the return
3734 * address.
3735 * @param u8Vector The interrupt / exception vector number.
3736 * @param fFlags The flags.
3737 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3738 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3739 */
3740IEM_STATIC VBOXSTRICTRC
3741iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3742 uint8_t cbInstr,
3743 uint8_t u8Vector,
3744 uint32_t fFlags,
3745 uint16_t uErr,
3746 uint64_t uCr2)
3747{
3748 NOREF(uErr); NOREF(uCr2);
3749 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3750
3751 /*
3752 * Read the IDT entry.
3753 */
3754 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3755 {
3756 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3757 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3758 }
3759 RTFAR16 Idte;
3760 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3761 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3762 {
3763 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3764 return rcStrict;
3765 }
3766
3767 /*
3768 * Push the stack frame.
3769 */
3770 uint16_t *pu16Frame;
3771 uint64_t uNewRsp;
3772 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3773 if (rcStrict != VINF_SUCCESS)
3774 return rcStrict;
3775
3776 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3777#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3778 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3779 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3780 fEfl |= UINT16_C(0xf000);
3781#endif
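    /* pu16Frame points at the new top of stack: [0] = return IP, [1] = CS,
       [2] = FLAGS; the same memory layout a real CPU produces by pushing
       FLAGS, CS and IP in that order. */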
3782 pu16Frame[2] = (uint16_t)fEfl;
3783 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3784 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3785 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3786 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3787 return rcStrict;
3788
3789 /*
3790 * Load the vector address into cs:ip and make exception specific state
3791 * adjustments.
3792 */
3793 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3794 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3795 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3796 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3797 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3798 pVCpu->cpum.GstCtx.rip = Idte.off;
3799 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3800 IEMMISC_SET_EFL(pVCpu, fEfl);
3801
3802 /** @todo do we actually do this in real mode? */
3803 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3804 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3805
3806 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3807}
3808
3809
3810/**
3811 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3812 *
3813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3814 * @param pSReg Pointer to the segment register.
3815 */
3816IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3817{
3818 pSReg->Sel = 0;
3819 pSReg->ValidSel = 0;
3820 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3821 {
3822 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3823 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3824 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3825 }
3826 else
3827 {
3828 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3829 /** @todo check this on AMD-V */
3830 pSReg->u64Base = 0;
3831 pSReg->u32Limit = 0;
3832 }
3833}
3834
3835
3836/**
3837 * Loads a segment selector during a task switch in V8086 mode.
3838 *
3839 * @param pSReg Pointer to the segment register.
3840 * @param uSel The selector value to load.
3841 */
3842IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3843{
3844 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3845 pSReg->Sel = uSel;
3846 pSReg->ValidSel = uSel;
3847 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3848 pSReg->u64Base = uSel << 4;
3849 pSReg->u32Limit = 0xffff;
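    /* 0xf3 = present, DPL=3, accessed read/write data segment; this matches
       the attribute value the Intel spec cited above requires for V86 segments. */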
3850 pSReg->Attr.u = 0xf3;
3851}
3852
3853
3854/**
3855 * Loads a NULL data selector into a selector register, both the hidden and
3856 * visible parts, in protected mode.
3857 *
3858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3859 * @param pSReg Pointer to the segment register.
3860 * @param uRpl The RPL.
3861 */
3862IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3863{
3864 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3865 * data selector in protected mode. */
3866 pSReg->Sel = uRpl;
3867 pSReg->ValidSel = uRpl;
3868 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3869 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3870 {
3871 /* VT-x (Intel 3960x) observed doing something like this. */
3872 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3873 pSReg->u32Limit = UINT32_MAX;
3874 pSReg->u64Base = 0;
3875 }
3876 else
3877 {
3878 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3879 pSReg->u32Limit = 0;
3880 pSReg->u64Base = 0;
3881 }
3882}
3883
3884
3885/**
3886 * Loads a segment selector during a task switch in protected mode.
3887 *
3888 * In this task switch scenario, we would throw \#TS exceptions rather than
3889 * \#GPs.
3890 *
3891 * @returns VBox strict status code.
3892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3893 * @param pSReg Pointer to the segment register.
3894 * @param uSel The new selector value.
3895 *
3896 * @remarks This does _not_ handle CS or SS.
3897 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3898 */
3899IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3900{
3901 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3902
3903 /* Null data selector. */
3904 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3905 {
3906 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3907 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3908 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3909 return VINF_SUCCESS;
3910 }
3911
3912 /* Fetch the descriptor. */
3913 IEMSELDESC Desc;
3914 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3915 if (rcStrict != VINF_SUCCESS)
3916 {
3917 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3918 VBOXSTRICTRC_VAL(rcStrict)));
3919 return rcStrict;
3920 }
3921
3922 /* Must be a data segment or readable code segment. */
3923 if ( !Desc.Legacy.Gen.u1DescType
3924 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3925 {
3926 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3927 Desc.Legacy.Gen.u4Type));
3928 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3929 }
3930
3931 /* Check privileges for data segments and non-conforming code segments. */
3932 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3933 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3934 {
3935 /* The RPL and the new CPL must be less than or equal to the DPL. */
3936 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3937 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3938 {
3939 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3940 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3941 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3942 }
3943 }
3944
3945 /* Is it there? */
3946 if (!Desc.Legacy.Gen.u1Present)
3947 {
3948 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3949 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3950 }
3951
3952 /* The base and limit. */
3953 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3954 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3955
3956 /*
3957 * Ok, everything checked out fine. Now set the accessed bit before
3958 * committing the result into the registers.
3959 */
3960 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3961 {
3962 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3963 if (rcStrict != VINF_SUCCESS)
3964 return rcStrict;
3965 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3966 }
3967
3968 /* Commit */
3969 pSReg->Sel = uSel;
3970 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3971 pSReg->u32Limit = cbLimit;
3972 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3973 pSReg->ValidSel = uSel;
3974 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3975 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3976 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3977
3978 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3979 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3980 return VINF_SUCCESS;
3981}
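/* Example (illustrative): loading DS=0x002b (RPL=3) with CPL=3 from a DPL=3
 * writable data descriptor succeeds and sets the accessed bit; the same
 * selector backed by a DPL=2 descriptor fails the privilege check and raises
 * #TS(0x0028), i.e. the selector with the RPL bits masked off. */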
3982
3983
3984/**
3985 * Performs a task switch.
3986 *
3987 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3988 * caller is responsible for performing the necessary checks (like DPL, TSS
3989 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3990 * reference for JMP, CALL, IRET.
3991 *
3992 * If the task switch is due to a software interrupt or hardware exception,
3993 * the caller is responsible for validating the TSS selector and descriptor. See
3994 * Intel Instruction reference for INT n.
3995 *
3996 * @returns VBox strict status code.
3997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3998 * @param enmTaskSwitch The cause of the task switch.
3999 * @param uNextEip The EIP effective after the task switch.
4000 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4001 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4002 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4003 * @param SelTSS The TSS selector of the new task.
4004 * @param pNewDescTSS Pointer to the new TSS descriptor.
4005 */
4006IEM_STATIC VBOXSTRICTRC
4007iemTaskSwitch(PVMCPU pVCpu,
4008 IEMTASKSWITCH enmTaskSwitch,
4009 uint32_t uNextEip,
4010 uint32_t fFlags,
4011 uint16_t uErr,
4012 uint64_t uCr2,
4013 RTSEL SelTSS,
4014 PIEMSELDESC pNewDescTSS)
4015{
4016 Assert(!IEM_IS_REAL_MODE(pVCpu));
4017 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4018 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4019
4020 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4021 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4022 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4023 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4024 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4025
4026 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4027 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4028
4029 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4030 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4031
4032 /* Update CR2 in case it's a page-fault. */
4033 /** @todo This should probably be done much earlier in IEM/PGM. See
4034 * @bugref{5653#c49}. */
4035 if (fFlags & IEM_XCPT_FLAGS_CR2)
4036 pVCpu->cpum.GstCtx.cr2 = uCr2;
4037
4038 /*
4039 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4040 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4041 */
4042 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4043 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4044 if (uNewTSSLimit < uNewTSSLimitMin)
4045 {
4046 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4047 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4048 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4049 }
4050
4051 /*
4052 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4053 * The new TSS must have been read and validated (DPL, limits etc.) before a
4054 * task-switch VM-exit commences.
4055 *
4056 * See Intel spec. 25.4.2 "Treatment of Task Switches"
4057 */
4058 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4059 {
4060 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4061 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4062 }
4063
4064 /*
4065 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4066 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4067 */
4068 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4069 {
4070 uint32_t const uExitInfo1 = SelTSS;
4071 uint32_t uExitInfo2 = uErr;
4072 switch (enmTaskSwitch)
4073 {
4074 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4075 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4076 default: break;
4077 }
4078 if (fFlags & IEM_XCPT_FLAGS_ERR)
4079 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4080 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4081 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4082
4083 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4084 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4085 RT_NOREF2(uExitInfo1, uExitInfo2);
4086 }
4087
4088 /*
4089 * Check the current TSS limit. The last write to the current TSS during the
4090 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4091 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4092 *
4093 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4094 * end up with smaller than "legal" TSS limits.
4095 */
4096 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4097 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4098 if (uCurTSSLimit < uCurTSSLimitMin)
4099 {
4100 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4101 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4102 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4103 }
4104
4105 /*
4106 * Verify that the new TSS can be accessed and map it. Map only the required contents
4107 * and not the entire TSS.
4108 */
4109 void *pvNewTSS;
4110 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4111 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4112 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4113 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4114 * not perform correct translation if this happens. See Intel spec. 7.2.1
4115 * "Task-State Segment" */
4116 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4117 if (rcStrict != VINF_SUCCESS)
4118 {
4119 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4120 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4121 return rcStrict;
4122 }
4123
4124 /*
4125 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4126 */
4127 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4128 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4129 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4130 {
4131 PX86DESC pDescCurTSS;
4132 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4133 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4134 if (rcStrict != VINF_SUCCESS)
4135 {
4136 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4137 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4138 return rcStrict;
4139 }
4140
4141 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4142 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4143 if (rcStrict != VINF_SUCCESS)
4144 {
4145 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4146 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4147 return rcStrict;
4148 }
4149
4150 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4151 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4152 {
4153 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4154 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4155 u32EFlags &= ~X86_EFL_NT;
4156 }
4157 }
4158
4159 /*
4160 * Save the CPU state into the current TSS.
4161 */
4162 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4163 if (GCPtrNewTSS == GCPtrCurTSS)
4164 {
4165 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4166 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4167 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4168 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4169 pVCpu->cpum.GstCtx.ldtr.Sel));
4170 }
4171 if (fIsNewTSS386)
4172 {
4173 /*
4174 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4175 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4176 */
4177 void *pvCurTSS32;
4178 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4179 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4180 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4181 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4182 if (rcStrict != VINF_SUCCESS)
4183 {
4184 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4185 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4186 return rcStrict;
4187 }
4188
4189 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4190 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4191 pCurTSS32->eip = uNextEip;
4192 pCurTSS32->eflags = u32EFlags;
4193 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4194 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4195 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4196 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4197 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4198 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4199 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4200 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4201 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4202 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4203 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4204 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4205 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4206 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4207
4208 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4209 if (rcStrict != VINF_SUCCESS)
4210 {
4211 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4212 VBOXSTRICTRC_VAL(rcStrict)));
4213 return rcStrict;
4214 }
4215 }
4216 else
4217 {
4218 /*
4219 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4220 */
4221 void *pvCurTSS16;
4222 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4223 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4224 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4225 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4226 if (rcStrict != VINF_SUCCESS)
4227 {
4228 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4229 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4230 return rcStrict;
4231 }
4232
4233 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4234 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4235 pCurTSS16->ip = uNextEip;
4236 pCurTSS16->flags = u32EFlags;
4237 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4238 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4239 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4240 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4241 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4242 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4243 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4244 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4245 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4246 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4247 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4248 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4249
4250 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4251 if (rcStrict != VINF_SUCCESS)
4252 {
4253 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4254 VBOXSTRICTRC_VAL(rcStrict)));
4255 return rcStrict;
4256 }
4257 }
4258
4259 /*
4260 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4261 */
4262 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4263 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4264 {
4265 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4266 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4267 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4268 }
4269
4270 /*
4271 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4272 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4273 */
4274 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4275 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4276 bool fNewDebugTrap;
4277 if (fIsNewTSS386)
4278 {
4279 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4280 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4281 uNewEip = pNewTSS32->eip;
4282 uNewEflags = pNewTSS32->eflags;
4283 uNewEax = pNewTSS32->eax;
4284 uNewEcx = pNewTSS32->ecx;
4285 uNewEdx = pNewTSS32->edx;
4286 uNewEbx = pNewTSS32->ebx;
4287 uNewEsp = pNewTSS32->esp;
4288 uNewEbp = pNewTSS32->ebp;
4289 uNewEsi = pNewTSS32->esi;
4290 uNewEdi = pNewTSS32->edi;
4291 uNewES = pNewTSS32->es;
4292 uNewCS = pNewTSS32->cs;
4293 uNewSS = pNewTSS32->ss;
4294 uNewDS = pNewTSS32->ds;
4295 uNewFS = pNewTSS32->fs;
4296 uNewGS = pNewTSS32->gs;
4297 uNewLdt = pNewTSS32->selLdt;
4298 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4299 }
4300 else
4301 {
4302 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4303 uNewCr3 = 0;
4304 uNewEip = pNewTSS16->ip;
4305 uNewEflags = pNewTSS16->flags;
4306 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4307 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4308 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4309 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4310 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4311 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4312 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4313 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4314 uNewES = pNewTSS16->es;
4315 uNewCS = pNewTSS16->cs;
4316 uNewSS = pNewTSS16->ss;
4317 uNewDS = pNewTSS16->ds;
4318 uNewFS = 0;
4319 uNewGS = 0;
4320 uNewLdt = pNewTSS16->selLdt;
4321 fNewDebugTrap = false;
4322 }
4323
4324 if (GCPtrNewTSS == GCPtrCurTSS)
4325 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4326 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4327
4328 /*
4329 * We're done accessing the new TSS.
4330 */
4331 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4332 if (rcStrict != VINF_SUCCESS)
4333 {
4334 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4335 return rcStrict;
4336 }
4337
4338 /*
4339 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4340 */
4341 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4342 {
4343 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4344 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4345 if (rcStrict != VINF_SUCCESS)
4346 {
4347 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4348 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4349 return rcStrict;
4350 }
4351
4352 /* Check that the descriptor indicates the new TSS is available (not busy). */
4353 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4354 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4355 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4356
4357 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4358 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4359 if (rcStrict != VINF_SUCCESS)
4360 {
4361 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4362 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4363 return rcStrict;
4364 }
4365 }
4366
4367 /*
4368 * From this point on, we're technically in the new task. We will defer exceptions
4369 * until the completion of the task switch but before executing any instructions in the new task.
4370 */
4371 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4372 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4373 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4374 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4375 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4376 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4377 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4378
4379 /* Set the busy bit in TR. */
4380 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4381 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4382 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4383 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4384 {
4385 uNewEflags |= X86_EFL_NT;
4386 }
4387
4388 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4389 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4390 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4391
4392 pVCpu->cpum.GstCtx.eip = uNewEip;
4393 pVCpu->cpum.GstCtx.eax = uNewEax;
4394 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4395 pVCpu->cpum.GstCtx.edx = uNewEdx;
4396 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4397 pVCpu->cpum.GstCtx.esp = uNewEsp;
4398 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4399 pVCpu->cpum.GstCtx.esi = uNewEsi;
4400 pVCpu->cpum.GstCtx.edi = uNewEdi;
4401
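    /* Only the architecturally live EFLAGS bits are taken over from the TSS
       image, and the reserved always-one bit 1 (RA1) is forced to 1. */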
4402 uNewEflags &= X86_EFL_LIVE_MASK;
4403 uNewEflags |= X86_EFL_RA1_MASK;
4404 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4405
4406 /*
4407 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4408 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4409 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4410 */
4411 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4412 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4413
4414 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4415 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4416
4417 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4418 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4419
4420 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4421 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4422
4423 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4424 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4425
4426 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4427 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4428 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4429
4430 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4431 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4432 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4433 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4434
4435 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4436 {
4437 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4438 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4439 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4440 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4441 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4442 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4443 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4444 }
4445
4446 /*
4447 * Switch CR3 for the new task.
4448 */
4449 if ( fIsNewTSS386
4450 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4451 {
4452 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4453 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4454 AssertRCSuccessReturn(rc, rc);
4455
4456 /* Inform PGM. */
4457 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4458 AssertRCReturn(rc, rc);
4459 /* ignore informational status codes */
4460
4461 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4462 }
4463
4464 /*
4465 * Switch LDTR for the new task.
4466 */
4467 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4468 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4469 else
4470 {
4471 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4472
4473 IEMSELDESC DescNewLdt;
4474 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4475 if (rcStrict != VINF_SUCCESS)
4476 {
4477 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4478 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4479 return rcStrict;
4480 }
4481 if ( !DescNewLdt.Legacy.Gen.u1Present
4482 || DescNewLdt.Legacy.Gen.u1DescType
4483 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4484 {
4485 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4486 uNewLdt, DescNewLdt.Legacy.u));
4487 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4488 }
4489
4490 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4491 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4492 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4493 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4494 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4495 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4496 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4497 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4498 }
4499
4500 IEMSELDESC DescSS;
4501 if (IEM_IS_V86_MODE(pVCpu))
4502 {
4503 pVCpu->iem.s.uCpl = 3;
4504 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4505 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4506 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4507 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4508 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4509 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4510
4511 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4512 DescSS.Legacy.u = 0;
4513 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4514 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4515 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4516 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4517 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4518 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4519 DescSS.Legacy.Gen.u2Dpl = 3;
4520 }
4521 else
4522 {
4523 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4524
4525 /*
4526 * Load the stack segment for the new task.
4527 */
4528 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4529 {
4530 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4532 }
4533
4534 /* Fetch the descriptor. */
4535 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4536 if (rcStrict != VINF_SUCCESS)
4537 {
4538 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4539 VBOXSTRICTRC_VAL(rcStrict)));
4540 return rcStrict;
4541 }
4542
4543 /* SS must be a data segment and writable. */
4544 if ( !DescSS.Legacy.Gen.u1DescType
4545 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4546 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4547 {
4548 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4549 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4550 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4551 }
4552
4553 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4554 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4555 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4556 {
4557 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4558 uNewCpl));
4559 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 /* Is it there? */
4563 if (!DescSS.Legacy.Gen.u1Present)
4564 {
4565 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4566 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4567 }
4568
4569 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4570 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4571
4572 /* Set the accessed bit before committing the result into SS. */
4573 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4574 {
4575 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4576 if (rcStrict != VINF_SUCCESS)
4577 return rcStrict;
4578 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4579 }
4580
4581 /* Commit SS. */
4582 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4583 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4584 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4585 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4586 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4587 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4588 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4589
4590 /* CPL has changed, update IEM before loading rest of segments. */
4591 pVCpu->iem.s.uCpl = uNewCpl;
4592
4593 /*
4594 * Load the data segments for the new task.
4595 */
4596 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4597 if (rcStrict != VINF_SUCCESS)
4598 return rcStrict;
4599 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4600 if (rcStrict != VINF_SUCCESS)
4601 return rcStrict;
4602 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4603 if (rcStrict != VINF_SUCCESS)
4604 return rcStrict;
4605 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608
4609 /*
4610 * Load the code segment for the new task.
4611 */
4612 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4613 {
4614 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4615 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4616 }
4617
4618 /* Fetch the descriptor. */
4619 IEMSELDESC DescCS;
4620 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4621 if (rcStrict != VINF_SUCCESS)
4622 {
4623 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4624 return rcStrict;
4625 }
4626
4627 /* CS must be a code segment. */
4628 if ( !DescCS.Legacy.Gen.u1DescType
4629 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4630 {
4631 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4632 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4633 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4634 }
4635
4636 /* For conforming CS, DPL must be less than or equal to the RPL. */
4637 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4638 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4639 {
4640 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4641 DescCS.Legacy.Gen.u2Dpl));
4642 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4643 }
4644
4645 /* For non-conforming CS, DPL must match RPL. */
4646 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4647 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4648 {
4649 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4650 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4651 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4652 }
4653
4654 /* Is it there? */
4655 if (!DescCS.Legacy.Gen.u1Present)
4656 {
4657 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4658 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4659 }
4660
4661 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4662 u64Base = X86DESC_BASE(&DescCS.Legacy);
4663
4664 /* Set the accessed bit before committing the result into CS. */
4665 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4666 {
4667 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4668 if (rcStrict != VINF_SUCCESS)
4669 return rcStrict;
4670 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4671 }
4672
4673 /* Commit CS. */
4674 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4675 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4676 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4677 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4678 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4679 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4680 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4681 }
4682
4683 /** @todo Debug trap. */
4684 if (fIsNewTSS386 && fNewDebugTrap)
4685 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4686
4687 /*
4688 * Construct the error code masks based on what caused this task switch.
4689 * See Intel Instruction reference for INT.
4690 */
4691 uint16_t uExt;
4692 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4693 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4694 {
4695 uExt = 1;
4696 }
4697 else
4698 uExt = 0;
4699
4700 /*
4701 * Push any error code on to the new stack.
4702 */
4703 if (fFlags & IEM_XCPT_FLAGS_ERR)
4704 {
4705 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4706 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4707 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
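/* Only the error code is pushed onto the new task's stack here; a 32-bit
   TSS gets a dword push, a 16-bit TSS a word push. */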
4708
4709 /* Check that there is sufficient space on the stack. */
4710 /** @todo Factor out segment limit checking for normal/expand down segments
4711 * into a separate function. */
4712 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4713 {
4714 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4715 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4716 {
4717 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4718 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4719 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4720 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4721 }
4722 }
4723 else
4724 {
4725 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4726 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4727 {
4728 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4729 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4730 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4731 }
4732 }
4733
4734
4735 if (fIsNewTSS386)
4736 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4737 else
4738 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4739 if (rcStrict != VINF_SUCCESS)
4740 {
4741 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4742 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4743 return rcStrict;
4744 }
4745 }
4746
4747 /* Check the new EIP against the new CS limit. */
4748 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4749 {
4750 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4751 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4752 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4753 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4754 }
4755
4756 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4757 pVCpu->cpum.GstCtx.ss.Sel));
4758 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4759}
4760
4761
4762/**
4763 * Implements exceptions and interrupts for protected mode.
4764 *
4765 * @returns VBox strict status code.
4766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4767 * @param cbInstr The number of bytes to offset rIP by in the return
4768 * address.
4769 * @param u8Vector The interrupt / exception vector number.
4770 * @param fFlags The flags.
4771 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4772 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4773 */
4774IEM_STATIC VBOXSTRICTRC
4775iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4776 uint8_t cbInstr,
4777 uint8_t u8Vector,
4778 uint32_t fFlags,
4779 uint16_t uErr,
4780 uint64_t uCr2)
4781{
4782 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4783
4784 /*
4785 * Read the IDT entry.
4786 */
4787 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4788 {
4789 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4790 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4791 }
4792 X86DESC Idte;
4793 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4794 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4795 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4796 {
4797 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4798 return rcStrict;
4799 }
4800 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4801 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4802 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4803
4804 /*
4805 * Check the descriptor type, DPL and such.
4806 * ASSUMES this is done in the same order as described for call-gate calls.
4807 */
4808 if (Idte.Gate.u1DescType)
4809 {
4810 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4811 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4812 }
4813 bool fTaskGate = false;
4814 uint8_t f32BitGate = true;
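/* Note: f32BitGate also doubles as a shift count (0 or 1) when sizing the stack frame pushes further down. */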
4815 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4816 switch (Idte.Gate.u4Type)
4817 {
4818 case X86_SEL_TYPE_SYS_UNDEFINED:
4819 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4820 case X86_SEL_TYPE_SYS_LDT:
4821 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4822 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4823 case X86_SEL_TYPE_SYS_UNDEFINED2:
4824 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4825 case X86_SEL_TYPE_SYS_UNDEFINED3:
4826 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4827 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4828 case X86_SEL_TYPE_SYS_UNDEFINED4:
4829 {
4830 /** @todo check what actually happens when the type is wrong...
4831 * esp. call gates. */
4832 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4833 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4834 }
4835
4836 case X86_SEL_TYPE_SYS_286_INT_GATE:
4837 f32BitGate = false;
4838 RT_FALL_THRU();
4839 case X86_SEL_TYPE_SYS_386_INT_GATE:
4840 fEflToClear |= X86_EFL_IF;
4841 break;
4842
4843 case X86_SEL_TYPE_SYS_TASK_GATE:
4844 fTaskGate = true;
4845#ifndef IEM_IMPLEMENTS_TASKSWITCH
4846 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4847#endif
4848 break;
4849
4850 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4851 f32BitGate = false; RT_FALL_THRU();
4852 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4853 break;
4854
4855 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4856 }
4857
4858 /* Check DPL against CPL if applicable. */
4859 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4860 {
4861 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4862 {
4863 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4864 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4865 }
4866 }
4867
4868 /* Is it there? */
4869 if (!Idte.Gate.u1Present)
4870 {
4871 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4872 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4873 }
4874
4875 /* Is it a task-gate? */
4876 if (fTaskGate)
4877 {
4878 /*
4879 * Construct the error code masks based on what caused this task switch.
4880 * See Intel Instruction reference for INT.
4881 */
4882 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4883 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4884 RTSEL SelTSS = Idte.Gate.u16Sel;
4885
4886 /*
4887 * Fetch the TSS descriptor in the GDT.
4888 */
4889 IEMSELDESC DescTSS;
4890 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4891 if (rcStrict != VINF_SUCCESS)
4892 {
4893 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4894 VBOXSTRICTRC_VAL(rcStrict)));
4895 return rcStrict;
4896 }
4897
4898 /* The TSS descriptor must be a system segment and be available (not busy). */
4899 if ( DescTSS.Legacy.Gen.u1DescType
4900 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4901 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4902 {
4903 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4904 u8Vector, SelTSS, DescTSS.Legacy.au64));
4905 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4906 }
4907
4908 /* The TSS must be present. */
4909 if (!DescTSS.Legacy.Gen.u1Present)
4910 {
4911 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4912 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4913 }
4914
4915 /* Do the actual task switch. */
4916 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4917 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4918 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4919 }
4920
4921 /* A null CS is bad. */
4922 RTSEL NewCS = Idte.Gate.u16Sel;
4923 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4924 {
4925 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4926 return iemRaiseGeneralProtectionFault0(pVCpu);
4927 }
4928
4929 /* Fetch the descriptor for the new CS. */
4930 IEMSELDESC DescCS;
4931 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4932 if (rcStrict != VINF_SUCCESS)
4933 {
4934 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4935 return rcStrict;
4936 }
4937
4938 /* Must be a code segment. */
4939 if (!DescCS.Legacy.Gen.u1DescType)
4940 {
4941 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4942 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4943 }
4944 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4945 {
4946 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4947 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4948 }
4949
4950 /* Don't allow lowering the privilege level. */
4951 /** @todo Does the lowering of privileges apply to software interrupts
4952 * only? This has bearings on the more-privileged or
4953 * same-privilege stack behavior further down. A testcase would
4954 * be nice. */
4955 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4956 {
4957 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4958 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4959 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4960 }
4961
4962 /* Make sure the selector is present. */
4963 if (!DescCS.Legacy.Gen.u1Present)
4964 {
4965 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4966 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4967 }
4968
4969 /* Check the new EIP against the new CS limit. */
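/* A 286 gate only supplies a 16-bit offset; a 386 gate adds the high word. */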
4970 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4971 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4972 ? Idte.Gate.u16OffsetLow
4973 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4974 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4975 if (uNewEip > cbLimitCS)
4976 {
4977 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4978 u8Vector, uNewEip, cbLimitCS, NewCS));
4979 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4980 }
4981 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4982
4983 /* Calc the flag image to push. */
4984 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4985 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4986 fEfl &= ~X86_EFL_RF;
4987 else
4988 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4989
4990 /* From V8086 mode only go to CPL 0. */
4991 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4992 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
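/* A conforming handler CS keeps the current CPL; a non-conforming one switches to the CS descriptor's DPL. */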
4993 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4994 {
4995 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4996 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4997 }
4998
4999 /*
5000 * If the privilege level changes, we need to get a new stack from the TSS.
5001 * This in turns means validating the new SS and ESP...
5002 */
5003 if (uNewCpl != pVCpu->iem.s.uCpl)
5004 {
5005 RTSEL NewSS;
5006 uint32_t uNewEsp;
5007 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5008 if (rcStrict != VINF_SUCCESS)
5009 return rcStrict;
5010
5011 IEMSELDESC DescSS;
5012 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5013 if (rcStrict != VINF_SUCCESS)
5014 return rcStrict;
5015 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5016 if (!DescSS.Legacy.Gen.u1DefBig)
5017 {
5018 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5019 uNewEsp = (uint16_t)uNewEsp;
5020 }
5021
5022 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5023
5024 /* Check that there is sufficient space for the stack frame. */
5025 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
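/* Privilege-change frame: EIP, CS, EFLAGS, ESP and SS (plus the error code when
   present); interrupting V8086 code additionally pushes ES, DS, FS and GS.
   The word sizes are doubled for 32-bit gates via the f32BitGate shift. */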
5026 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5027 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5028 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
5029
5030 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5031 {
5032 if ( uNewEsp - 1 > cbLimitSS
5033 || uNewEsp < cbStackFrame)
5034 {
5035 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5036 u8Vector, NewSS, uNewEsp, cbStackFrame));
5037 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5038 }
5039 }
5040 else
5041 {
5042 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5043 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5044 {
5045 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5046 u8Vector, NewSS, uNewEsp, cbStackFrame));
5047 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5048 }
5049 }
5050
5051 /*
5052 * Start making changes.
5053 */
5054
5055 /* Set the new CPL so that stack accesses use it. */
5056 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5057 pVCpu->iem.s.uCpl = uNewCpl;
5058
5059 /* Create the stack frame. */
5060 RTPTRUNION uStackFrame;
5061 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5062 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5063 if (rcStrict != VINF_SUCCESS)
5064 return rcStrict;
5065 void * const pvStackFrame = uStackFrame.pv;
5066 if (f32BitGate)
5067 {
5068 if (fFlags & IEM_XCPT_FLAGS_ERR)
5069 *uStackFrame.pu32++ = uErr;
5070 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5071 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5072 uStackFrame.pu32[2] = fEfl;
5073 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5074 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5075 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5076 if (fEfl & X86_EFL_VM)
5077 {
5078 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5079 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5080 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5081 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5082 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5083 }
5084 }
5085 else
5086 {
5087 if (fFlags & IEM_XCPT_FLAGS_ERR)
5088 *uStackFrame.pu16++ = uErr;
5089 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5090 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5091 uStackFrame.pu16[2] = fEfl;
5092 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5093 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5094 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5095 if (fEfl & X86_EFL_VM)
5096 {
5097 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5098 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5099 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5100 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5101 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5102 }
5103 }
5104 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5105 if (rcStrict != VINF_SUCCESS)
5106 return rcStrict;
5107
5108 /* Mark the selectors 'accessed' (hope this is the correct time). */
5109 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5110 * after pushing the stack frame? (Write protect the gdt + stack to
5111 * find out.) */
5112 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5113 {
5114 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5115 if (rcStrict != VINF_SUCCESS)
5116 return rcStrict;
5117 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5118 }
5119
5120 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5121 {
5122 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5123 if (rcStrict != VINF_SUCCESS)
5124 return rcStrict;
5125 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5126 }
5127
5128 /*
5129 * Start committing the register changes (joins with the DPL=CPL branch).
5130 */
5131 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5132 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5133 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5134 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5135 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5136 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5137 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5138 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5139 * SP is loaded).
5140 * Need to check the other combinations too:
5141 * - 16-bit TSS, 32-bit handler
5142 * - 32-bit TSS, 16-bit handler */
5143 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5144 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5145 else
5146 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5147
5148 if (fEfl & X86_EFL_VM)
5149 {
5150 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5151 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5152 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5153 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5154 }
5155 }
5156 /*
5157 * Same privilege, no stack change and smaller stack frame.
5158 */
5159 else
5160 {
5161 uint64_t uNewRsp;
5162 RTPTRUNION uStackFrame;
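/* Same-privilege frame: just EIP, CS and EFLAGS (plus the error code when
   present); SS:ESP is not pushed since the stack does not change. */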
5163 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5164 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5165 if (rcStrict != VINF_SUCCESS)
5166 return rcStrict;
5167 void * const pvStackFrame = uStackFrame.pv;
5168
5169 if (f32BitGate)
5170 {
5171 if (fFlags & IEM_XCPT_FLAGS_ERR)
5172 *uStackFrame.pu32++ = uErr;
5173 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5174 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5175 uStackFrame.pu32[2] = fEfl;
5176 }
5177 else
5178 {
5179 if (fFlags & IEM_XCPT_FLAGS_ERR)
5180 *uStackFrame.pu16++ = uErr;
5181 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5182 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5183 uStackFrame.pu16[2] = fEfl;
5184 }
5185 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5186 if (rcStrict != VINF_SUCCESS)
5187 return rcStrict;
5188
5189 /* Mark the CS selector as 'accessed'. */
5190 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5191 {
5192 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5193 if (rcStrict != VINF_SUCCESS)
5194 return rcStrict;
5195 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5196 }
5197
5198 /*
5199 * Start committing the register changes (joins with the other branch).
5200 */
5201 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5202 }
5203
5204 /* ... register committing continues. */
5205 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5206 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5207 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5208 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5209 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5210 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5211
5212 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5213 fEfl &= ~fEflToClear;
5214 IEMMISC_SET_EFL(pVCpu, fEfl);
5215
5216 if (fFlags & IEM_XCPT_FLAGS_CR2)
5217 pVCpu->cpum.GstCtx.cr2 = uCr2;
5218
5219 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5220 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5221
5222 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5223}
5224
5225
5226/**
5227 * Implements exceptions and interrupts for long mode.
5228 *
5229 * @returns VBox strict status code.
5230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5231 * @param cbInstr The number of bytes to offset rIP by in the return
5232 * address.
5233 * @param u8Vector The interrupt / exception vector number.
5234 * @param fFlags The flags.
5235 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5236 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5237 */
5238IEM_STATIC VBOXSTRICTRC
5239iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5240 uint8_t cbInstr,
5241 uint8_t u8Vector,
5242 uint32_t fFlags,
5243 uint16_t uErr,
5244 uint64_t uCr2)
5245{
5246 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5247
5248 /*
5249 * Read the IDT entry.
5250 */
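/* Long mode IDT entries are 16 bytes each, so the vector is scaled by 16 and the descriptor is fetched in two halves. */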
5251 uint16_t offIdt = (uint16_t)u8Vector << 4;
5252 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5253 {
5254 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5255 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5256 }
5257 X86DESC64 Idte;
5258 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5259 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5260 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5261 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5262 {
5263 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5264 return rcStrict;
5265 }
5266 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5267 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5268 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5269
5270 /*
5271 * Check the descriptor type, DPL and such.
5272 * ASSUMES this is done in the same order as described for call-gate calls.
5273 */
5274 if (Idte.Gate.u1DescType)
5275 {
5276 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5277 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5278 }
5279 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5280 switch (Idte.Gate.u4Type)
5281 {
5282 case AMD64_SEL_TYPE_SYS_INT_GATE:
5283 fEflToClear |= X86_EFL_IF;
5284 break;
5285 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5286 break;
5287
5288 default:
5289 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5290 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5291 }
5292
5293 /* Check DPL against CPL if applicable. */
5294 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5295 {
5296 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5297 {
5298 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5299 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5300 }
5301 }
5302
5303 /* Is it there? */
5304 if (!Idte.Gate.u1Present)
5305 {
5306 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5307 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5308 }
5309
5310 /* A null CS is bad. */
5311 RTSEL NewCS = Idte.Gate.u16Sel;
5312 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5313 {
5314 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5315 return iemRaiseGeneralProtectionFault0(pVCpu);
5316 }
5317
5318 /* Fetch the descriptor for the new CS. */
5319 IEMSELDESC DescCS;
5320 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5321 if (rcStrict != VINF_SUCCESS)
5322 {
5323 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5324 return rcStrict;
5325 }
5326
5327 /* Must be a 64-bit code segment. */
5328 if (!DescCS.Long.Gen.u1DescType)
5329 {
5330 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5331 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5332 }
5333 if ( !DescCS.Long.Gen.u1Long
5334 || DescCS.Long.Gen.u1DefBig
5335 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5336 {
5337 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5338 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5339 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5340 }
5341
5342 /* Don't allow lowering the privilege level. For non-conforming CS
5343 selectors, the CS.DPL sets the privilege level the trap/interrupt
5344 handler runs at. For conforming CS selectors, the CPL remains
5345 unchanged, but the CS.DPL must be <= CPL. */
5346 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5347 * when CPU in Ring-0. Result \#GP? */
5348 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5349 {
5350 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5351 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5352 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5353 }
5354
5355
5356 /* Make sure the selector is present. */
5357 if (!DescCS.Legacy.Gen.u1Present)
5358 {
5359 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5360 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5361 }
5362
5363 /* Check that the new RIP is canonical. */
5364 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5365 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5366 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5367 if (!IEM_IS_CANONICAL(uNewRip))
5368 {
5369 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5370 return iemRaiseGeneralProtectionFault0(pVCpu);
5371 }
5372
5373 /*
5374 * If the privilege level changes or if the IST isn't zero, we need to get
5375 * a new stack from the TSS.
5376 */
5377 uint64_t uNewRsp;
5378 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5379 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5380 if ( uNewCpl != pVCpu->iem.s.uCpl
5381 || Idte.Gate.u3IST != 0)
5382 {
5383 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5384 if (rcStrict != VINF_SUCCESS)
5385 return rcStrict;
5386 }
5387 else
5388 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5389 uNewRsp &= ~(uint64_t)0xf;
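/* The stack pointer is aligned down to a 16-byte boundary before the 64-bit frame is pushed. */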
5390
5391 /*
5392 * Calc the flag image to push.
5393 */
5394 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5395 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5396 fEfl &= ~X86_EFL_RF;
5397 else
5398 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5399
5400 /*
5401 * Start making changes.
5402 */
5403 /* Set the new CPL so that stack accesses use it. */
5404 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5405 pVCpu->iem.s.uCpl = uNewCpl;
5406
5407 /* Create the stack frame. */
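/* The 64-bit frame is always RIP, CS, RFLAGS, RSP and SS (five quadwords),
   plus one more quadword when an error code is pushed. */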
5408 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5409 RTPTRUNION uStackFrame;
5410 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5411 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5412 if (rcStrict != VINF_SUCCESS)
5413 return rcStrict;
5414 void * const pvStackFrame = uStackFrame.pv;
5415
5416 if (fFlags & IEM_XCPT_FLAGS_ERR)
5417 *uStackFrame.pu64++ = uErr;
5418 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5419 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5420 uStackFrame.pu64[2] = fEfl;
5421 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5422 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5423 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5424 if (rcStrict != VINF_SUCCESS)
5425 return rcStrict;
5426
5427 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5428 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5429 * after pushing the stack frame? (Write protect the gdt + stack to
5430 * find out.) */
5431 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5432 {
5433 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5434 if (rcStrict != VINF_SUCCESS)
5435 return rcStrict;
5436 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5437 }
5438
5439 /*
5440 * Start committing the register changes.
5441 */
5442 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5443 * hidden registers when interrupting 32-bit or 16-bit code! */
5444 if (uNewCpl != uOldCpl)
5445 {
5446 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5447 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5448 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5449 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5450 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5451 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5452 }
5453 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5454 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5455 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5456 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5457 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5458 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5459 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5460 pVCpu->cpum.GstCtx.rip = uNewRip;
5461
5462 fEfl &= ~fEflToClear;
5463 IEMMISC_SET_EFL(pVCpu, fEfl);
5464
5465 if (fFlags & IEM_XCPT_FLAGS_CR2)
5466 pVCpu->cpum.GstCtx.cr2 = uCr2;
5467
5468 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5469 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5470
5471 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5472}
5473
5474
5475/**
5476 * Implements exceptions and interrupts.
5477 *
5478 * All exceptions and interrupts go through this function!
5479 *
5480 * @returns VBox strict status code.
5481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5482 * @param cbInstr The number of bytes to offset rIP by in the return
5483 * address.
5484 * @param u8Vector The interrupt / exception vector number.
5485 * @param fFlags The flags.
5486 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5487 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5488 */
5489DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5490iemRaiseXcptOrInt(PVMCPU pVCpu,
5491 uint8_t cbInstr,
5492 uint8_t u8Vector,
5493 uint32_t fFlags,
5494 uint16_t uErr,
5495 uint64_t uCr2)
5496{
5497 /*
5498 * Get all the state that we might need here.
5499 */
5500 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5501 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5502
5503#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5504 /*
5505 * Flush prefetch buffer
5506 */
5507 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5508#endif
5509
5510 /*
5511 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5512 */
5513 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5514 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5515 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5516 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5517 {
5518 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5519 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5520 u8Vector = X86_XCPT_GP;
5521 uErr = 0;
5522 }
5523#ifdef DBGFTRACE_ENABLED
5524 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5525 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5526 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5527#endif
5528
5529#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5530 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5531 {
5532 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5533 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5534 return rcStrict0;
5535 }
5536#endif
5537
5538#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5539 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5540 {
5541 /*
5542 * If the event is being injected as part of VMRUN, it isn't subject to event
5543 * intercepts in the nested-guest. However, secondary exceptions that occur
5544 * during injection of any event -are- subject to exception intercepts.
5545 *
5546 * See AMD spec. 15.20 "Event Injection".
5547 */
5548 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5549 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5550 else
5551 {
5552 /*
5553 * Check and handle if the event being raised is intercepted.
5554 */
5555 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5556 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5557 return rcStrict0;
5558 }
5559 }
5560#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5561
5562 /*
5563 * Do recursion accounting.
5564 */
5565 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5566 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5567 if (pVCpu->iem.s.cXcptRecursions == 0)
5568 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5569 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5570 else
5571 {
5572 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5573 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5574 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5575
5576 if (pVCpu->iem.s.cXcptRecursions >= 4)
5577 {
5578#ifdef DEBUG_bird
5579 AssertFailed();
5580#endif
5581 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5582 }
5583
5584 /*
5585 * Evaluate the sequence of recurring events.
5586 */
5587 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5588 NULL /* pXcptRaiseInfo */);
5589 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5590 { /* likely */ }
5591 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5592 {
5593 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5594 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5595 u8Vector = X86_XCPT_DF;
5596 uErr = 0;
5597 /** @todo NSTVMX: Do we need to do something here for VMX? */
5598 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5599 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5600 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5601 }
5602 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5603 {
5604 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5605 return iemInitiateCpuShutdown(pVCpu);
5606 }
5607 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5608 {
5609 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5610 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5611 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5612 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5613 return VERR_EM_GUEST_CPU_HANG;
5614 }
5615 else
5616 {
5617 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5618 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5619 return VERR_IEM_IPE_9;
5620 }
5621
5622 /*
5623 * The 'EXT' bit is set when an exception occurs during delivery of an external
5624 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5625 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5626 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5627 *
5628 * [1] - Intel spec. 6.13 "Error Code"
5629 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5630 * [3] - Intel Instruction reference for INT n.
5631 */
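/* Note: #PF error codes have their own format and #DF always takes a zero error code, hence those vectors are excluded below. */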
5632 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5633 && (fFlags & IEM_XCPT_FLAGS_ERR)
5634 && u8Vector != X86_XCPT_PF
5635 && u8Vector != X86_XCPT_DF)
5636 {
5637 uErr |= X86_TRAP_ERR_EXTERNAL;
5638 }
5639 }
5640
5641 pVCpu->iem.s.cXcptRecursions++;
5642 pVCpu->iem.s.uCurXcpt = u8Vector;
5643 pVCpu->iem.s.fCurXcpt = fFlags;
5644 pVCpu->iem.s.uCurXcptErr = uErr;
5645 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5646
5647 /*
5648 * Extensive logging.
5649 */
5650#if defined(LOG_ENABLED) && defined(IN_RING3)
5651 if (LogIs3Enabled())
5652 {
5653 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5654 PVM pVM = pVCpu->CTX_SUFF(pVM);
5655 char szRegs[4096];
5656 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5657 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5658 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5659 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5660 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5661 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5662 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5663 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5664 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5665 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5666 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5667 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5668 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5669 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5670 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5671 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5672 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5673 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5674 " efer=%016VR{efer}\n"
5675 " pat=%016VR{pat}\n"
5676 " sf_mask=%016VR{sf_mask}\n"
5677 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5678 " lstar=%016VR{lstar}\n"
5679 " star=%016VR{star} cstar=%016VR{cstar}\n"
5680 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5681 );
5682
5683 char szInstr[256];
5684 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5685 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5686 szInstr, sizeof(szInstr), NULL);
5687 Log3(("%s%s\n", szRegs, szInstr));
5688 }
5689#endif /* LOG_ENABLED */
5690
5691 /*
5692 * Call the mode specific worker function.
5693 */
5694 VBOXSTRICTRC rcStrict;
5695 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5696 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5697 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5698 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5699 else
5700 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5701
5702 /* Flush the prefetch buffer. */
5703#ifdef IEM_WITH_CODE_TLB
5704 pVCpu->iem.s.pbInstrBuf = NULL;
5705#else
5706 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5707#endif
5708
5709 /*
5710 * Unwind.
5711 */
5712 pVCpu->iem.s.cXcptRecursions--;
5713 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5714 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5715 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5716 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5717 pVCpu->iem.s.cXcptRecursions + 1));
5718 return rcStrict;
5719}
5720
5721#ifdef IEM_WITH_SETJMP
5722/**
5723 * See iemRaiseXcptOrInt. Will not return.
5724 */
5725IEM_STATIC DECL_NO_RETURN(void)
5726iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5727 uint8_t cbInstr,
5728 uint8_t u8Vector,
5729 uint32_t fFlags,
5730 uint16_t uErr,
5731 uint64_t uCr2)
5732{
5733 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5734 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5735}
5736#endif
5737
5738
5739/** \#DE - 00. */
5740DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5741{
5742 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5743}
5744
5745
5746/** \#DB - 01.
5747 * @note This automatically clears DR7.GD. */
5748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5749{
5750 /** @todo set/clear RF. */
5751 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5752 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5753}
5754
5755
5756/** \#BR - 05. */
5757DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5758{
5759 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5760}
5761
5762
5763/** \#UD - 06. */
5764DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5765{
5766 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5767}
5768
5769
5770/** \#NM - 07. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5772{
5773 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5774}
5775
5776
5777/** \#TS(err) - 0a. */
5778DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5779{
5780 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5781}
5782
5783
5784/** \#TS(tr) - 0a. */
5785DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5786{
5787 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5788 pVCpu->cpum.GstCtx.tr.Sel, 0);
5789}
5790
5791
5792/** \#TS(0) - 0a. */
5793DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5794{
5795 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5796 0, 0);
5797}
5798
5799
5800 /** \#TS(sel) - 0a. */
5801DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5802{
5803 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5804 uSel & X86_SEL_MASK_OFF_RPL, 0);
5805}
5806
5807
5808/** \#NP(err) - 0b. */
5809DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5810{
5811 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5812}
5813
5814
5815/** \#NP(sel) - 0b. */
5816DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5817{
5818 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5819 uSel & ~X86_SEL_RPL, 0);
5820}
5821
5822
5823/** \#SS(seg) - 0c. */
5824DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5825{
5826 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5827 uSel & ~X86_SEL_RPL, 0);
5828}
5829
5830
5831/** \#SS(err) - 0c. */
5832DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5833{
5834 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5835}
5836
5837
5838/** \#GP(n) - 0d. */
5839DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5840{
5841 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5842}
5843
5844
5845/** \#GP(0) - 0d. */
5846DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5847{
5848 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5849}
5850
5851#ifdef IEM_WITH_SETJMP
5852/** \#GP(0) - 0d. */
5853DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5854{
5855 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5856}
5857#endif
5858
5859
5860/** \#GP(sel) - 0d. */
5861DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5862{
5863 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5864 Sel & ~X86_SEL_RPL, 0);
5865}
5866
5867
5868/** \#GP(0) - 0d. */
5869DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5870{
5871 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5872}
5873
5874
5875/** \#GP(sel) - 0d. */
5876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5877{
5878 NOREF(iSegReg); NOREF(fAccess);
5879 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5880 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5881}
5882
5883#ifdef IEM_WITH_SETJMP
5884/** \#GP(sel) - 0d, longjmp. */
5885DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5886{
5887 NOREF(iSegReg); NOREF(fAccess);
5888 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5889 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5890}
5891#endif
5892
5893/** \#GP(sel) - 0d. */
5894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5895{
5896 NOREF(Sel);
5897 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5898}
5899
5900#ifdef IEM_WITH_SETJMP
5901/** \#GP(sel) - 0d, longjmp. */
5902DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5903{
5904 NOREF(Sel);
5905 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5906}
5907#endif
5908
5909
5910/** \#GP(sel) - 0d. */
5911DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5912{
5913 NOREF(iSegReg); NOREF(fAccess);
5914 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5915}
5916
5917#ifdef IEM_WITH_SETJMP
5918/** \#GP(sel) - 0d, longjmp. */
5919DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5920 uint32_t fAccess)
5921{
5922 NOREF(iSegReg); NOREF(fAccess);
5923 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5924}
5925#endif
5926
5927
5928/** \#PF(n) - 0e. */
5929DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5930{
5931 uint16_t uErr;
5932 switch (rc)
5933 {
5934 case VERR_PAGE_NOT_PRESENT:
5935 case VERR_PAGE_TABLE_NOT_PRESENT:
5936 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5937 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5938 uErr = 0;
5939 break;
5940
5941 default:
5942 AssertMsgFailed(("%Rrc\n", rc));
5943 RT_FALL_THRU();
5944 case VERR_ACCESS_DENIED:
5945 uErr = X86_TRAP_PF_P;
5946 break;
5947
5948 /** @todo reserved */
5949 }
5950
5951 if (pVCpu->iem.s.uCpl == 3)
5952 uErr |= X86_TRAP_PF_US;
5953
5954 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5955 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5956 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5957 uErr |= X86_TRAP_PF_ID;
5958
5959 #if 0 /* This is so much nonsense, really. Why was it done like that? */
5960 /* Note! RW access callers reporting a WRITE protection fault will clear
5961 the READ flag before calling. So, read-modify-write accesses (RW)
5962 can safely be reported as READ faults. */
5963 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5964 uErr |= X86_TRAP_PF_RW;
5965#else
5966 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5967 {
5968 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5969 uErr |= X86_TRAP_PF_RW;
5970 }
5971#endif
5972
5973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5974 uErr, GCPtrWhere);
5975}
5976
5977#ifdef IEM_WITH_SETJMP
5978/** \#PF(n) - 0e, longjmp. */
5979IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5980{
5981 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5982}
5983#endif
5984
5985
5986/** \#MF(0) - 10. */
5987DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5988{
5989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5990}
5991
5992
5993/** \#AC(0) - 11. */
5994DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5995{
5996 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5997}
5998
5999
6000/**
6001 * Macro for calling iemCImplRaiseDivideError().
6002 *
6003 * This enables us to add/remove arguments and force different levels of
6004 * inlining as we wish.
6005 *
6006 * @return Strict VBox status code.
6007 */
6008#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6009IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6010{
6011 NOREF(cbInstr);
6012 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6013}
6014
6015
6016/**
6017 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6018 *
6019 * This enables us to add/remove arguments and force different levels of
6020 * inlining as we wish.
6021 *
6022 * @return Strict VBox status code.
6023 */
6024#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6025IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6026{
6027 NOREF(cbInstr);
6028 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6029}
6030
6031
6032/**
6033 * Macro for calling iemCImplRaiseInvalidOpcode().
6034 *
6035 * This enables us to add/remove arguments and force different levels of
6036 * inlining as we wish.
6037 *
6038 * @return Strict VBox status code.
6039 */
6040#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6041IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6042{
6043 NOREF(cbInstr);
6044 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6045}
6046
6047
6048/** @} */
6049
6050
6051/*
6052 *
6053 * Helper routines.
6054 * Helper routines.
6055 * Helper routines.
6056 *
6057 */
6058
6059/**
6060 * Recalculates the effective operand size.
6061 *
6062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6063 */
6064IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6065{
6066 switch (pVCpu->iem.s.enmCpuMode)
6067 {
6068 case IEMMODE_16BIT:
6069 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6070 break;
6071 case IEMMODE_32BIT:
6072 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6073 break;
6074 case IEMMODE_64BIT:
6075 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6076 {
6077 case 0:
6078 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6079 break;
6080 case IEM_OP_PRF_SIZE_OP:
6081 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6082 break;
6083 case IEM_OP_PRF_SIZE_REX_W:
6084 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6085 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6086 break;
6087 }
6088 break;
6089 default:
6090 AssertFailed();
6091 }
6092}
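/*
 * For clarity, the resolution above boils down to: 16-bit mode - 0x66 gives
 * 32-bit, otherwise 16-bit; 32-bit mode - 0x66 gives 16-bit, otherwise 32-bit;
 * 64-bit mode - REX.W forces 64-bit (overriding 0x66), 0x66 alone gives 16-bit,
 * and with neither prefix the default operand size (enmDefOpSize) applies.
 */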
6093
6094
6095/**
6096 * Sets the default operand size to 64-bit and recalculates the effective
6097 * operand size.
6098 *
6099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6100 */
6101IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6102{
6103 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6104 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6105 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6106 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6107 else
6108 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6109}
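/*
 * This variant is for instructions whose operand size defaults to 64-bit in
 * long mode (near branches, stack operations and the like), where, as the code
 * above shows, the 0x66 prefix still selects a 16-bit operand but a 32-bit
 * operand size cannot be selected at all.
 */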
6110
6111
6112/*
6113 *
6114 * Common opcode decoders.
6115 * Common opcode decoders.
6116 * Common opcode decoders.
6117 *
6118 */
6119//#include <iprt/mem.h>
6120
6121/**
6122 * Used to add extra details about a stub case.
6123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6124 */
6125IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6126{
6127#if defined(LOG_ENABLED) && defined(IN_RING3)
6128 PVM pVM = pVCpu->CTX_SUFF(pVM);
6129 char szRegs[4096];
6130 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6131 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6132 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6133 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6134 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6135 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6136 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6137 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6138 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6139 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6140 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6141 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6142 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6143 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6144 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6145 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6146 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6147 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6148 " efer=%016VR{efer}\n"
6149 " pat=%016VR{pat}\n"
6150 " sf_mask=%016VR{sf_mask}\n"
6151 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6152 " lstar=%016VR{lstar}\n"
6153 " star=%016VR{star} cstar=%016VR{cstar}\n"
6154 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6155 );
6156
6157 char szInstr[256];
6158 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6159 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6160 szInstr, sizeof(szInstr), NULL);
6161
6162 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6163#else
6164 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6165#endif
6166}
6167
6168/**
6169 * Complains about a stub.
6170 *
6171 * Two versions of this macro are provided: one for daily use and one for use
6172 * when working on IEM.
6173 */
6174#if 0
6175# define IEMOP_BITCH_ABOUT_STUB() \
6176 do { \
6177 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6178 iemOpStubMsg2(pVCpu); \
6179 RTAssertPanic(); \
6180 } while (0)
6181#else
6182# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6183#endif
6184
6185/** Stubs an opcode. */
6186#define FNIEMOP_STUB(a_Name) \
6187 FNIEMOP_DEF(a_Name) \
6188 { \
6189 RT_NOREF_PV(pVCpu); \
6190 IEMOP_BITCH_ABOUT_STUB(); \
6191 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6192 } \
6193 typedef int ignore_semicolon
6194
6195/** Stubs an opcode. */
6196#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6197 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6198 { \
6199 RT_NOREF_PV(pVCpu); \
6200 RT_NOREF_PV(a_Name0); \
6201 IEMOP_BITCH_ABOUT_STUB(); \
6202 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6203 } \
6204 typedef int ignore_semicolon
6205
6206/** Stubs an opcode which currently should raise \#UD. */
6207#define FNIEMOP_UD_STUB(a_Name) \
6208 FNIEMOP_DEF(a_Name) \
6209 { \
6210 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6211 return IEMOP_RAISE_INVALID_OPCODE(); \
6212 } \
6213 typedef int ignore_semicolon
6214
6215/** Stubs an opcode which currently should raise \#UD. */
6216#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6217 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6218 { \
6219 RT_NOREF_PV(pVCpu); \
6220 RT_NOREF_PV(a_Name0); \
6221 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6222 return IEMOP_RAISE_INVALID_OPCODE(); \
6223 } \
6224 typedef int ignore_semicolon
6225
6226
6227
6228/** @name Register Access.
6229 * @{
6230 */
6231
6232/**
6233 * Gets a reference (pointer) to the specified hidden segment register.
6234 *
6235 * @returns Hidden register reference.
6236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6237 * @param iSegReg The segment register.
6238 */
6239IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6240{
6241 Assert(iSegReg < X86_SREG_COUNT);
6242 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6243 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6244
6245#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6246 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6247 { /* likely */ }
6248 else
6249 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6250#else
6251 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6252#endif
6253 return pSReg;
6254}
6255
6256
6257/**
6258 * Ensures that the given hidden segment register is up to date.
6259 *
6260 * @returns Hidden register reference.
6261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6262 * @param pSReg The segment register.
6263 */
6264IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6265{
6266#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6267 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6268 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6269#else
6270 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6271 NOREF(pVCpu);
6272#endif
6273 return pSReg;
6274}
6275
6276
6277/**
6278 * Gets a reference (pointer) to the specified segment register (the selector
6279 * value).
6280 *
6281 * @returns Pointer to the selector variable.
6282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6283 * @param iSegReg The segment register.
6284 */
6285DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6286{
6287 Assert(iSegReg < X86_SREG_COUNT);
6288 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6289 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6290}
6291
6292
6293/**
6294 * Fetches the selector value of a segment register.
6295 *
6296 * @returns The selector value.
6297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6298 * @param iSegReg The segment register.
6299 */
6300DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6301{
6302 Assert(iSegReg < X86_SREG_COUNT);
6303 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6304 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6305}
6306
6307
6308/**
6309 * Fetches the base address value of a segment register.
6310 *
6311 * @returns The segment base address value.
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param iSegReg The segment register.
6314 */
6315DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6316{
6317 Assert(iSegReg < X86_SREG_COUNT);
6318 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6319 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6320}
6321
6322
6323/**
6324 * Gets a reference (pointer) to the specified general purpose register.
6325 *
6326 * @returns Register reference.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iReg The general purpose register.
6329 */
6330DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6331{
6332 Assert(iReg < 16);
6333 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6334}
6335
6336
6337/**
6338 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6339 *
6340 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6341 *
6342 * @returns Register reference.
6343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6344 * @param iReg The register.
6345 */
6346DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6347{
6348 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6349 {
6350 Assert(iReg < 16);
6351 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6352 }
6353 /* high 8-bit register. */
6354 Assert(iReg < 8);
6355 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6356}
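/*
 * Example of the high-byte quirk handled above: without any REX prefix,
 * encodings 4-7 select AH/CH/DH/BH, so iReg=4 (AH) resolves to aGRegs[0].bHi
 * and iReg=7 (BH) to aGRegs[3].bHi; with a REX prefix present, iReg=4 instead
 * selects SPL via aGRegs[4].u8.
 */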
6357
6358
6359/**
6360 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6361 *
6362 * @returns Register reference.
6363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6364 * @param iReg The register.
6365 */
6366DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6367{
6368 Assert(iReg < 16);
6369 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6370}
6371
6372
6373/**
6374 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6375 *
6376 * @returns Register reference.
6377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6378 * @param iReg The register.
6379 */
6380DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6381{
6382 Assert(iReg < 16);
6383 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6384}
6385
6386
6387/**
6388 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6389 *
6390 * @returns Register reference.
6391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6392 * @param iReg The register.
6393 */
6394DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6395{
6396 Assert(iReg < 16);
6397 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6398}
6399
6400
6401/**
6402 * Gets a reference (pointer) to the specified segment register's base address.
6403 *
6404 * @returns Segment register base address reference.
6405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6406 * @param iSegReg The segment selector.
6407 */
6408DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6409{
6410 Assert(iSegReg < X86_SREG_COUNT);
6411 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6412 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6413}
6414
6415
6416/**
6417 * Fetches the value of an 8-bit general purpose register.
6418 *
6419 * @returns The register value.
6420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6421 * @param iReg The register.
6422 */
6423DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6424{
6425 return *iemGRegRefU8(pVCpu, iReg);
6426}
6427
6428
6429/**
6430 * Fetches the value of a 16-bit general purpose register.
6431 *
6432 * @returns The register value.
6433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6434 * @param iReg The register.
6435 */
6436DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6437{
6438 Assert(iReg < 16);
6439 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6440}
6441
6442
6443/**
6444 * Fetches the value of a 32-bit general purpose register.
6445 *
6446 * @returns The register value.
6447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6448 * @param iReg The register.
6449 */
6450DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6451{
6452 Assert(iReg < 16);
6453 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6454}
6455
6456
6457/**
6458 * Fetches the value of a 64-bit general purpose register.
6459 *
6460 * @returns The register value.
6461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6462 * @param iReg The register.
6463 */
6464DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6465{
6466 Assert(iReg < 16);
6467 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6468}
6469
6470
6471/**
6472 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6473 *
6474 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6475 * segment limit.
6476 *
6477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6478 * @param offNextInstr The offset of the next instruction.
6479 */
6480IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6481{
6482 switch (pVCpu->iem.s.enmEffOpSize)
6483 {
6484 case IEMMODE_16BIT:
6485 {
6486 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6487 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6488 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6489 return iemRaiseGeneralProtectionFault0(pVCpu);
6490 pVCpu->cpum.GstCtx.rip = uNewIp;
6491 break;
6492 }
6493
6494 case IEMMODE_32BIT:
6495 {
6496 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6497 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6498
6499 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6500 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6501 return iemRaiseGeneralProtectionFault0(pVCpu);
6502 pVCpu->cpum.GstCtx.rip = uNewEip;
6503 break;
6504 }
6505
6506 case IEMMODE_64BIT:
6507 {
6508 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6509
6510 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6511 if (!IEM_IS_CANONICAL(uNewRip))
6512 return iemRaiseGeneralProtectionFault0(pVCpu);
6513 pVCpu->cpum.GstCtx.rip = uNewRip;
6514 break;
6515 }
6516
6517 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6518 }
6519
6520 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6521
6522#ifndef IEM_WITH_CODE_TLB
6523 /* Flush the prefetch buffer. */
6524 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6525#endif
6526
6527 return VINF_SUCCESS;
6528}
6529
6530
6531/**
6532 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6533 *
6534 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6535 * segment limit.
6536 *
6537 * @returns Strict VBox status code.
6538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6539 * @param offNextInstr The offset of the next instruction.
6540 */
6541IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6542{
6543 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6544
6545 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6546 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6547 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6548 return iemRaiseGeneralProtectionFault0(pVCpu);
6549 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6550 pVCpu->cpum.GstCtx.rip = uNewIp;
6551 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6552
6553#ifndef IEM_WITH_CODE_TLB
6554 /* Flush the prefetch buffer. */
6555 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6556#endif
6557
6558 return VINF_SUCCESS;
6559}
6560
6561
6562/**
6563 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6564 *
6565 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6566 * segment limit.
6567 *
6568 * @returns Strict VBox status code.
6569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6570 * @param offNextInstr The offset of the next instruction.
6571 */
6572IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6573{
6574 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6575
6576 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6577 {
6578 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6579
6580 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6581 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6582 return iemRaiseGeneralProtectionFault0(pVCpu);
6583 pVCpu->cpum.GstCtx.rip = uNewEip;
6584 }
6585 else
6586 {
6587 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6588
6589 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6590 if (!IEM_IS_CANONICAL(uNewRip))
6591 return iemRaiseGeneralProtectionFault0(pVCpu);
6592 pVCpu->cpum.GstCtx.rip = uNewRip;
6593 }
6594 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6595
6596#ifndef IEM_WITH_CODE_TLB
6597 /* Flush the prefetch buffer. */
6598 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6599#endif
6600
6601 return VINF_SUCCESS;
6602}
6603
6604
6605/**
6606 * Performs a near jump to the specified address.
6607 *
6608 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6609 * segment limit.
6610 *
6611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6612 * @param uNewRip The new RIP value.
6613 */
6614IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6615{
6616 switch (pVCpu->iem.s.enmEffOpSize)
6617 {
6618 case IEMMODE_16BIT:
6619 {
6620 Assert(uNewRip <= UINT16_MAX);
6621 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6622 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6623 return iemRaiseGeneralProtectionFault0(pVCpu);
6624 /** @todo Test 16-bit jump in 64-bit mode. */
6625 pVCpu->cpum.GstCtx.rip = uNewRip;
6626 break;
6627 }
6628
6629 case IEMMODE_32BIT:
6630 {
6631 Assert(uNewRip <= UINT32_MAX);
6632 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6633 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6634
6635 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6636 return iemRaiseGeneralProtectionFault0(pVCpu);
6637 pVCpu->cpum.GstCtx.rip = uNewRip;
6638 break;
6639 }
6640
6641 case IEMMODE_64BIT:
6642 {
6643 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6644
6645 if (!IEM_IS_CANONICAL(uNewRip))
6646 return iemRaiseGeneralProtectionFault0(pVCpu);
6647 pVCpu->cpum.GstCtx.rip = uNewRip;
6648 break;
6649 }
6650
6651 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6652 }
6653
6654 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6655
6656#ifndef IEM_WITH_CODE_TLB
6657 /* Flush the prefetch buffer. */
6658 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6659#endif
6660
6661 return VINF_SUCCESS;
6662}
6663
6664
6665/**
6666 * Get the address of the top of the stack.
6667 *
6668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6669 */
6670DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6671{
6672 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6673 return pVCpu->cpum.GstCtx.rsp;
6674 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6675 return pVCpu->cpum.GstCtx.esp;
6676 return pVCpu->cpum.GstCtx.sp;
6677}
6678
6679
6680/**
6681 * Updates the RIP/EIP/IP to point to the next instruction.
6682 *
6683 * This function leaves the EFLAGS.RF flag alone.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 * @param cbInstr The number of bytes to add.
6687 */
6688IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6689{
6690 switch (pVCpu->iem.s.enmCpuMode)
6691 {
6692 case IEMMODE_16BIT:
6693 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6694 pVCpu->cpum.GstCtx.eip += cbInstr;
6695 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6696 break;
6697
6698 case IEMMODE_32BIT:
6699 pVCpu->cpum.GstCtx.eip += cbInstr;
6700 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6701 break;
6702
6703 case IEMMODE_64BIT:
6704 pVCpu->cpum.GstCtx.rip += cbInstr;
6705 break;
6706 default: AssertFailed();
6707 }
6708}
6709
6710
6711#if 0
6712/**
6713 * Updates the RIP/EIP/IP to point to the next instruction.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 */
6717IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6718{
6719 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6720}
6721#endif
6722
6723
6724
6725/**
6726 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6727 *
6728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6729 * @param cbInstr The number of bytes to add.
6730 */
6731IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6732{
6733 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6734
6735 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6736#if ARCH_BITS >= 64
6737 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6738 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6739 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6740#else
6741 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6742 pVCpu->cpum.GstCtx.rip += cbInstr;
6743 else
6744 pVCpu->cpum.GstCtx.eip += cbInstr;
6745#endif
6746}
6747
6748
6749/**
6750 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6751 *
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 */
6754IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6755{
6756 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6757}
6758
6759
6760/**
6761 * Adds to the stack pointer.
6762 *
6763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6764 * @param cbToAdd The number of bytes to add (8-bit!).
6765 */
6766DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6767{
6768 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6769 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6770 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6771 pVCpu->cpum.GstCtx.esp += cbToAdd;
6772 else
6773 pVCpu->cpum.GstCtx.sp += cbToAdd;
6774}
6775
6776
6777/**
6778 * Subtracts from the stack pointer.
6779 *
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 * @param cbToSub The number of bytes to subtract (8-bit!).
6782 */
6783DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6784{
6785 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6786 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6787 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6788 pVCpu->cpum.GstCtx.esp -= cbToSub;
6789 else
6790 pVCpu->cpum.GstCtx.sp -= cbToSub;
6791}
6792
6793
6794/**
6795 * Adds to the temporary stack pointer.
6796 *
6797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6798 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6799 * @param cbToAdd The number of bytes to add (16-bit).
6800 */
6801DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6802{
6803 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6804 pTmpRsp->u += cbToAdd;
6805 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6806 pTmpRsp->DWords.dw0 += cbToAdd;
6807 else
6808 pTmpRsp->Words.w0 += cbToAdd;
6809}
6810
6811
6812/**
6813 * Subtracts from the temporary stack pointer.
6814 *
6815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6816 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6817 * @param cbToSub The number of bytes to subtract.
6818 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter
6819 * expects that.
6820 */
6821DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6822{
6823 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6824 pTmpRsp->u -= cbToSub;
6825 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6826 pTmpRsp->DWords.dw0 -= cbToSub;
6827 else
6828 pTmpRsp->Words.w0 -= cbToSub;
6829}
6830
6831
6832/**
6833 * Calculates the effective stack address for a push of the specified size as
6834 * well as the new RSP value (upper bits may be masked).
6835 *
6836 * @returns Effective stack address for the push.
6837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6838 * @param cbItem The size of the stack item to push.
6839 * @param puNewRsp Where to return the new RSP value.
6840 */
6841DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6842{
6843 RTUINT64U uTmpRsp;
6844 RTGCPTR GCPtrTop;
6845 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6846
6847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6848 GCPtrTop = uTmpRsp.u -= cbItem;
6849 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6850 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6851 else
6852 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6853 *puNewRsp = uTmpRsp.u;
6854 return GCPtrTop;
6855}
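/*
 * A minimal usage sketch (not built): pushing a dword on a 32-bit stack
 * (SS.Attr.n.u1DefBig set) with ESP=0x1000 returns 0xFFC as the effective
 * address and 0xFFC in *puNewRsp; only the low 32 bits of RSP are touched,
 * mirroring how the CPU masks the stack pointer outside 64-bit mode.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint32_t), &uNewRsp);
#endif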
6856
6857
6858/**
6859 * Gets the current stack pointer and calculates the value after a pop of the
6860 * specified size.
6861 *
6862 * @returns Current stack pointer.
6863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6864 * @param cbItem The size of the stack item to pop.
6865 * @param puNewRsp Where to return the new RSP value.
6866 */
6867DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6868{
6869 RTUINT64U uTmpRsp;
6870 RTGCPTR GCPtrTop;
6871 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6872
6873 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6874 {
6875 GCPtrTop = uTmpRsp.u;
6876 uTmpRsp.u += cbItem;
6877 }
6878 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6879 {
6880 GCPtrTop = uTmpRsp.DWords.dw0;
6881 uTmpRsp.DWords.dw0 += cbItem;
6882 }
6883 else
6884 {
6885 GCPtrTop = uTmpRsp.Words.w0;
6886 uTmpRsp.Words.w0 += cbItem;
6887 }
6888 *puNewRsp = uTmpRsp.u;
6889 return GCPtrTop;
6890}
6891
6892
6893/**
6894 * Calculates the effective stack address for a push of the specified size as
6895 * well as the new temporary RSP value (upper bits may be masked).
6896 *
6897 * @returns Effective stack address for the push.
6898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6899 * @param pTmpRsp The temporary stack pointer. This is updated.
6900 * @param cbItem The size of the stack item to push.
6901 */
6902DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6903{
6904 RTGCPTR GCPtrTop;
6905
6906 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6907 GCPtrTop = pTmpRsp->u -= cbItem;
6908 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6909 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6910 else
6911 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6912 return GCPtrTop;
6913}
6914
6915
6916/**
6917 * Gets the effective stack address for a pop of the specified size and
6918 * calculates and updates the temporary RSP.
6919 *
6920 * @returns Current stack pointer.
6921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6922 * @param pTmpRsp The temporary stack pointer. This is updated.
6923 * @param cbItem The size of the stack item to pop.
6924 */
6925DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6926{
6927 RTGCPTR GCPtrTop;
6928 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6929 {
6930 GCPtrTop = pTmpRsp->u;
6931 pTmpRsp->u += cbItem;
6932 }
6933 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6934 {
6935 GCPtrTop = pTmpRsp->DWords.dw0;
6936 pTmpRsp->DWords.dw0 += cbItem;
6937 }
6938 else
6939 {
6940 GCPtrTop = pTmpRsp->Words.w0;
6941 pTmpRsp->Words.w0 += cbItem;
6942 }
6943 return GCPtrTop;
6944}
6945
6946/** @} */
6947
6948
6949/** @name FPU access and helpers.
6950 *
6951 * @{
6952 */
6953
6954
6955/**
6956 * Hook for preparing to use the host FPU.
6957 *
6958 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6959 *
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 */
6962DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6963{
6964#ifdef IN_RING3
6965 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6966#else
6967 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6968#endif
6969 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6970}
6971
6972
6973/**
6974 * Hook for preparing to use the host FPU for SSE.
6975 *
6976 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6977 *
6978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6979 */
6980DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6981{
6982 iemFpuPrepareUsage(pVCpu);
6983}
6984
6985
6986/**
6987 * Hook for preparing to use the host FPU for AVX.
6988 *
6989 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6990 *
6991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6992 */
6993DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6994{
6995 iemFpuPrepareUsage(pVCpu);
6996}
6997
6998
6999/**
7000 * Hook for actualizing the guest FPU state before the interpreter reads it.
7001 *
7002 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7003 *
7004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7005 */
7006DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7007{
7008#ifdef IN_RING3
7009 NOREF(pVCpu);
7010#else
7011 CPUMRZFpuStateActualizeForRead(pVCpu);
7012#endif
7013 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7014}
7015
7016
7017/**
7018 * Hook for actualizing the guest FPU state before the interpreter changes it.
7019 *
7020 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7021 *
7022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7023 */
7024DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7025{
7026#ifdef IN_RING3
7027 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7028#else
7029 CPUMRZFpuStateActualizeForChange(pVCpu);
7030#endif
7031 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7032}
7033
7034
7035/**
7036 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7037 * only.
7038 *
7039 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7040 *
7041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7042 */
7043DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7044{
7045#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7046 NOREF(pVCpu);
7047#else
7048 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7049#endif
7050 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7051}
7052
7053
7054/**
7055 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7056 * read+write.
7057 *
7058 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7059 *
7060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7061 */
7062DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7063{
7064#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7065 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7066#else
7067 CPUMRZFpuStateActualizeForChange(pVCpu);
7068#endif
7069 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7070}
7071
7072
7073/**
7074 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7075 * only.
7076 *
7077 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7078 *
7079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7080 */
7081DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7082{
7083#ifdef IN_RING3
7084 NOREF(pVCpu);
7085#else
7086 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7087#endif
7088 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7089}
7090
7091
7092/**
7093 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7094 * read+write.
7095 *
7096 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7097 *
7098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7099 */
7100DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7101{
7102#ifdef IN_RING3
7103 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7104#else
7105 CPUMRZFpuStateActualizeForChange(pVCpu);
7106#endif
7107 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7108}
7109
7110
7111/**
7112 * Stores a QNaN value into a FPU register.
7113 *
7114 * @param pReg Pointer to the register.
7115 */
7116DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7117{
7118 pReg->au32[0] = UINT32_C(0x00000000);
7119 pReg->au32[1] = UINT32_C(0xc0000000);
7120 pReg->au16[4] = UINT16_C(0xffff);
7121}
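/*
 * The three stores above yield the canonical 80-bit "real indefinite" QNaN:
 * sign=1, exponent=0x7FFF, mantissa=0xC000000000000000, i.e. the value
 * 0xFFFF'C0000000'00000000 when read as sign:exponent:mantissa.
 */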
7122
7123
7124/**
7125 * Updates the FOP, FPU.CS and FPUIP registers.
7126 *
7127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7128 * @param pFpuCtx The FPU context.
7129 */
7130DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7131{
7132 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7133 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7134 /** @todo x87.CS and FPUIP need to be kept separately. */
7135 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7136 {
7137 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
7138 * are handled in real mode, based on the fnsave and fnstenv images. */
7139 pFpuCtx->CS = 0;
7140 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7141 }
7142 else
7143 {
7144 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7145 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7146 }
7147}
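/*
 * In real and V86 mode the FPU environment stores the instruction address as a
 * single linear-style value rather than a selector:offset pair, hence the
 * EIP | (CS << 4) form above; e.g. CS=0x1234, IP=0x0010 gives FPUIP=0x12350
 * with FPUCS left at zero.
 */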
7148
7149
7150/**
7151 * Updates the x87.DS and FPUDP registers.
7152 *
7153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7154 * @param pFpuCtx The FPU context.
7155 * @param iEffSeg The effective segment register.
7156 * @param GCPtrEff The effective address relative to @a iEffSeg.
7157 */
7158DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7159{
7160 RTSEL sel;
7161 switch (iEffSeg)
7162 {
7163 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7164 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7165 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7166 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7167 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7168 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7169 default:
7170 AssertMsgFailed(("%d\n", iEffSeg));
7171 sel = pVCpu->cpum.GstCtx.ds.Sel;
7172 }
7173 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7174 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7175 {
7176 pFpuCtx->DS = 0;
7177 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7178 }
7179 else
7180 {
7181 pFpuCtx->DS = sel;
7182 pFpuCtx->FPUDP = GCPtrEff;
7183 }
7184}
7185
7186
7187/**
7188 * Rotates the stack registers in the push direction.
7189 *
7190 * @param pFpuCtx The FPU context.
7191 * @remarks This is a complete waste of time, but fxsave stores the registers in
7192 * stack order.
7193 */
7194DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7195{
7196 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7197 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7198 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7199 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7200 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7201 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7202 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7203 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7204 pFpuCtx->aRegs[0].r80 = r80Tmp;
7205}
7206
7207
7208/**
7209 * Rotates the stack registers in the pop direction.
7210 *
7211 * @param pFpuCtx The FPU context.
7212 * @remarks This is a complete waste of time, but fxsave stores the registers in
7213 * stack order.
7214 */
7215DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7216{
7217 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7218 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7219 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7220 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7221 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7222 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7223 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7224 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7225 pFpuCtx->aRegs[7].r80 = r80Tmp;
7226}
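/*
 * Note on the two rotations above: aRegs[] is kept in stack order (aRegs[i]
 * is ST(i)), matching the FXSAVE layout, while the FTW valid bits are tracked
 * by physical register number, hence the (TOP + iStReg) & 7 mapping used
 * elsewhere in this file. A push therefore both decrements TOP in FSW and
 * rotates aRegs[] one slot so the two views stay consistent.
 */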
7227
7228
7229/**
7230 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7231 * exception prevents it.
7232 *
7233 * @param pResult The FPU operation result to push.
7234 * @param pFpuCtx The FPU context.
7235 */
7236IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7237{
7238 /* Update FSW and bail if there are pending exceptions afterwards. */
7239 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7240 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7241 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7242 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7243 {
7244 pFpuCtx->FSW = fFsw;
7245 return;
7246 }
7247
7248 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7249 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7250 {
7251 /* All is fine, push the actual value. */
7252 pFpuCtx->FTW |= RT_BIT(iNewTop);
7253 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7254 }
7255 else if (pFpuCtx->FCW & X86_FCW_IM)
7256 {
7257 /* Masked stack overflow, push QNaN. */
7258 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7259 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7260 }
7261 else
7262 {
7263 /* Raise stack overflow, don't push anything. */
7264 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7265 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7266 return;
7267 }
7268
7269 fFsw &= ~X86_FSW_TOP_MASK;
7270 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7271 pFpuCtx->FSW = fFsw;
7272
7273 iemFpuRotateStackPush(pFpuCtx);
7274}
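/*
 * The pending-exception test above relies on the IE/DE/ZE flags in FSW
 * occupying the same bit positions (0-2) as the IM/DM/ZM mask bits in FCW,
 * so "status flags & ~control masks" is non-zero exactly when an unmasked
 * exception is pending. The same idiom is used by iemFpuMaybePopOne and
 * iemFpuPushResultTwo below.
 */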
7275
7276
7277/**
7278 * Stores a result in a FPU register and updates the FSW and FTW.
7279 *
7280 * @param pFpuCtx The FPU context.
7281 * @param pResult The result to store.
7282 * @param iStReg Which FPU register to store it in.
7283 */
7284IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7285{
7286 Assert(iStReg < 8);
7287 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7288 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7289 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7290 pFpuCtx->FTW |= RT_BIT(iReg);
7291 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7292}
7293
7294
7295/**
7296 * Only updates the FPU status word (FSW) with the result of the current
7297 * instruction.
7298 *
7299 * @param pFpuCtx The FPU context.
7300 * @param u16FSW The FSW output of the current instruction.
7301 */
7302IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7303{
7304 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7305 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7306}
7307
7308
7309/**
7310 * Pops one item off the FPU stack if no pending exception prevents it.
7311 *
7312 * @param pFpuCtx The FPU context.
7313 */
7314IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7315{
7316 /* Check pending exceptions. */
7317 uint16_t uFSW = pFpuCtx->FSW;
7318 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7319 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7320 return;
7321
7322 /* TOP--. */
7323 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7324 uFSW &= ~X86_FSW_TOP_MASK;
7325 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7326 pFpuCtx->FSW = uFSW;
7327
7328 /* Mark the previous ST0 as empty. */
7329 iOldTop >>= X86_FSW_TOP_SHIFT;
7330 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7331
7332 /* Rotate the registers. */
7333 iemFpuRotateStackPop(pFpuCtx);
7334}
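/*
 * Since the sum is masked with X86_FSW_TOP_MASK, the "+ (9 << X86_FSW_TOP_SHIFT)"
 * above simply increments TOP by one modulo 8 (9 & 7 == 1), the counterpart of
 * the "(TOP + 7) & X86_FSW_TOP_SMASK" decrement performed on pushes.
 */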
7335
7336
7337/**
7338 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param pResult The FPU operation result to push.
7342 */
7343IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7344{
7345 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7346 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7347 iemFpuMaybePushResult(pResult, pFpuCtx);
7348}
7349
7350
7351/**
7352 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7353 * and sets FPUDP and FPUDS.
7354 *
7355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7356 * @param pResult The FPU operation result to push.
7357 * @param iEffSeg The effective segment register.
7358 * @param GCPtrEff The effective address relative to @a iEffSeg.
7359 */
7360IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7361{
7362 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7363 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7364 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7365 iemFpuMaybePushResult(pResult, pFpuCtx);
7366}
7367
7368
7369/**
7370 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7371 * unless a pending exception prevents it.
7372 *
7373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7374 * @param pResult The FPU operation result to store and push.
7375 */
7376IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7377{
7378 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7379 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7380
7381 /* Update FSW and bail if there are pending exceptions afterwards. */
7382 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7383 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7384 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7385 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7386 {
7387 pFpuCtx->FSW = fFsw;
7388 return;
7389 }
7390
7391 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7392 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7393 {
7394 /* All is fine, push the actual value. */
7395 pFpuCtx->FTW |= RT_BIT(iNewTop);
7396 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7397 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7398 }
7399 else if (pFpuCtx->FCW & X86_FCW_IM)
7400 {
7401 /* Masked stack overflow, push QNaN. */
7402 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7403 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7404 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7405 }
7406 else
7407 {
7408 /* Raise stack overflow, don't push anything. */
7409 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7410 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7411 return;
7412 }
7413
7414 fFsw &= ~X86_FSW_TOP_MASK;
7415 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7416 pFpuCtx->FSW = fFsw;
7417
7418 iemFpuRotateStackPush(pFpuCtx);
7419}
7420
7421
7422/**
7423 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7424 * FOP.
7425 *
7426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7427 * @param pResult The result to store.
7428 * @param iStReg Which FPU register to store it in.
7429 */
7430IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7431{
7432 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7433 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7434 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7435}
7436
7437
7438/**
7439 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7440 * FOP, and then pops the stack.
7441 *
7442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7443 * @param pResult The result to store.
7444 * @param iStReg Which FPU register to store it in.
7445 */
7446IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7447{
7448 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7449 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7450 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7451 iemFpuMaybePopOne(pFpuCtx);
7452}
7453
7454
7455/**
7456 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7457 * FPUDP, and FPUDS.
7458 *
7459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7460 * @param pResult The result to store.
7461 * @param iStReg Which FPU register to store it in.
7462 * @param iEffSeg The effective memory operand selector register.
7463 * @param GCPtrEff The effective memory operand offset.
7464 */
7465IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7466 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7467{
7468 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7470 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7471 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7472}
7473
7474
7475/**
7476 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7477 * FPUDP, and FPUDS, and then pops the stack.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param pResult The result to store.
7481 * @param iStReg Which FPU register to store it in.
7482 * @param iEffSeg The effective memory operand selector register.
7483 * @param GCPtrEff The effective memory operand offset.
7484 */
7485IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7486 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7487{
7488 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7489 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7490 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7491 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7492 iemFpuMaybePopOne(pFpuCtx);
7493}
7494
7495
7496/**
7497 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7498 *
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 */
7501IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7502{
7503 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7504 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7505}
7506
7507
7508/**
7509 * Marks the specified stack register as free (for FFREE).
7510 *
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 * @param iStReg The register to free.
7513 */
7514IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7515{
7516 Assert(iStReg < 8);
7517 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7518 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7519 pFpuCtx->FTW &= ~RT_BIT(iReg);
7520}
7521
7522
7523/**
7524 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7525 *
7526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7527 */
7528IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7529{
7530 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7531 uint16_t uFsw = pFpuCtx->FSW;
7532 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7533 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7534 uFsw &= ~X86_FSW_TOP_MASK;
7535 uFsw |= uTop;
7536 pFpuCtx->FSW = uFsw;
7537}
7538
7539
7540/**
7541 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7542 *
7543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7544 */
7545IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7546{
7547 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7548 uint16_t uFsw = pFpuCtx->FSW;
7549 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7550 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7551 uFsw &= ~X86_FSW_TOP_MASK;
7552 uFsw |= uTop;
7553 pFpuCtx->FSW = uFsw;
7554}
7555
7556
7557/**
7558 * Updates the FSW, FOP, FPUIP, and FPUCS.
7559 *
7560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7561 * @param u16FSW The FSW from the current instruction.
7562 */
7563IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7564{
7565 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7566 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7567 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7568}
7569
7570
7571/**
7572 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7573 *
7574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7575 * @param u16FSW The FSW from the current instruction.
7576 */
7577IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7578{
7579 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7580 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7581 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7582 iemFpuMaybePopOne(pFpuCtx);
7583}
7584
7585
7586/**
7587 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7588 *
7589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7590 * @param u16FSW The FSW from the current instruction.
7591 * @param iEffSeg The effective memory operand selector register.
7592 * @param GCPtrEff The effective memory operand offset.
7593 */
7594IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7595{
7596 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7597 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7598 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7599 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7600}
7601
7602
7603/**
7604 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7605 *
7606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7607 * @param u16FSW The FSW from the current instruction.
7608 */
7609IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7610{
7611 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7613 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7614 iemFpuMaybePopOne(pFpuCtx);
7615 iemFpuMaybePopOne(pFpuCtx);
7616}
7617
7618
7619/**
7620 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7621 *
7622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7623 * @param u16FSW The FSW from the current instruction.
7624 * @param iEffSeg The effective memory operand selector register.
7625 * @param GCPtrEff The effective memory operand offset.
7626 */
7627IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7628{
7629 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7630 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7631 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7632 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7633 iemFpuMaybePopOne(pFpuCtx);
7634}
7635
7636
7637/**
7638 * Worker routine for raising an FPU stack underflow exception.
7639 *
7640 * @param pFpuCtx The FPU context.
7641 * @param iStReg The stack register being accessed.
7642 */
7643IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7644{
7645 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7646 if (pFpuCtx->FCW & X86_FCW_IM)
7647 {
7648 /* Masked underflow. */
7649 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7650 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7651 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7652 if (iStReg != UINT8_MAX)
7653 {
7654 pFpuCtx->FTW |= RT_BIT(iReg);
7655 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7656 }
7657 }
7658 else
7659 {
7660 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7661 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7662 }
7663}
7664
7665
7666/**
7667 * Raises a FPU stack underflow exception.
7668 *
7669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7670 * @param iStReg The destination register that should be loaded
7671 * with QNaN if \#IS is not masked. Specify
7672 * UINT8_MAX if none (like for fcom).
7673 */
7674DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7675{
7676 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7677 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7678 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7679}
7680
7681
7682DECL_NO_INLINE(IEM_STATIC, void)
7683iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7684{
7685 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7686 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7687 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7688 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7689}
7690
7691
7692DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7693{
7694 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7695 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7696 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7697 iemFpuMaybePopOne(pFpuCtx);
7698}
7699
7700
7701DECL_NO_INLINE(IEM_STATIC, void)
7702iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7703{
7704 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7705 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7706 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7707 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7708 iemFpuMaybePopOne(pFpuCtx);
7709}
7710
7711
7712DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7713{
7714 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7715 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7716 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7717 iemFpuMaybePopOne(pFpuCtx);
7718 iemFpuMaybePopOne(pFpuCtx);
7719}
7720
7721
7722DECL_NO_INLINE(IEM_STATIC, void)
7723iemFpuStackPushUnderflow(PVMCPU pVCpu)
7724{
7725 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7726 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7727
7728 if (pFpuCtx->FCW & X86_FCW_IM)
7729 {
7730 /* Masked underflow - push QNaN. */
7731 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7732 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7733 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7734 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7735 pFpuCtx->FTW |= RT_BIT(iNewTop);
7736 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7737 iemFpuRotateStackPush(pFpuCtx);
7738 }
7739 else
7740 {
7741 /* Exception pending - don't change TOP or the register stack. */
7742 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7743 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7744 }
7745}
7746
7747
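/**
 * Raises an FPU stack underflow exception on a push of two results, leaving
 * QNaN in both result registers if the exception is masked.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */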
7748DECL_NO_INLINE(IEM_STATIC, void)
7749iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7750{
7751 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7752 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7753
7754 if (pFpuCtx->FCW & X86_FCW_IM)
7755 {
7756 /* Masked underflow - push QNaN results. */
7757 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7758 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7759 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7760 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7761 pFpuCtx->FTW |= RT_BIT(iNewTop);
7762 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7763 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7764 iemFpuRotateStackPush(pFpuCtx);
7765 }
7766 else
7767 {
7768 /* Exception pending - don't change TOP or the register stack. */
7769 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7770 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7771 }
7772}
7773
7774
7775/**
7776 * Worker routine for raising an FPU stack overflow exception on a push.
7777 *
7778 * @param pFpuCtx The FPU context.
7779 */
7780IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7781{
7782 if (pFpuCtx->FCW & X86_FCW_IM)
7783 {
7784 /* Masked overflow. */
7785 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7786 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7787 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7788 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7789 pFpuCtx->FTW |= RT_BIT(iNewTop);
7790 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7791 iemFpuRotateStackPush(pFpuCtx);
7792 }
7793 else
7794 {
7795 /* Exception pending - don't change TOP or the register stack. */
7796 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7797 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7798 }
7799}
7800
7801
7802/**
7803 * Raises an FPU stack overflow exception on a push.
7804 *
7805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7806 */
7807DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7808{
7809 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7810 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7811 iemFpuStackPushOverflowOnly(pFpuCtx);
7812}
7813
7814
7815/**
7816 * Raises a FPU stack overflow exception on a push with a memory operand.
7817 *
7818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7819 * @param iEffSeg The effective memory operand selector register.
7820 * @param GCPtrEff The effective memory operand offset.
7821 */
7822DECL_NO_INLINE(IEM_STATIC, void)
7823iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7824{
7825 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7826 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7827 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7828 iemFpuStackPushOverflowOnly(pFpuCtx);
7829}
7830
7831
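/**
 * Checks that the given FPU stack register is not empty.
 *
 * @returns VINF_SUCCESS if the register is occupied, VERR_NOT_FOUND if empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP) to check.
 */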
7832IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7833{
7834 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7835 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7836 if (pFpuCtx->FTW & RT_BIT(iReg))
7837 return VINF_SUCCESS;
7838 return VERR_NOT_FOUND;
7839}
7840
7841
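/**
 * Checks that the given FPU stack register is not empty, returning a
 * reference to its value.
 *
 * @returns VINF_SUCCESS if the register is occupied, VERR_NOT_FOUND if empty.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The stack register (relative to TOP) to check.
 * @param ppRef Where to return the pointer to the 80-bit value.
 */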
7842IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7843{
7844 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7845 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7846 if (pFpuCtx->FTW & RT_BIT(iReg))
7847 {
7848 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7849 return VINF_SUCCESS;
7850 }
7851 return VERR_NOT_FOUND;
7852}
7853
7854
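/**
 * Checks that both of the given FPU stack registers are not empty, returning
 * references to their values.
 *
 * @returns VINF_SUCCESS if both registers are occupied, VERR_NOT_FOUND otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP) to check.
 * @param ppRef0 Where to return the pointer to the first 80-bit value.
 * @param iStReg1 The second stack register (relative to TOP) to check.
 * @param ppRef1 Where to return the pointer to the second 80-bit value.
 */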
7855IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7856 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7857{
7858 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7859 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7860 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7861 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7862 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7863 {
7864 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7865 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7866 return VINF_SUCCESS;
7867 }
7868 return VERR_NOT_FOUND;
7869}
7870
7871
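/**
 * Checks that both of the given FPU stack registers are not empty, returning
 * a reference to the first one only.
 *
 * @returns VINF_SUCCESS if both registers are occupied, VERR_NOT_FOUND otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg0 The first stack register (relative to TOP) to check.
 * @param ppRef0 Where to return the pointer to the first 80-bit value.
 * @param iStReg1 The second stack register (relative to TOP) to check.
 */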
7872IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7873{
7874 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7875 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7876 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7877 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7878 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7879 {
7880 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7881 return VINF_SUCCESS;
7882 }
7883 return VERR_NOT_FOUND;
7884}
7885
7886
7887/**
7888 * Updates the FPU exception status after FCW is changed.
7889 *
7890 * @param pFpuCtx The FPU context.
7891 */
7892IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7893{
7894 uint16_t u16Fsw = pFpuCtx->FSW;
7895 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7896 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7897 else
7898 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7899 pFpuCtx->FSW = u16Fsw;
7900}
7901
7902
7903/**
7904 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7905 *
7906 * @returns The full FTW.
7907 * @param pFpuCtx The FPU context.
7908 */
7909IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7910{
7911 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7912 uint16_t u16Ftw = 0;
7913 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7914 for (unsigned iSt = 0; iSt < 8; iSt++)
7915 {
7916 unsigned const iReg = (iSt + iTop) & 7;
7917 if (!(u8Ftw & RT_BIT(iReg)))
7918 u16Ftw |= 3 << (iReg * 2); /* empty */
7919 else
7920 {
7921 uint16_t uTag;
7922 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7923 if (pr80Reg->s.uExponent == 0x7fff)
7924 uTag = 2; /* Exponent is all 1's => Special. */
7925 else if (pr80Reg->s.uExponent == 0x0000)
7926 {
7927 if (pr80Reg->s.u64Mantissa == 0x0000)
7928 uTag = 1; /* All bits are zero => Zero. */
7929 else
7930 uTag = 2; /* Must be special. */
7931 }
7932 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7933 uTag = 0; /* Valid. */
7934 else
7935 uTag = 2; /* Must be special. */
7936
7937 u16Ftw |= uTag << (iReg * 2); /* valid / zero / special */
7938 }
7939 }
7940
7941 return u16Ftw;
7942}
7943
7944
7945/**
7946 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7947 *
7948 * @returns The compressed FTW.
7949 * @param u16FullFtw The full FTW to convert.
7950 */
7951IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7952{
7953 uint8_t u8Ftw = 0;
7954 for (unsigned i = 0; i < 8; i++)
7955 {
7956 if ((u16FullFtw & 3) != 3 /*empty*/)
7957 u8Ftw |= RT_BIT(i);
7958 u16FullFtw >>= 2;
7959 }
7960
7961 return u8Ftw;
7962}
7963
7964/** @} */
7965
7966
7967/** @name Memory access.
7968 *
7969 * @{
7970 */
7971
7972
7973/**
7974 * Updates the IEMCPU::cbWritten counter if applicable.
7975 *
7976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7977 * @param fAccess The access being accounted for.
7978 * @param cbMem The access size.
7979 */
7980DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7981{
7982 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7983 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7984 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7985}
7986
7987
7988/**
7989 * Checks if the given segment can be written to, raising the appropriate
7990 * exception if not.
7991 *
7992 * @returns VBox strict status code.
7993 *
7994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7995 * @param pHid Pointer to the hidden register.
7996 * @param iSegReg The register number.
7997 * @param pu64BaseAddr Where to return the base address to use for the
7998 * segment. (In 64-bit code it may differ from the
7999 * base in the hidden segment.)
8000 */
8001IEM_STATIC VBOXSTRICTRC
8002iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8003{
8004 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8005
8006 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8007 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8008 else
8009 {
8010 if (!pHid->Attr.n.u1Present)
8011 {
8012 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8013 AssertRelease(uSel == 0);
8014 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8015 return iemRaiseGeneralProtectionFault0(pVCpu);
8016 }
8017
8018 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8019 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8020 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8021 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8022 *pu64BaseAddr = pHid->u64Base;
8023 }
8024 return VINF_SUCCESS;
8025}
8026
8027
8028/**
8029 * Checks if the given segment can be read from, raising the appropriate
8030 * exception if not.
8031 *
8032 * @returns VBox strict status code.
8033 *
8034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8035 * @param pHid Pointer to the hidden register.
8036 * @param iSegReg The register number.
8037 * @param pu64BaseAddr Where to return the base address to use for the
8038 * segment. (In 64-bit code it may differ from the
8039 * base in the hidden segment.)
8040 */
8041IEM_STATIC VBOXSTRICTRC
8042iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8043{
8044 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8045
8046 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8047 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8048 else
8049 {
8050 if (!pHid->Attr.n.u1Present)
8051 {
8052 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8053 AssertRelease(uSel == 0);
8054 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8055 return iemRaiseGeneralProtectionFault0(pVCpu);
8056 }
8057
8058 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8059 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8060 *pu64BaseAddr = pHid->u64Base;
8061 }
8062 return VINF_SUCCESS;
8063}
8064
8065
8066/**
8067 * Applies the segment limit, base and attributes.
8068 *
8069 * This may raise a \#GP or \#SS.
8070 *
8071 * @returns VBox strict status code.
8072 *
8073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8074 * @param fAccess The kind of access which is being performed.
8075 * @param iSegReg The index of the segment register to apply.
8076 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8077 * TSS, ++).
8078 * @param cbMem The access size.
8079 * @param pGCPtrMem Pointer to the guest memory address to apply
8080 * segmentation to. Input and output parameter.
8081 */
8082IEM_STATIC VBOXSTRICTRC
8083iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8084{
8085 if (iSegReg == UINT8_MAX)
8086 return VINF_SUCCESS;
8087
8088 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8089 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8090 switch (pVCpu->iem.s.enmCpuMode)
8091 {
8092 case IEMMODE_16BIT:
8093 case IEMMODE_32BIT:
8094 {
8095 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8096 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8097
8098 if ( pSel->Attr.n.u1Present
8099 && !pSel->Attr.n.u1Unusable)
8100 {
8101 Assert(pSel->Attr.n.u1DescType);
8102 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8103 {
8104 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8105 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8106 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8107
8108 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8109 {
8110 /** @todo CPL check. */
8111 }
8112
8113 /*
8114 * There are two kinds of data selectors, normal and expand down.
8115 */
8116 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8117 {
8118 if ( GCPtrFirst32 > pSel->u32Limit
8119 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8120 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8121 }
8122 else
8123 {
8124 /*
8125 * The upper boundary is defined by the B bit, not the G bit!
8126 */
8127 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8128 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8129 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8130 }
8131 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8132 }
8133 else
8134 {
8135
8136 /*
8137 * Code selectors can usually be used to read through; writing is
8138 * only permitted in real and V8086 mode.
8139 */
8140 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8141 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8142 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8143 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8144 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8145
8146 if ( GCPtrFirst32 > pSel->u32Limit
8147 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8148 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8149
8150 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8151 {
8152 /** @todo CPL check. */
8153 }
8154
8155 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8156 }
8157 }
8158 else
8159 return iemRaiseGeneralProtectionFault0(pVCpu);
8160 return VINF_SUCCESS;
8161 }
8162
8163 case IEMMODE_64BIT:
8164 {
8165 RTGCPTR GCPtrMem = *pGCPtrMem;
8166 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8167 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8168
8169 Assert(cbMem >= 1);
8170 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8171 return VINF_SUCCESS;
8172 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8173 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8174 return iemRaiseGeneralProtectionFault0(pVCpu);
8175 }
8176
8177 default:
8178 AssertFailedReturn(VERR_IEM_IPE_7);
8179 }
8180}
8181
8182
8183/**
8184 * Translates a virtual address to a physical address and checks if we
8185 * can access the page as specified.
8186 *
8187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8188 * @param GCPtrMem The virtual address.
8189 * @param fAccess The intended access.
8190 * @param pGCPhysMem Where to return the physical address.
8191 */
8192IEM_STATIC VBOXSTRICTRC
8193iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8194{
8195 /** @todo Need a different PGM interface here. We're currently using
8196 * generic / REM interfaces. this won't cut it for R0 & RC. */
8197 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8198 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8199 RTGCPHYS GCPhys;
8200 uint64_t fFlags;
8201 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8202 if (RT_FAILURE(rc))
8203 {
8204 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8205 /** @todo Check unassigned memory in unpaged mode. */
8206 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8207 *pGCPhysMem = NIL_RTGCPHYS;
8208 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8209 }
8210
8211 /* If the page is writable and does not have the no-exec bit set, all
8212 access is allowed. Otherwise we'll have to check more carefully... */
8213 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8214 {
8215 /* Write to read only memory? */
8216 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8217 && !(fFlags & X86_PTE_RW)
8218 && ( (pVCpu->iem.s.uCpl == 3
8219 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8220 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8221 {
8222 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8223 *pGCPhysMem = NIL_RTGCPHYS;
8224 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8225 }
8226
8227 /* Kernel memory accessed by userland? */
8228 if ( !(fFlags & X86_PTE_US)
8229 && pVCpu->iem.s.uCpl == 3
8230 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8231 {
8232 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8233 *pGCPhysMem = NIL_RTGCPHYS;
8234 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8235 }
8236
8237 /* Executing non-executable memory? */
8238 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8239 && (fFlags & X86_PTE_PAE_NX)
8240 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8241 {
8242 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8243 *pGCPhysMem = NIL_RTGCPHYS;
8244 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8245 VERR_ACCESS_DENIED);
8246 }
8247 }
8248
8249 /*
8250 * Set the dirty / access flags.
8251 * ASSUMES this is set when the address is translated rather than on commit...
8252 */
8253 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8254 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8255 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8256 {
8257 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8258 AssertRC(rc2);
8259 }
8260
8261 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8262 *pGCPhysMem = GCPhys;
8263 return VINF_SUCCESS;
8264}
8265
8266
8267
8268/**
8269 * Maps a physical page.
8270 *
8271 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8273 * @param GCPhysMem The physical address.
8274 * @param fAccess The intended access.
8275 * @param ppvMem Where to return the mapping address.
8276 * @param pLock The PGM lock.
8277 */
8278IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8279{
8280#ifdef IEM_LOG_MEMORY_WRITES
8281 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8282 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8283#endif
8284
8285 /** @todo This API may require some improving later. A private deal with PGM
8286 * regarding locking and unlocking needs to be struck. A couple of TLBs
8287 * living in PGM, but with publicly accessible inlined access methods
8288 * could perhaps be an even better solution. */
8289 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8290 GCPhysMem,
8291 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8292 pVCpu->iem.s.fBypassHandlers,
8293 ppvMem,
8294 pLock);
8295 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8296 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8297
8298 return rc;
8299}
8300
8301
8302/**
8303 * Unmap a page previously mapped by iemMemPageMap.
8304 *
8305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8306 * @param GCPhysMem The physical address.
8307 * @param fAccess The intended access.
8308 * @param pvMem What iemMemPageMap returned.
8309 * @param pLock The PGM lock.
8310 */
8311DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8312{
8313 NOREF(pVCpu);
8314 NOREF(GCPhysMem);
8315 NOREF(fAccess);
8316 NOREF(pvMem);
8317 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8318}
8319
8320
8321/**
8322 * Looks up a memory mapping entry.
8323 *
8324 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8326 * @param pvMem The memory address.
8327 * @param fAccess The kind of access the mapping was made with.
8328 */
8329DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8330{
8331 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8332 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8333 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8334 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8335 return 0;
8336 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8337 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8338 return 1;
8339 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8340 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8341 return 2;
8342 return VERR_NOT_FOUND;
8343}
8344
8345
8346/**
8347 * Finds a free memmap entry when using iNextMapping doesn't work.
8348 *
8349 * @returns Memory mapping index, 1024 on failure.
8350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8351 */
8352IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8353{
8354 /*
8355 * The easy case.
8356 */
8357 if (pVCpu->iem.s.cActiveMappings == 0)
8358 {
8359 pVCpu->iem.s.iNextMapping = 1;
8360 return 0;
8361 }
8362
8363 /* There should be enough mappings for all instructions. */
8364 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8365
8366 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8367 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8368 return i;
8369
8370 AssertFailedReturn(1024);
8371}
8372
8373
8374/**
8375 * Commits a bounce buffer that needs writing back and unmaps it.
8376 *
8377 * @returns Strict VBox status code.
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param iMemMap The index of the buffer to commit.
8380 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8381 * Always false in ring-3, obviously.
8382 */
8383IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8384{
8385 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8386 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8387#ifdef IN_RING3
8388 Assert(!fPostponeFail);
8389 RT_NOREF_PV(fPostponeFail);
8390#endif
8391
8392 /*
8393 * Do the writing.
8394 */
8395 PVM pVM = pVCpu->CTX_SUFF(pVM);
8396 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8397 {
8398 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8399 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8400 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8401 if (!pVCpu->iem.s.fBypassHandlers)
8402 {
8403 /*
8404 * Carefully and efficiently dealing with access handler return
8405 * codes make this a little bloated.
8406 */
8407 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8409 pbBuf,
8410 cbFirst,
8411 PGMACCESSORIGIN_IEM);
8412 if (rcStrict == VINF_SUCCESS)
8413 {
8414 if (cbSecond)
8415 {
8416 rcStrict = PGMPhysWrite(pVM,
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8418 pbBuf + cbFirst,
8419 cbSecond,
8420 PGMACCESSORIGIN_IEM);
8421 if (rcStrict == VINF_SUCCESS)
8422 { /* nothing */ }
8423 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8424 {
8425 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8428 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8429 }
8430#ifndef IN_RING3
8431 else if (fPostponeFail)
8432 {
8433 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8436 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8437 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8438 return iemSetPassUpStatus(pVCpu, rcStrict);
8439 }
8440#endif
8441 else
8442 {
8443 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8446 return rcStrict;
8447 }
8448 }
8449 }
8450 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8451 {
8452 if (!cbSecond)
8453 {
8454 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8456 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8457 }
8458 else
8459 {
8460 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8461 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8462 pbBuf + cbFirst,
8463 cbSecond,
8464 PGMACCESSORIGIN_IEM);
8465 if (rcStrict2 == VINF_SUCCESS)
8466 {
8467 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8469 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8470 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8471 }
8472 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8473 {
8474 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8477 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8478 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8479 }
8480#ifndef IN_RING3
8481 else if (fPostponeFail)
8482 {
8483 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8486 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8487 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8488 return iemSetPassUpStatus(pVCpu, rcStrict);
8489 }
8490#endif
8491 else
8492 {
8493 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8495 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8496 return rcStrict2;
8497 }
8498 }
8499 }
8500#ifndef IN_RING3
8501 else if (fPostponeFail)
8502 {
8503 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8506 if (!cbSecond)
8507 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8508 else
8509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8510 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8511 return iemSetPassUpStatus(pVCpu, rcStrict);
8512 }
8513#endif
8514 else
8515 {
8516 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8517 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8518 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8519 return rcStrict;
8520 }
8521 }
8522 else
8523 {
8524 /*
8525 * No access handlers, much simpler.
8526 */
8527 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8528 if (RT_SUCCESS(rc))
8529 {
8530 if (cbSecond)
8531 {
8532 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8533 if (RT_SUCCESS(rc))
8534 { /* likely */ }
8535 else
8536 {
8537 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8538 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8539 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8540 return rc;
8541 }
8542 }
8543 }
8544 else
8545 {
8546 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8548 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8549 return rc;
8550 }
8551 }
8552 }
8553
8554#if defined(IEM_LOG_MEMORY_WRITES)
8555 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8556 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8557 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8558 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8559 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8560 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8561
8562 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8563 g_cbIemWrote = cbWrote;
8564 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8565#endif
8566
8567 /*
8568 * Free the mapping entry.
8569 */
8570 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8571 Assert(pVCpu->iem.s.cActiveMappings != 0);
8572 pVCpu->iem.s.cActiveMappings--;
8573 return VINF_SUCCESS;
8574}
8575
8576
8577/**
8578 * iemMemMap worker that deals with a request crossing pages.
8579 */
8580IEM_STATIC VBOXSTRICTRC
8581iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8582{
8583 /*
8584 * Do the address translations.
8585 */
8586 RTGCPHYS GCPhysFirst;
8587 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8588 if (rcStrict != VINF_SUCCESS)
8589 return rcStrict;
8590
8591 RTGCPHYS GCPhysSecond;
8592 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8593 fAccess, &GCPhysSecond);
8594 if (rcStrict != VINF_SUCCESS)
8595 return rcStrict;
8596 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8597
8598 PVM pVM = pVCpu->CTX_SUFF(pVM);
8599
8600 /*
8601 * Read in the current memory content if it's a read, execute or partial
8602 * write access.
8603 */
8604 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8605 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8606 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8607
8608 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8609 {
8610 if (!pVCpu->iem.s.fBypassHandlers)
8611 {
8612 /*
8613 * Must carefully deal with access handler status codes here,
8614 * makes the code a bit bloated.
8615 */
8616 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8617 if (rcStrict == VINF_SUCCESS)
8618 {
8619 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8620 if (rcStrict == VINF_SUCCESS)
8621 { /*likely */ }
8622 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8623 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8624 else
8625 {
8626 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8627 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8628 return rcStrict;
8629 }
8630 }
8631 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8632 {
8633 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8634 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8635 {
8636 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8637 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8638 }
8639 else
8640 {
8641 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8642 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8643 return rcStrict2;
8644 }
8645 }
8646 else
8647 {
8648 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8649 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8650 return rcStrict;
8651 }
8652 }
8653 else
8654 {
8655 /*
8656 * No informational status codes here, much more straightforward.
8657 */
8658 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8659 if (RT_SUCCESS(rc))
8660 {
8661 Assert(rc == VINF_SUCCESS);
8662 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8663 if (RT_SUCCESS(rc))
8664 Assert(rc == VINF_SUCCESS);
8665 else
8666 {
8667 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8668 return rc;
8669 }
8670 }
8671 else
8672 {
8673 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8674 return rc;
8675 }
8676 }
8677 }
8678#ifdef VBOX_STRICT
8679 else
8680 memset(pbBuf, 0xcc, cbMem);
8681 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8682 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8683#endif
8684
8685 /*
8686 * Commit the bounce buffer entry.
8687 */
8688 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8689 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8690 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8691 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8692 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8693 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8694 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8695 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8696 pVCpu->iem.s.cActiveMappings++;
8697
8698 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8699 *ppvMem = pbBuf;
8700 return VINF_SUCCESS;
8701}
8702
8703
8704/**
8705 * iemMemMap worker that deals with iemMemPageMap failures.
8706 */
8707IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8708 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8709{
8710 /*
8711 * Filter out conditions we can handle and the ones which shouldn't happen.
8712 */
8713 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8714 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8715 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8716 {
8717 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8718 return rcMap;
8719 }
8720 pVCpu->iem.s.cPotentialExits++;
8721
8722 /*
8723 * Read in the current memory content if it's a read, execute or partial
8724 * write access.
8725 */
8726 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8727 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8728 {
8729 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8730 memset(pbBuf, 0xff, cbMem);
8731 else
8732 {
8733 int rc;
8734 if (!pVCpu->iem.s.fBypassHandlers)
8735 {
8736 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8737 if (rcStrict == VINF_SUCCESS)
8738 { /* nothing */ }
8739 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8740 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8741 else
8742 {
8743 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8744 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8745 return rcStrict;
8746 }
8747 }
8748 else
8749 {
8750 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8751 if (RT_SUCCESS(rc))
8752 { /* likely */ }
8753 else
8754 {
8755 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8756 GCPhysFirst, rc));
8757 return rc;
8758 }
8759 }
8760 }
8761 }
8762#ifdef VBOX_STRICT
8763 else
8764 memset(pbBuf, 0xcc, cbMem);
8765#endif
8766#ifdef VBOX_STRICT
8767 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8768 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8769#endif
8770
8771 /*
8772 * Commit the bounce buffer entry.
8773 */
8774 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8775 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8776 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8777 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8778 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8779 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8780 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8781 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8782 pVCpu->iem.s.cActiveMappings++;
8783
8784 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8785 *ppvMem = pbBuf;
8786 return VINF_SUCCESS;
8787}
8788
8789
8790
8791/**
8792 * Maps the specified guest memory for the given kind of access.
8793 *
8794 * This may be using bounce buffering of the memory if it's crossing a page
8795 * boundary or if there is an access handler installed for any of it. Because
8796 * of lock prefix guarantees, we're in for some extra clutter when this
8797 * happens.
8798 *
8799 * This may raise a \#GP, \#SS, \#PF or \#AC.
8800 *
8801 * @returns VBox strict status code.
8802 *
8803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8804 * @param ppvMem Where to return the pointer to the mapped
8805 * memory.
8806 * @param cbMem The number of bytes to map. This is usually 1,
8807 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8808 * string operations it can be up to a page.
8809 * @param iSegReg The index of the segment register to use for
8810 * this access. The base and limits are checked.
8811 * Use UINT8_MAX to indicate that no segmentation
8812 * is required (for IDT, GDT and LDT accesses).
8813 * @param GCPtrMem The address of the guest memory.
8814 * @param fAccess How the memory is being accessed. The
8815 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8816 * how to map the memory, while the
8817 * IEM_ACCESS_WHAT_XXX bit is used when raising
8818 * exceptions.
8819 */
8820IEM_STATIC VBOXSTRICTRC
8821iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8822{
8823 /*
8824 * Check the input and figure out which mapping entry to use.
8825 */
8826 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8827 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8828 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8829
8830 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8831 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8832 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8833 {
8834 iMemMap = iemMemMapFindFree(pVCpu);
8835 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8836 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8837 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8838 pVCpu->iem.s.aMemMappings[2].fAccess),
8839 VERR_IEM_IPE_9);
8840 }
8841
8842 /*
8843 * Map the memory, checking that we can actually access it. If something
8844 * slightly complicated happens, fall back on bounce buffering.
8845 */
8846 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8847 if (rcStrict != VINF_SUCCESS)
8848 return rcStrict;
8849
8850 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8851 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8852
8853 RTGCPHYS GCPhysFirst;
8854 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8855 if (rcStrict != VINF_SUCCESS)
8856 return rcStrict;
8857
8858 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8859 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8860 if (fAccess & IEM_ACCESS_TYPE_READ)
8861 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8862
8863 void *pvMem;
8864 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8865 if (rcStrict != VINF_SUCCESS)
8866 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8867
8868 /*
8869 * Fill in the mapping table entry.
8870 */
8871 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8872 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8873 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8874 pVCpu->iem.s.cActiveMappings++;
8875
8876 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8877 *ppvMem = pvMem;
8878 return VINF_SUCCESS;
8879}
8880
8881
8882/**
8883 * Commits the guest memory if bounce buffered and unmaps it.
8884 *
8885 * @returns Strict VBox status code.
8886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8887 * @param pvMem The mapping.
8888 * @param fAccess The kind of access.
8889 */
8890IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8891{
8892 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8893 AssertReturn(iMemMap >= 0, iMemMap);
8894
8895 /* If it's bounce buffered, we may need to write back the buffer. */
8896 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8897 {
8898 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8899 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8900 }
8901 /* Otherwise unlock it. */
8902 else
8903 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8904
8905 /* Free the entry. */
8906 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8907 Assert(pVCpu->iem.s.cActiveMappings != 0);
8908 pVCpu->iem.s.cActiveMappings--;
8909 return VINF_SUCCESS;
8910}
8911
8912#ifdef IEM_WITH_SETJMP
8913
8914/**
8915 * Maps the specified guest memory for the given kind of access, longjmp on
8916 * error.
8917 *
8918 * This may be using bounce buffering of the memory if it's crossing a page
8919 * boundary or if there is an access handler installed for any of it. Because
8920 * of lock prefix guarantees, we're in for some extra clutter when this
8921 * happens.
8922 *
8923 * This may raise a \#GP, \#SS, \#PF or \#AC.
8924 *
8925 * @returns Pointer to the mapped memory.
8926 *
8927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8928 * @param cbMem The number of bytes to map. This is usually 1,
8929 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8930 * string operations it can be up to a page.
8931 * @param iSegReg The index of the segment register to use for
8932 * this access. The base and limits are checked.
8933 * Use UINT8_MAX to indicate that no segmentation
8934 * is required (for IDT, GDT and LDT accesses).
8935 * @param GCPtrMem The address of the guest memory.
8936 * @param fAccess How the memory is being accessed. The
8937 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8938 * how to map the memory, while the
8939 * IEM_ACCESS_WHAT_XXX bit is used when raising
8940 * exceptions.
8941 */
8942IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8943{
8944 /*
8945 * Check the input and figure out which mapping entry to use.
8946 */
8947 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8948 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8949 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8950
8951 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8952 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8953 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8954 {
8955 iMemMap = iemMemMapFindFree(pVCpu);
8956 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8957 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8958 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8959 pVCpu->iem.s.aMemMappings[2].fAccess),
8960 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8961 }
8962
8963 /*
8964 * Map the memory, checking that we can actually access it. If something
8965 * slightly complicated happens, fall back on bounce buffering.
8966 */
8967 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8968 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8969 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8970
8971 /* Crossing a page boundary? */
8972 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8973 { /* No (likely). */ }
8974 else
8975 {
8976 void *pvMem;
8977 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8978 if (rcStrict == VINF_SUCCESS)
8979 return pvMem;
8980 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8981 }
8982
8983 RTGCPHYS GCPhysFirst;
8984 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8985 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8986 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8987
8988 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8989 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8990 if (fAccess & IEM_ACCESS_TYPE_READ)
8991 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8992
8993 void *pvMem;
8994 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8995 if (rcStrict == VINF_SUCCESS)
8996 { /* likely */ }
8997 else
8998 {
8999 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9000 if (rcStrict == VINF_SUCCESS)
9001 return pvMem;
9002 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9003 }
9004
9005 /*
9006 * Fill in the mapping table entry.
9007 */
9008 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9009 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9010 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9011 pVCpu->iem.s.cActiveMappings++;
9012
9013 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9014 return pvMem;
9015}
9016
9017
9018/**
9019 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9020 *
9021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9022 * @param pvMem The mapping.
9023 * @param fAccess The kind of access.
9024 */
9025IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9026{
9027 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9028 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9029
9030 /* If it's bounce buffered, we may need to write back the buffer. */
9031 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9032 {
9033 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9034 {
9035 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9036 if (rcStrict == VINF_SUCCESS)
9037 return;
9038 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9039 }
9040 }
9041 /* Otherwise unlock it. */
9042 else
9043 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9044
9045 /* Free the entry. */
9046 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9047 Assert(pVCpu->iem.s.cActiveMappings != 0);
9048 pVCpu->iem.s.cActiveMappings--;
9049}
9050
9051#endif /* IEM_WITH_SETJMP */
9052
9053#ifndef IN_RING3
9054/**
9055 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9056 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
9057 *
9058 * Allows the instruction to be completed and retired, while the IEM user will
9059 * return to ring-3 immediately afterwards and do the postponed writes there.
9060 *
9061 * @returns VBox status code (no strict statuses). Caller must check
9062 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9064 * @param pvMem The mapping.
9065 * @param fAccess The kind of access.
9066 */
9067IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9068{
9069 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9070 AssertReturn(iMemMap >= 0, iMemMap);
9071
9072 /* If it's bounce buffered, we may need to write back the buffer. */
9073 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9074 {
9075 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9076 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9077 }
9078 /* Otherwise unlock it. */
9079 else
9080 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9081
9082 /* Free the entry. */
9083 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9084 Assert(pVCpu->iem.s.cActiveMappings != 0);
9085 pVCpu->iem.s.cActiveMappings--;
9086 return VINF_SUCCESS;
9087}
9088#endif
9089
9090
9091/**
9092 * Rolls back mappings, releasing page locks and such.
9093 *
9094 * The caller shall only call this after checking cActiveMappings.
9095 *
9097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9098 */
9099IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9100{
9101 Assert(pVCpu->iem.s.cActiveMappings > 0);
9102
9103 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9104 while (iMemMap-- > 0)
9105 {
9106 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9107 if (fAccess != IEM_ACCESS_INVALID)
9108 {
9109 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9110 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9111 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9112 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9113 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9114 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9115 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9116 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9117 pVCpu->iem.s.cActiveMappings--;
9118 }
9119 }
9120}
9121
9122
9123/**
9124 * Fetches a data byte.
9125 *
9126 * @returns Strict VBox status code.
9127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9128 * @param pu8Dst Where to return the byte.
9129 * @param iSegReg The index of the segment register to use for
9130 * this access. The base and limits are checked.
9131 * @param GCPtrMem The address of the guest memory.
9132 */
9133IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9134{
9135 /* The lazy approach for now... */
9136 uint8_t const *pu8Src;
9137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9138 if (rc == VINF_SUCCESS)
9139 {
9140 *pu8Dst = *pu8Src;
9141 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9142 }
9143 return rc;
9144}
9145
9146
9147#ifdef IEM_WITH_SETJMP
9148/**
9149 * Fetches a data byte, longjmp on error.
9150 *
9151 * @returns The byte.
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param iSegReg The index of the segment register to use for
9154 * this access. The base and limits are checked.
9155 * @param GCPtrMem The address of the guest memory.
9156 */
9157DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9158{
9159 /* The lazy approach for now... */
9160 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9161 uint8_t const bRet = *pu8Src;
9162 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9163 return bRet;
9164}
9165#endif /* IEM_WITH_SETJMP */
9166
9167
9168/**
9169 * Fetches a data word.
9170 *
9171 * @returns Strict VBox status code.
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9173 * @param pu16Dst Where to return the word.
9174 * @param iSegReg The index of the segment register to use for
9175 * this access. The base and limits are checked.
9176 * @param GCPtrMem The address of the guest memory.
9177 */
9178IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9179{
9180 /* The lazy approach for now... */
9181 uint16_t const *pu16Src;
9182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9183 if (rc == VINF_SUCCESS)
9184 {
9185 *pu16Dst = *pu16Src;
9186 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9187 }
9188 return rc;
9189}
9190
9191
9192#ifdef IEM_WITH_SETJMP
9193/**
9194 * Fetches a data word, longjmp on error.
9195 *
9196 * @returns The word.
9197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9198 * @param iSegReg The index of the segment register to use for
9199 * this access. The base and limits are checked.
9200 * @param GCPtrMem The address of the guest memory.
9201 */
9202DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9203{
9204 /* The lazy approach for now... */
9205 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9206 uint16_t const u16Ret = *pu16Src;
9207 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9208 return u16Ret;
9209}
9210#endif
9211
9212
9213/**
9214 * Fetches a data dword.
9215 *
9216 * @returns Strict VBox status code.
9217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9218 * @param pu32Dst Where to return the dword.
9219 * @param iSegReg The index of the segment register to use for
9220 * this access. The base and limits are checked.
9221 * @param GCPtrMem The address of the guest memory.
9222 */
9223IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9224{
9225 /* The lazy approach for now... */
9226 uint32_t const *pu32Src;
9227 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9228 if (rc == VINF_SUCCESS)
9229 {
9230 *pu32Dst = *pu32Src;
9231 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9232 }
9233 return rc;
9234}
9235
9236
9237#ifdef IEM_WITH_SETJMP
9238
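/**
 * Applies segmentation to a read access, longjmp on error.
 *
 * @returns The segmented address to use for the actual memory access; raises
 * \#GP(0), a selector bounds fault or an invalid access fault via
 * longjmp otherwise.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to apply.
 * @param cbMem The access size.
 * @param GCPtrMem The guest memory address to apply segmentation to.
 */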
9239IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9240{
9241 Assert(cbMem >= 1);
9242 Assert(iSegReg < X86_SREG_COUNT);
9243
9244 /*
9245 * 64-bit mode is simpler.
9246 */
9247 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9248 {
9249 if (iSegReg >= X86_SREG_FS)
9250 {
9251 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9252 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9253 GCPtrMem += pSel->u64Base;
9254 }
9255
9256 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9257 return GCPtrMem;
9258 }
9259 /*
9260 * 16-bit and 32-bit segmentation.
9261 */
9262 else
9263 {
9264 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9265 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9266 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9267 == X86DESCATTR_P /* data, expand up */
9268 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9269 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9270 {
9271 /* expand up */
9272 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9273 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9274 && GCPtrLast32 > (uint32_t)GCPtrMem))
9275 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9276 }
9277 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9278 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9279 {
9280 /* expand down */
9281 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9282 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9283 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9284 && GCPtrLast32 > (uint32_t)GCPtrMem))
9285 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9286 }
9287 else
9288 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9289 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9290 }
9291 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9292}
9293
9294
9295IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9296{
9297 Assert(cbMem >= 1);
9298 Assert(iSegReg < X86_SREG_COUNT);
9299
9300 /*
9301 * 64-bit mode is simpler.
9302 */
9303 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9304 {
9305 if (iSegReg >= X86_SREG_FS)
9306 {
9307 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9308 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9309 GCPtrMem += pSel->u64Base;
9310 }
9311
9312 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9313 return GCPtrMem;
9314 }
9315 /*
9316 * 16-bit and 32-bit segmentation.
9317 */
9318 else
9319 {
9320 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9321 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9322 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9323 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9324 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9325 {
9326 /* expand up */
9327 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9328 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9329 && GCPtrLast32 > (uint32_t)GCPtrMem))
9330 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9331 }
9332        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9333 {
9334 /* expand down */
9335 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9336 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9337 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9338 && GCPtrLast32 > (uint32_t)GCPtrMem))
9339 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9340 }
9341 else
9342 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9343 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9344 }
9345 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9346}
9347
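/*
 * Worked example, not built: the expand-down check above with concrete numbers.
 * Assume a 16-bit data segment with u32Limit = 0x0fff and D/B = 0, so the valid
 * offsets are 0x1000..0xffff.
 */
#if 0
{
    uint32_t const GCPtrMem32  = UINT32_C(0x2000);
    uint32_t const cb          = 4;
    uint32_t const GCPtrLast32 = GCPtrMem32 + cb;        /* 0x2004 */
    bool const fOk = GCPtrMem32  >  UINT32_C(0x0fff)     /* strictly above the limit */
                  && GCPtrLast32 <= UINT32_C(0xffff)     /* inside the 64KB expand-down window */
                  && GCPtrLast32 >  GCPtrMem32;          /* no 32-bit wrap-around */
    /* fOk is true, so the linear address GCPtrMem + u64Base is returned; an access at
       0x0ffe would fail the first check and end up in iemRaiseSelectorBoundsJmp. */
}
#endif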
9348
9349/**
9350 * Fetches a data dword, longjmp on error, fallback/safe version.
9351 *
9352 * @returns The dword
9353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9354 * @param iSegReg The index of the segment register to use for
9355 * this access. The base and limits are checked.
9356 * @param GCPtrMem The address of the guest memory.
9357 */
9358IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9359{
9360 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9361 uint32_t const u32Ret = *pu32Src;
9362 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9363 return u32Ret;
9364}
9365
9366
9367/**
9368 * Fetches a data dword, longjmp on error.
9369 *
9370 * @returns The dword
9371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9372 * @param iSegReg The index of the segment register to use for
9373 * this access. The base and limits are checked.
9374 * @param GCPtrMem The address of the guest memory.
9375 */
9376DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9377{
9378# ifdef IEM_WITH_DATA_TLB
9379 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9380 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9381 {
9382 /// @todo more later.
9383 }
9384
9385 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9386# else
9387 /* The lazy approach. */
9388 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9389 uint32_t const u32Ret = *pu32Src;
9390 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9391 return u32Ret;
9392# endif
9393}
9394#endif
9395
9396
9397#ifdef SOME_UNUSED_FUNCTION
9398/**
9399 * Fetches a data dword and sign extends it to a qword.
9400 *
9401 * @returns Strict VBox status code.
9402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9403 * @param pu64Dst Where to return the sign extended value.
9404 * @param iSegReg The index of the segment register to use for
9405 * this access. The base and limits are checked.
9406 * @param GCPtrMem The address of the guest memory.
9407 */
9408IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9409{
9410 /* The lazy approach for now... */
9411 int32_t const *pi32Src;
9412 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9413 if (rc == VINF_SUCCESS)
9414 {
9415 *pu64Dst = *pi32Src;
9416 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9417 }
9418#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9419 else
9420 *pu64Dst = 0;
9421#endif
9422 return rc;
9423}
9424#endif
9425
9426
9427/**
9428 * Fetches a data qword.
9429 *
9430 * @returns Strict VBox status code.
9431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9432 * @param pu64Dst Where to return the qword.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 */
9437IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9438{
9439 /* The lazy approach for now... */
9440 uint64_t const *pu64Src;
9441 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9442 if (rc == VINF_SUCCESS)
9443 {
9444 *pu64Dst = *pu64Src;
9445 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9446 }
9447 return rc;
9448}
9449
9450
9451#ifdef IEM_WITH_SETJMP
9452/**
9453 * Fetches a data qword, longjmp on error.
9454 *
9455 * @returns The qword.
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param iSegReg The index of the segment register to use for
9458 * this access. The base and limits are checked.
9459 * @param GCPtrMem The address of the guest memory.
9460 */
9461DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9462{
9463 /* The lazy approach for now... */
9464 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9465 uint64_t const u64Ret = *pu64Src;
9466 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9467 return u64Ret;
9468}
9469#endif
9470
9471
9472/**
9473 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9474 *
9475 * @returns Strict VBox status code.
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param pu64Dst Where to return the qword.
9478 * @param iSegReg The index of the segment register to use for
9479 * this access. The base and limits are checked.
9480 * @param GCPtrMem The address of the guest memory.
9481 */
9482IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9483{
9484 /* The lazy approach for now... */
9485 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9486 if (RT_UNLIKELY(GCPtrMem & 15))
9487 return iemRaiseGeneralProtectionFault0(pVCpu);
9488
9489 uint64_t const *pu64Src;
9490 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9491 if (rc == VINF_SUCCESS)
9492 {
9493 *pu64Dst = *pu64Src;
9494 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9495 }
9496 return rc;
9497}
9498
9499
9500#ifdef IEM_WITH_SETJMP
9501/**
9502 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9503 *
9504 * @returns The qword.
9505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9506 * @param iSegReg The index of the segment register to use for
9507 * this access. The base and limits are checked.
9508 * @param GCPtrMem The address of the guest memory.
9509 */
9510DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9511{
9512 /* The lazy approach for now... */
9513 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9514 if (RT_LIKELY(!(GCPtrMem & 15)))
9515 {
9516 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9517 uint64_t const u64Ret = *pu64Src;
9518 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9519 return u64Ret;
9520 }
9521
9522 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9523 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9524}
9525#endif
9526
9527
9528/**
9529 * Fetches a data tword.
9530 *
9531 * @returns Strict VBox status code.
9532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9533 * @param pr80Dst Where to return the tword.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 */
9538IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /* The lazy approach for now... */
9541 PCRTFLOAT80U pr80Src;
9542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9543 if (rc == VINF_SUCCESS)
9544 {
9545 *pr80Dst = *pr80Src;
9546 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9547 }
9548 return rc;
9549}
9550
9551
9552#ifdef IEM_WITH_SETJMP
9553/**
9554 * Fetches a data tword, longjmp on error.
9555 *
9556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9557 * @param pr80Dst Where to return the tword.
9558 * @param iSegReg The index of the segment register to use for
9559 * this access. The base and limits are checked.
9560 * @param GCPtrMem The address of the guest memory.
9561 */
9562DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9563{
9564 /* The lazy approach for now... */
9565 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9566 *pr80Dst = *pr80Src;
9567 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9568}
9569#endif
9570
9571
9572/**
9573 * Fetches a data dqword (double qword), generally SSE related.
9574 *
9575 * @returns Strict VBox status code.
9576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9577 * @param pu128Dst Where to return the dqword.
9578 * @param iSegReg The index of the segment register to use for
9579 * this access. The base and limits are checked.
9580 * @param GCPtrMem The address of the guest memory.
9581 */
9582IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9583{
9584 /* The lazy approach for now... */
9585 PCRTUINT128U pu128Src;
9586 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9587 if (rc == VINF_SUCCESS)
9588 {
9589 pu128Dst->au64[0] = pu128Src->au64[0];
9590 pu128Dst->au64[1] = pu128Src->au64[1];
9591 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9592 }
9593 return rc;
9594}
9595
9596
9597#ifdef IEM_WITH_SETJMP
9598/**
9599 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9600 *
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pu128Dst Where to return the dqword.
9603 * @param iSegReg The index of the segment register to use for
9604 * this access. The base and limits are checked.
9605 * @param GCPtrMem The address of the guest memory.
9606 */
9607IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9608{
9609 /* The lazy approach for now... */
9610 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9611 pu128Dst->au64[0] = pu128Src->au64[0];
9612 pu128Dst->au64[1] = pu128Src->au64[1];
9613 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9614}
9615#endif
9616
9617
9618/**
9619 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9620 * related.
9621 *
9622 * Raises \#GP(0) if not aligned.
9623 *
9624 * @returns Strict VBox status code.
9625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9626 * @param pu128Dst Where to return the dqword.
9627 * @param iSegReg The index of the segment register to use for
9628 * this access. The base and limits are checked.
9629 * @param GCPtrMem The address of the guest memory.
9630 */
9631IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9632{
9633 /* The lazy approach for now... */
9634 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9635 if ( (GCPtrMem & 15)
9636 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9637 return iemRaiseGeneralProtectionFault0(pVCpu);
9638
9639 PCRTUINT128U pu128Src;
9640 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9641 if (rc == VINF_SUCCESS)
9642 {
9643 pu128Dst->au64[0] = pu128Src->au64[0];
9644 pu128Dst->au64[1] = pu128Src->au64[1];
9645 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9646 }
9647 return rc;
9648}
9649
9650
9651#ifdef IEM_WITH_SETJMP
9652/**
9653 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9654 * related, longjmp on error.
9655 *
9656 * Raises \#GP(0) if not aligned.
9657 *
9658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9659 * @param pu128Dst Where to return the dqword.
9660 * @param iSegReg The index of the segment register to use for
9661 * this access. The base and limits are checked.
9662 * @param GCPtrMem The address of the guest memory.
9663 */
9664DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9665{
9666 /* The lazy approach for now... */
9667 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9668 if ( (GCPtrMem & 15) == 0
9669 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9670 {
9671 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9672 pu128Dst->au64[0] = pu128Src->au64[0];
9673 pu128Dst->au64[1] = pu128Src->au64[1];
9674 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9675 return;
9676 }
9677
9678 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9679 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9680}
9681#endif
9682
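/*
 * Small sketch, not built, of the SSE alignment rule applied by the two aligned
 * fetchers above: a misaligned 16 byte access only raises #GP(0) when MXCSR.MM is
 * clear; with the AMD misaligned-SSE mode enabled the check is skipped.
 */
#if 0
{
    bool const fFaults = (GCPtrMem & 15)
                      && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
    if (fFaults)
        return iemRaiseGeneralProtectionFault0(pVCpu);
}
#endif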
9683
9684/**
9685 * Fetches a data oword (octo word), generally AVX related.
9686 *
9687 * @returns Strict VBox status code.
9688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9689 * @param pu256Dst Where to return the oword.
9690 * @param iSegReg The index of the segment register to use for
9691 * this access. The base and limits are checked.
9692 * @param GCPtrMem The address of the guest memory.
9693 */
9694IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9695{
9696 /* The lazy approach for now... */
9697 PCRTUINT256U pu256Src;
9698 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9699 if (rc == VINF_SUCCESS)
9700 {
9701 pu256Dst->au64[0] = pu256Src->au64[0];
9702 pu256Dst->au64[1] = pu256Src->au64[1];
9703 pu256Dst->au64[2] = pu256Src->au64[2];
9704 pu256Dst->au64[3] = pu256Src->au64[3];
9705 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9706 }
9707 return rc;
9708}
9709
9710
9711#ifdef IEM_WITH_SETJMP
9712/**
9713 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9714 *
9715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9716 * @param pu256Dst Where to return the oword.
9717 * @param iSegReg The index of the segment register to use for
9718 * this access. The base and limits are checked.
9719 * @param GCPtrMem The address of the guest memory.
9720 */
9721IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9722{
9723 /* The lazy approach for now... */
9724 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9725 pu256Dst->au64[0] = pu256Src->au64[0];
9726 pu256Dst->au64[1] = pu256Src->au64[1];
9727 pu256Dst->au64[2] = pu256Src->au64[2];
9728 pu256Dst->au64[3] = pu256Src->au64[3];
9729 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9730}
9731#endif
9732
9733
9734/**
9735 * Fetches a data oword (octo word) at an aligned address, generally AVX
9736 * related.
9737 *
9738 * Raises \#GP(0) if not aligned.
9739 *
9740 * @returns Strict VBox status code.
9741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9742 * @param pu256Dst Where to return the oword.
9743 * @param iSegReg The index of the segment register to use for
9744 * this access. The base and limits are checked.
9745 * @param GCPtrMem The address of the guest memory.
9746 */
9747IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9748{
9749 /* The lazy approach for now... */
9750 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9751 if (GCPtrMem & 31)
9752 return iemRaiseGeneralProtectionFault0(pVCpu);
9753
9754 PCRTUINT256U pu256Src;
9755 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9756 if (rc == VINF_SUCCESS)
9757 {
9758 pu256Dst->au64[0] = pu256Src->au64[0];
9759 pu256Dst->au64[1] = pu256Src->au64[1];
9760 pu256Dst->au64[2] = pu256Src->au64[2];
9761 pu256Dst->au64[3] = pu256Src->au64[3];
9762 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9763 }
9764 return rc;
9765}
9766
9767
9768#ifdef IEM_WITH_SETJMP
9769/**
9770 * Fetches a data oword (octo word) at an aligned address, generally AVX
9771 * related, longjmp on error.
9772 *
9773 * Raises \#GP(0) if not aligned.
9774 *
9775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9776 * @param pu256Dst Where to return the oword.
9777 * @param iSegReg The index of the segment register to use for
9778 * this access. The base and limits are checked.
9779 * @param GCPtrMem The address of the guest memory.
9780 */
9781DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9782{
9783 /* The lazy approach for now... */
9784 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9785 if ((GCPtrMem & 31) == 0)
9786 {
9787 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9788 pu256Dst->au64[0] = pu256Src->au64[0];
9789 pu256Dst->au64[1] = pu256Src->au64[1];
9790 pu256Dst->au64[2] = pu256Src->au64[2];
9791 pu256Dst->au64[3] = pu256Src->au64[3];
9792 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9793 return;
9794 }
9795
9796 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9797 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9798}
9799#endif
9800
9801
9802
9803/**
9804 * Fetches a descriptor register (lgdt, lidt).
9805 *
9806 * @returns Strict VBox status code.
9807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9808 * @param pcbLimit Where to return the limit.
9809 * @param pGCPtrBase Where to return the base.
9810 * @param iSegReg The index of the segment register to use for
9811 * this access. The base and limits are checked.
9812 * @param GCPtrMem The address of the guest memory.
9813 * @param enmOpSize The effective operand size.
9814 */
9815IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9816 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9817{
9818 /*
9819 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9820 * little special:
9821 * - The two reads are done separately.
9822 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit mode.
9823 * - We suspect the 386 to actually commit the limit before the base in
9824 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9825 * don't try to emulate this eccentric behavior, because it's not well
9826 * enough understood and rather hard to trigger.
9827 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9828 */
9829 VBOXSTRICTRC rcStrict;
9830 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9831 {
9832 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9833 if (rcStrict == VINF_SUCCESS)
9834 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9835 }
9836 else
9837 {
9838 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9839 if (enmOpSize == IEMMODE_32BIT)
9840 {
9841 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9842 {
9843 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9844 if (rcStrict == VINF_SUCCESS)
9845 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9846 }
9847 else
9848 {
9849 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9850 if (rcStrict == VINF_SUCCESS)
9851 {
9852 *pcbLimit = (uint16_t)uTmp;
9853 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9854 }
9855 }
9856 if (rcStrict == VINF_SUCCESS)
9857 *pGCPtrBase = uTmp;
9858 }
9859 else
9860 {
9861 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9862 if (rcStrict == VINF_SUCCESS)
9863 {
9864 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9865 if (rcStrict == VINF_SUCCESS)
9866 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9867 }
9868 }
9869 }
9870 return rcStrict;
9871}
9872
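/*
 * Illustrative sketch, not built: the memory operand layout the fetch above decodes.
 * The struct and its name are made up for illustration; the function itself reads the
 * limit and the base separately (GCPtrMem and GCPtrMem + 2), just like real hardware.
 */
#if 0
#pragma pack(1)
typedef struct EXAMPLEXDTR      /* hypothetical, 32-bit base case shown */
{
    uint16_t cbLimit;           /* offset 0: 16-bit limit, always present            */
    uint32_t uBase;             /* offset 2: base; masked to 24 bits for the 16-bit
                                   operand size case, and a full qword in long mode   */
} EXAMPLEXDTR;
#pragma pack()
#endif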
9873
9874
9875/**
9876 * Stores a data byte.
9877 *
9878 * @returns Strict VBox status code.
9879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9880 * @param iSegReg The index of the segment register to use for
9881 * this access. The base and limits are checked.
9882 * @param GCPtrMem The address of the guest memory.
9883 * @param u8Value The value to store.
9884 */
9885IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9886{
9887 /* The lazy approach for now... */
9888 uint8_t *pu8Dst;
9889 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9890 if (rc == VINF_SUCCESS)
9891 {
9892 *pu8Dst = u8Value;
9893 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9894 }
9895 return rc;
9896}
9897
9898
9899#ifdef IEM_WITH_SETJMP
9900/**
9901 * Stores a data byte, longjmp on error.
9902 *
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param iSegReg The index of the segment register to use for
9905 * this access. The base and limits are checked.
9906 * @param GCPtrMem The address of the guest memory.
9907 * @param u8Value The value to store.
9908 */
9909IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9910{
9911 /* The lazy approach for now... */
9912 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9913 *pu8Dst = u8Value;
9914 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9915}
9916#endif
9917
9918
9919/**
9920 * Stores a data word.
9921 *
9922 * @returns Strict VBox status code.
9923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9924 * @param iSegReg The index of the segment register to use for
9925 * this access. The base and limits are checked.
9926 * @param GCPtrMem The address of the guest memory.
9927 * @param u16Value The value to store.
9928 */
9929IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9930{
9931 /* The lazy approach for now... */
9932 uint16_t *pu16Dst;
9933 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9934 if (rc == VINF_SUCCESS)
9935 {
9936 *pu16Dst = u16Value;
9937 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9938 }
9939 return rc;
9940}
9941
9942
9943#ifdef IEM_WITH_SETJMP
9944/**
9945 * Stores a data word, longjmp on error.
9946 *
9947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9948 * @param iSegReg The index of the segment register to use for
9949 * this access. The base and limits are checked.
9950 * @param GCPtrMem The address of the guest memory.
9951 * @param u16Value The value to store.
9952 */
9953IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9954{
9955 /* The lazy approach for now... */
9956 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9957 *pu16Dst = u16Value;
9958 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9959}
9960#endif
9961
9962
9963/**
9964 * Stores a data dword.
9965 *
9966 * @returns Strict VBox status code.
9967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9968 * @param iSegReg The index of the segment register to use for
9969 * this access. The base and limits are checked.
9970 * @param GCPtrMem The address of the guest memory.
9971 * @param u32Value The value to store.
9972 */
9973IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9974{
9975 /* The lazy approach for now... */
9976 uint32_t *pu32Dst;
9977 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9978 if (rc == VINF_SUCCESS)
9979 {
9980 *pu32Dst = u32Value;
9981 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9982 }
9983 return rc;
9984}
9985
9986
9987#ifdef IEM_WITH_SETJMP
9988/**
9989 * Stores a data dword, longjmp on error.
9990 *
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param iSegReg The index of the segment register to use for
9994 * this access. The base and limits are checked.
9995 * @param GCPtrMem The address of the guest memory.
9996 * @param u32Value The value to store.
9997 */
9998IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9999{
10000 /* The lazy approach for now... */
10001 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10002 *pu32Dst = u32Value;
10003 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10004}
10005#endif
10006
10007
10008/**
10009 * Stores a data qword.
10010 *
10011 * @returns Strict VBox status code.
10012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10013 * @param iSegReg The index of the segment register to use for
10014 * this access. The base and limits are checked.
10015 * @param GCPtrMem The address of the guest memory.
10016 * @param u64Value The value to store.
10017 */
10018IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10019{
10020 /* The lazy approach for now... */
10021 uint64_t *pu64Dst;
10022 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10023 if (rc == VINF_SUCCESS)
10024 {
10025 *pu64Dst = u64Value;
10026 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10027 }
10028 return rc;
10029}
10030
10031
10032#ifdef IEM_WITH_SETJMP
10033/**
10034 * Stores a data qword, longjmp on error.
10035 *
10036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10037 * @param iSegReg The index of the segment register to use for
10038 * this access. The base and limits are checked.
10039 * @param GCPtrMem The address of the guest memory.
10040 * @param u64Value The value to store.
10041 */
10042IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10043{
10044 /* The lazy approach for now... */
10045 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10046 *pu64Dst = u64Value;
10047 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10048}
10049#endif
10050
10051
10052/**
10053 * Stores a data dqword.
10054 *
10055 * @returns Strict VBox status code.
10056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10057 * @param iSegReg The index of the segment register to use for
10058 * this access. The base and limits are checked.
10059 * @param GCPtrMem The address of the guest memory.
10060 * @param u128Value The value to store.
10061 */
10062IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10063{
10064 /* The lazy approach for now... */
10065 PRTUINT128U pu128Dst;
10066 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10067 if (rc == VINF_SUCCESS)
10068 {
10069 pu128Dst->au64[0] = u128Value.au64[0];
10070 pu128Dst->au64[1] = u128Value.au64[1];
10071 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10072 }
10073 return rc;
10074}
10075
10076
10077#ifdef IEM_WITH_SETJMP
10078/**
10079 * Stores a data dqword, longjmp on error.
10080 *
10081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10082 * @param iSegReg The index of the segment register to use for
10083 * this access. The base and limits are checked.
10084 * @param GCPtrMem The address of the guest memory.
10085 * @param u128Value The value to store.
10086 */
10087IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10088{
10089 /* The lazy approach for now... */
10090 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10091 pu128Dst->au64[0] = u128Value.au64[0];
10092 pu128Dst->au64[1] = u128Value.au64[1];
10093 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10094}
10095#endif
10096
10097
10098/**
10099 * Stores a data dqword, SSE aligned.
10100 *
10101 * @returns Strict VBox status code.
10102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10103 * @param iSegReg The index of the segment register to use for
10104 * this access. The base and limits are checked.
10105 * @param GCPtrMem The address of the guest memory.
10106 * @param u128Value The value to store.
10107 */
10108IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10109{
10110 /* The lazy approach for now... */
10111 if ( (GCPtrMem & 15)
10112 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10113 return iemRaiseGeneralProtectionFault0(pVCpu);
10114
10115 PRTUINT128U pu128Dst;
10116 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10117 if (rc == VINF_SUCCESS)
10118 {
10119 pu128Dst->au64[0] = u128Value.au64[0];
10120 pu128Dst->au64[1] = u128Value.au64[1];
10121 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10122 }
10123 return rc;
10124}
10125
10126
10127#ifdef IEM_WITH_SETJMP
10128/**
10129 * Stores a data dqword, SSE aligned, longjmp on error.
10130 *
10132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10133 * @param iSegReg The index of the segment register to use for
10134 * this access. The base and limits are checked.
10135 * @param GCPtrMem The address of the guest memory.
10136 * @param u128Value The value to store.
10137 */
10138DECL_NO_INLINE(IEM_STATIC, void)
10139iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10140{
10141 /* The lazy approach for now... */
10142 if ( (GCPtrMem & 15) == 0
10143 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10144 {
10145 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10146 pu128Dst->au64[0] = u128Value.au64[0];
10147 pu128Dst->au64[1] = u128Value.au64[1];
10148 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10149 return;
10150 }
10151
10152 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10153 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10154}
10155#endif
10156
10157
10158/**
10159 * Stores a data oword (octo word), generally AVX related.
10160 *
10161 * @returns Strict VBox status code.
10162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10163 * @param iSegReg The index of the segment register to use for
10164 * this access. The base and limits are checked.
10165 * @param GCPtrMem The address of the guest memory.
10166 * @param pu256Value Pointer to the value to store.
10167 */
10168IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10169{
10170 /* The lazy approach for now... */
10171 PRTUINT256U pu256Dst;
10172 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10173 if (rc == VINF_SUCCESS)
10174 {
10175 pu256Dst->au64[0] = pu256Value->au64[0];
10176 pu256Dst->au64[1] = pu256Value->au64[1];
10177 pu256Dst->au64[2] = pu256Value->au64[2];
10178 pu256Dst->au64[3] = pu256Value->au64[3];
10179 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10180 }
10181 return rc;
10182}
10183
10184
10185#ifdef IEM_WITH_SETJMP
10186/**
10187 * Stores a data oword, longjmp on error.
10188 *
10189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10190 * @param iSegReg The index of the segment register to use for
10191 * this access. The base and limits are checked.
10192 * @param GCPtrMem The address of the guest memory.
10193 * @param pu256Value Pointer to the value to store.
10194 */
10195IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10196{
10197 /* The lazy approach for now... */
10198 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10199 pu256Dst->au64[0] = pu256Value->au64[0];
10200 pu256Dst->au64[1] = pu256Value->au64[1];
10201 pu256Dst->au64[2] = pu256Value->au64[2];
10202 pu256Dst->au64[3] = pu256Value->au64[3];
10203 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10204}
10205#endif
10206
10207
10208/**
10209 * Stores a data oword, AVX aligned.
10210 *
10211 * @returns Strict VBox status code.
10212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10213 * @param iSegReg The index of the segment register to use for
10214 * this access. The base and limits are checked.
10215 * @param GCPtrMem The address of the guest memory.
10216 * @param pu256Value Pointer to the value to store.
10217 */
10218IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10219{
10220 /* The lazy approach for now... */
10221 if (GCPtrMem & 31)
10222 return iemRaiseGeneralProtectionFault0(pVCpu);
10223
10224 PRTUINT256U pu256Dst;
10225 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10226 if (rc == VINF_SUCCESS)
10227 {
10228 pu256Dst->au64[0] = pu256Value->au64[0];
10229 pu256Dst->au64[1] = pu256Value->au64[1];
10230 pu256Dst->au64[2] = pu256Value->au64[2];
10231 pu256Dst->au64[3] = pu256Value->au64[3];
10232 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10233 }
10234 return rc;
10235}
10236
10237
10238#ifdef IEM_WITH_SETJMP
10239/**
10240 * Stores a data oword, AVX aligned, longjmp on error.
10241 *
10243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10244 * @param iSegReg The index of the segment register to use for
10245 * this access. The base and limits are checked.
10246 * @param GCPtrMem The address of the guest memory.
10247 * @param pu256Value Pointer to the value to store.
10248 */
10249DECL_NO_INLINE(IEM_STATIC, void)
10250iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10251{
10252 /* The lazy approach for now... */
10253 if ((GCPtrMem & 31) == 0)
10254 {
10255 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10256 pu256Dst->au64[0] = pu256Value->au64[0];
10257 pu256Dst->au64[1] = pu256Value->au64[1];
10258 pu256Dst->au64[2] = pu256Value->au64[2];
10259 pu256Dst->au64[3] = pu256Value->au64[3];
10260 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10261 return;
10262 }
10263
10264 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10265 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10266}
10267#endif
10268
10269
10270/**
10271 * Stores a descriptor register (sgdt, sidt).
10272 *
10273 * @returns Strict VBox status code.
10274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10275 * @param cbLimit The limit.
10276 * @param GCPtrBase The base address.
10277 * @param iSegReg The index of the segment register to use for
10278 * this access. The base and limits are checked.
10279 * @param GCPtrMem The address of the guest memory.
10280 */
10281IEM_STATIC VBOXSTRICTRC
10282iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10283{
10284 /*
10285 * The SIDT and SGDT instructions actually store the data using two
10286 * independent writes. The instructions do not respond to opsize prefixes.
10287 */
10288 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10289 if (rcStrict == VINF_SUCCESS)
10290 {
10291 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10292 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10293 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10294 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10295 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10296 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10297 else
10298 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10299 }
10300 return rcStrict;
10301}
10302
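/*
 * Worked example for the store above: SGDT executed in 16-bit code with a GDT base
 * of 0x00012345 and a limit of 0x0027 writes the bytes 27 00 45 23 01 00 at GCPtrMem
 * on a 386+ target, while a 286-class target forces the top base byte to FF
 * (27 00 45 23 01 FF).  In 32-bit code the full dword base is stored, and in 64-bit
 * code the base is stored as a qword.
 */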
10303
10304/**
10305 * Pushes a word onto the stack.
10306 *
10307 * @returns Strict VBox status code.
10308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10309 * @param u16Value The value to push.
10310 */
10311IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10312{
10313    /* Decrement the stack pointer. */
10314 uint64_t uNewRsp;
10315 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10316
10317 /* Write the word the lazy way. */
10318 uint16_t *pu16Dst;
10319 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10320 if (rc == VINF_SUCCESS)
10321 {
10322 *pu16Dst = u16Value;
10323 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10324 }
10325
10326    /* Commit the new RSP value unless an access handler made trouble. */
10327 if (rc == VINF_SUCCESS)
10328 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10329
10330 return rc;
10331}
10332
10333
10334/**
10335 * Pushes a dword onto the stack.
10336 *
10337 * @returns Strict VBox status code.
10338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10339 * @param u32Value The value to push.
10340 */
10341IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10342{
10343    /* Decrement the stack pointer. */
10344 uint64_t uNewRsp;
10345 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10346
10347 /* Write the dword the lazy way. */
10348 uint32_t *pu32Dst;
10349 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10350 if (rc == VINF_SUCCESS)
10351 {
10352 *pu32Dst = u32Value;
10353 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10354 }
10355
10356    /* Commit the new RSP value unless an access handler made trouble. */
10357 if (rc == VINF_SUCCESS)
10358 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10359
10360 return rc;
10361}
10362
10363
10364/**
10365 * Pushes a dword segment register value onto the stack.
10366 *
10367 * @returns Strict VBox status code.
10368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10369 * @param u32Value The value to push.
10370 */
10371IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10372{
10373    /* Decrement the stack pointer. */
10374 uint64_t uNewRsp;
10375 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10376
10377    /* The Intel docs talk about zero extending the selector register
10378       value. My actual Intel CPU here might be zero extending the value,
10379       but it still only writes the lower word... */
10380 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10381     * happens when crossing a page boundary: is the high word checked
10382 * for write accessibility or not? Probably it is. What about segment limits?
10383 * It appears this behavior is also shared with trap error codes.
10384 *
10385 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10386 * ancient hardware when it actually did change. */
10387 uint16_t *pu16Dst;
10388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10389 if (rc == VINF_SUCCESS)
10390 {
10391 *pu16Dst = (uint16_t)u32Value;
10392 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10393 }
10394
10395    /* Commit the new RSP value unless an access handler made trouble. */
10396 if (rc == VINF_SUCCESS)
10397 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10398
10399 return rc;
10400}
10401
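/*
 * Worked example for the selector push above (as implemented here): with ESP = 0x1000
 * and the bytes AA BB CC DD at 0x0ffc..0x0fff, pushing a selector of 0x0023 with a
 * 32-bit operand size moves ESP to 0x0ffc and leaves the slot as 23 00 CC DD.  Only
 * the low word is written; the high word of the slot survives because the slot is
 * mapped read-write rather than write-only.
 */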
10402
10403/**
10404 * Pushes a qword onto the stack.
10405 *
10406 * @returns Strict VBox status code.
10407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10408 * @param u64Value The value to push.
10409 */
10410IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10411{
10412    /* Decrement the stack pointer. */
10413 uint64_t uNewRsp;
10414 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10415
10416    /* Write the qword the lazy way. */
10417 uint64_t *pu64Dst;
10418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10419 if (rc == VINF_SUCCESS)
10420 {
10421 *pu64Dst = u64Value;
10422 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10423 }
10424
10425    /* Commit the new RSP value unless an access handler made trouble. */
10426 if (rc == VINF_SUCCESS)
10427 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10428
10429 return rc;
10430}
10431
10432
10433/**
10434 * Pops a word from the stack.
10435 *
10436 * @returns Strict VBox status code.
10437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10438 * @param pu16Value Where to store the popped value.
10439 */
10440IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10441{
10442 /* Increment the stack pointer. */
10443 uint64_t uNewRsp;
10444 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10445
10446    /* Read the word the lazy way. */
10447 uint16_t const *pu16Src;
10448 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10449 if (rc == VINF_SUCCESS)
10450 {
10451 *pu16Value = *pu16Src;
10452 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10453
10454 /* Commit the new RSP value. */
10455 if (rc == VINF_SUCCESS)
10456 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10457 }
10458
10459 return rc;
10460}
10461
10462
10463/**
10464 * Pops a dword from the stack.
10465 *
10466 * @returns Strict VBox status code.
10467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10468 * @param pu32Value Where to store the popped value.
10469 */
10470IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10471{
10472 /* Increment the stack pointer. */
10473 uint64_t uNewRsp;
10474 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10475
10476    /* Read the dword the lazy way. */
10477 uint32_t const *pu32Src;
10478 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10479 if (rc == VINF_SUCCESS)
10480 {
10481 *pu32Value = *pu32Src;
10482 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10483
10484 /* Commit the new RSP value. */
10485 if (rc == VINF_SUCCESS)
10486 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10487 }
10488
10489 return rc;
10490}
10491
10492
10493/**
10494 * Pops a qword from the stack.
10495 *
10496 * @returns Strict VBox status code.
10497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10498 * @param pu64Value Where to store the popped value.
10499 */
10500IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10501{
10502 /* Increment the stack pointer. */
10503 uint64_t uNewRsp;
10504 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10505
10506    /* Read the qword the lazy way. */
10507 uint64_t const *pu64Src;
10508 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10509 if (rc == VINF_SUCCESS)
10510 {
10511 *pu64Value = *pu64Src;
10512 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10513
10514 /* Commit the new RSP value. */
10515 if (rc == VINF_SUCCESS)
10516 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10517 }
10518
10519 return rc;
10520}
10521
10522
10523/**
10524 * Pushes a word onto the stack, using a temporary stack pointer.
10525 *
10526 * @returns Strict VBox status code.
10527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10528 * @param u16Value The value to push.
10529 * @param pTmpRsp Pointer to the temporary stack pointer.
10530 */
10531IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10532{
10533    /* Decrement the stack pointer. */
10534 RTUINT64U NewRsp = *pTmpRsp;
10535 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10536
10537 /* Write the word the lazy way. */
10538 uint16_t *pu16Dst;
10539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10540 if (rc == VINF_SUCCESS)
10541 {
10542 *pu16Dst = u16Value;
10543 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10544 }
10545
10546    /* Commit the new RSP value unless an access handler made trouble. */
10547 if (rc == VINF_SUCCESS)
10548 *pTmpRsp = NewRsp;
10549
10550 return rc;
10551}
10552
10553
10554/**
10555 * Pushes a dword onto the stack, using a temporary stack pointer.
10556 *
10557 * @returns Strict VBox status code.
10558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10559 * @param u32Value The value to push.
10560 * @param pTmpRsp Pointer to the temporary stack pointer.
10561 */
10562IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10563{
10564    /* Decrement the stack pointer. */
10565 RTUINT64U NewRsp = *pTmpRsp;
10566 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10567
10568    /* Write the dword the lazy way. */
10569 uint32_t *pu32Dst;
10570 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10571 if (rc == VINF_SUCCESS)
10572 {
10573 *pu32Dst = u32Value;
10574 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10575 }
10576
10577    /* Commit the new RSP value unless an access handler made trouble. */
10578 if (rc == VINF_SUCCESS)
10579 *pTmpRsp = NewRsp;
10580
10581 return rc;
10582}
10583
10584
10585/**
10586 * Pushes a qword onto the stack, using a temporary stack pointer.
10587 *
10588 * @returns Strict VBox status code.
10589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10590 * @param u64Value The value to push.
10591 * @param pTmpRsp Pointer to the temporary stack pointer.
10592 */
10593IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10594{
10595    /* Decrement the stack pointer. */
10596 RTUINT64U NewRsp = *pTmpRsp;
10597 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10598
10599    /* Write the qword the lazy way. */
10600 uint64_t *pu64Dst;
10601 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10602 if (rc == VINF_SUCCESS)
10603 {
10604 *pu64Dst = u64Value;
10605 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10606 }
10607
10608    /* Commit the new RSP value unless an access handler made trouble. */
10609 if (rc == VINF_SUCCESS)
10610 *pTmpRsp = NewRsp;
10611
10612 return rc;
10613}
10614
10615
10616/**
10617 * Pops a word from the stack, using a temporary stack pointer.
10618 *
10619 * @returns Strict VBox status code.
10620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10621 * @param pu16Value Where to store the popped value.
10622 * @param pTmpRsp Pointer to the temporary stack pointer.
10623 */
10624IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10625{
10626 /* Increment the stack pointer. */
10627 RTUINT64U NewRsp = *pTmpRsp;
10628 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10629
10630    /* Read the word the lazy way. */
10631 uint16_t const *pu16Src;
10632 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10633 if (rc == VINF_SUCCESS)
10634 {
10635 *pu16Value = *pu16Src;
10636 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10637
10638 /* Commit the new RSP value. */
10639 if (rc == VINF_SUCCESS)
10640 *pTmpRsp = NewRsp;
10641 }
10642
10643 return rc;
10644}
10645
10646
10647/**
10648 * Pops a dword from the stack, using a temporary stack pointer.
10649 *
10650 * @returns Strict VBox status code.
10651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10652 * @param pu32Value Where to store the popped value.
10653 * @param pTmpRsp Pointer to the temporary stack pointer.
10654 */
10655IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10656{
10657 /* Increment the stack pointer. */
10658 RTUINT64U NewRsp = *pTmpRsp;
10659 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10660
10661    /* Read the dword the lazy way. */
10662 uint32_t const *pu32Src;
10663 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10664 if (rc == VINF_SUCCESS)
10665 {
10666 *pu32Value = *pu32Src;
10667 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10668
10669 /* Commit the new RSP value. */
10670 if (rc == VINF_SUCCESS)
10671 *pTmpRsp = NewRsp;
10672 }
10673
10674 return rc;
10675}
10676
10677
10678/**
10679 * Pops a qword from the stack, using a temporary stack pointer.
10680 *
10681 * @returns Strict VBox status code.
10682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10683 * @param pu64Value Where to store the popped value.
10684 * @param pTmpRsp Pointer to the temporary stack pointer.
10685 */
10686IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10687{
10688 /* Increment the stack pointer. */
10689 RTUINT64U NewRsp = *pTmpRsp;
10690 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10691
10692    /* Read the qword the lazy way. */
10693 uint64_t const *pu64Src;
10694 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10695 if (rcStrict == VINF_SUCCESS)
10696 {
10697 *pu64Value = *pu64Src;
10698 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10699
10700 /* Commit the new RSP value. */
10701 if (rcStrict == VINF_SUCCESS)
10702 *pTmpRsp = NewRsp;
10703 }
10704
10705 return rcStrict;
10706}
10707
10708
10709/**
10710 * Begin a special stack push (used by interrupts, exceptions and such).
10711 *
10712 * This will raise \#SS or \#PF if appropriate.
10713 *
10714 * @returns Strict VBox status code.
10715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10716 * @param cbMem The number of bytes to push onto the stack.
10717 * @param ppvMem Where to return the pointer to the stack memory.
10718 * As with the other memory functions this could be
10719 * direct access or bounce buffered access, so
10720 * don't commit register until the commit call
10721 * succeeds.
10722 * @param puNewRsp Where to return the new RSP value. This must be
10723 * passed unchanged to
10724 * iemMemStackPushCommitSpecial().
10725 */
10726IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10727{
10728 Assert(cbMem < UINT8_MAX);
10729 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10730 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10731}
10732
10733
10734/**
10735 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10736 *
10737 * This will update the rSP.
10738 *
10739 * @returns Strict VBox status code.
10740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10741 * @param pvMem The pointer returned by
10742 * iemMemStackPushBeginSpecial().
10743 * @param uNewRsp The new RSP value returned by
10744 * iemMemStackPushBeginSpecial().
10745 */
10746IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10747{
10748 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10749 if (rcStrict == VINF_SUCCESS)
10750 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10751 return rcStrict;
10752}
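
/*
 * A minimal usage sketch of the special push pair above (documentation only,
 * not built): the helper name and the error-code scenario are hypothetical,
 * but the call sequence follows the contract documented on the two functions.
 */
#if 0
static VBOXSTRICTRC iemExamplePushErrCdSpecial(PVMCPU pVCpu, uint32_t uErrCd)
{
    void        *pvMem;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), &pvMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint32_t *)pvMem = uErrCd;        /* may be direct or bounce buffered memory */
    /* RSP is only updated by the commit, so nothing is lost if mapping fails. */
    return iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp);
}
#endif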
10753
10754
10755/**
10756 * Begin a special stack pop (used by iret, retf and such).
10757 *
10758 * This will raise \#SS or \#PF if appropriate.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10762 * @param cbMem The number of bytes to pop from the stack.
10763 * @param ppvMem Where to return the pointer to the stack memory.
10764 * @param puNewRsp Where to return the new RSP value. This must be
10765 * assigned to CPUMCTX::rsp manually some time
10766 * after iemMemStackPopDoneSpecial() has been
10767 * called.
10768 */
10769IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10770{
10771 Assert(cbMem < UINT8_MAX);
10772 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10773 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10774}
10775
10776
10777/**
10778 * Continue a special stack pop (used by iret and retf).
10779 *
10780 * This will raise \#SS or \#PF if appropriate.
10781 *
10782 * @returns Strict VBox status code.
10783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10784 * @param cbMem The number of bytes to pop from the stack.
10785 * @param ppvMem Where to return the pointer to the stack memory.
10786 * @param puNewRsp Where to return the new RSP value. This must be
10787 * assigned to CPUMCTX::rsp manually some time
10788 * after iemMemStackPopDoneSpecial() has been
10789 * called.
10790 */
10791IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10792{
10793 Assert(cbMem < UINT8_MAX);
10794 RTUINT64U NewRsp;
10795 NewRsp.u = *puNewRsp;
10796 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10797 *puNewRsp = NewRsp.u;
10798 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10799}
10800
10801
10802/**
10803 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10804 * iemMemStackPopContinueSpecial).
10805 *
10806 * The caller will manually commit the rSP.
10807 *
10808 * @returns Strict VBox status code.
10809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10810 * @param pvMem The pointer returned by
10811 * iemMemStackPopBeginSpecial() or
10812 * iemMemStackPopContinueSpecial().
10813 */
10814IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10815{
10816 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10817}
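
/*
 * A minimal usage sketch of the special pop trio above (documentation only,
 * not built): the helper name is hypothetical; the real callers are the
 * iret/retf style code in IEMAllCImpl.cpp.h.  Note that rSP is committed
 * manually, and only after the done call has succeeded.
 */
#if 0
static VBOXSTRICTRC iemExamplePopU64Special(PVMCPU pVCpu, uint64_t *pu64Value)
{
    uint64_t const *pu64Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), (void const **)&pu64Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu64Value = *pu64Frame;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* manual rSP commit */
    return rcStrict;
}
#endif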
10818
10819
10820/**
10821 * Fetches a system table byte.
10822 *
10823 * @returns Strict VBox status code.
10824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10825 * @param pbDst Where to return the byte.
10826 * @param iSegReg The index of the segment register to use for
10827 * this access. The base and limits are checked.
10828 * @param GCPtrMem The address of the guest memory.
10829 */
10830IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10831{
10832 /* The lazy approach for now... */
10833 uint8_t const *pbSrc;
10834 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10835 if (rc == VINF_SUCCESS)
10836 {
10837 *pbDst = *pbSrc;
10838 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10839 }
10840 return rc;
10841}
10842
10843
10844/**
10845 * Fetches a system table word.
10846 *
10847 * @returns Strict VBox status code.
10848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10849 * @param pu16Dst Where to return the word.
10850 * @param iSegReg The index of the segment register to use for
10851 * this access. The base and limits are checked.
10852 * @param GCPtrMem The address of the guest memory.
10853 */
10854IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10855{
10856 /* The lazy approach for now... */
10857 uint16_t const *pu16Src;
10858 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10859 if (rc == VINF_SUCCESS)
10860 {
10861 *pu16Dst = *pu16Src;
10862 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10863 }
10864 return rc;
10865}
10866
10867
10868/**
10869 * Fetches a system table dword.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10873 * @param pu32Dst Where to return the dword.
10874 * @param iSegReg The index of the segment register to use for
10875 * this access. The base and limits are checked.
10876 * @param GCPtrMem The address of the guest memory.
10877 */
10878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10879{
10880 /* The lazy approach for now... */
10881 uint32_t const *pu32Src;
10882 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10883 if (rc == VINF_SUCCESS)
10884 {
10885 *pu32Dst = *pu32Src;
10886 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10887 }
10888 return rc;
10889}
10890
10891
10892/**
10893 * Fetches a system table qword.
10894 *
10895 * @returns Strict VBox status code.
10896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10897 * @param pu64Dst Where to return the qword.
10898 * @param iSegReg The index of the segment register to use for
10899 * this access. The base and limits are checked.
10900 * @param GCPtrMem The address of the guest memory.
10901 */
10902IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10903{
10904 /* The lazy approach for now... */
10905 uint64_t const *pu64Src;
10906 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10907 if (rc == VINF_SUCCESS)
10908 {
10909 *pu64Dst = *pu64Src;
10910 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10911 }
10912 return rc;
10913}
10914
10915
10916/**
10917 * Fetches a descriptor table entry with caller specified error code.
10918 *
10919 * @returns Strict VBox status code.
10920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10921 * @param pDesc Where to return the descriptor table entry.
10922 * @param uSel The selector which table entry to fetch.
10923 * @param uXcpt The exception to raise on table lookup error.
10924 * @param uErrorCode The error code associated with the exception.
10925 */
10926IEM_STATIC VBOXSTRICTRC
10927iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10928{
10929 AssertPtr(pDesc);
10930 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10931
10932 /** @todo did the 286 require all 8 bytes to be accessible? */
10933 /*
10934 * Get the selector table base and check bounds.
10935 */
10936 RTGCPTR GCPtrBase;
10937 if (uSel & X86_SEL_LDT)
10938 {
10939 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10940 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10941 {
10942 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10943 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10944 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10945 uErrorCode, 0);
10946 }
10947
10948 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10949 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10950 }
10951 else
10952 {
10953 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10954 {
10955 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10956 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10957 uErrorCode, 0);
10958 }
10959 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10960 }
10961
10962 /*
10963 * Read the legacy descriptor and maybe the long mode extensions if
10964 * required.
10965 */
10966 VBOXSTRICTRC rcStrict;
10967 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10968 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10969 else
10970 {
10971 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10972 if (rcStrict == VINF_SUCCESS)
10973 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10974 if (rcStrict == VINF_SUCCESS)
10975 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10976 if (rcStrict == VINF_SUCCESS)
10977 pDesc->Legacy.au16[3] = 0;
10978 else
10979 return rcStrict;
10980 }
10981
10982 if (rcStrict == VINF_SUCCESS)
10983 {
10984 if ( !IEM_IS_LONG_MODE(pVCpu)
10985 || pDesc->Legacy.Gen.u1DescType)
10986 pDesc->Long.au64[1] = 0;
10987 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10988 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10989 else
10990 {
10991 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10992 /** @todo is this the right exception? */
10993 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10994 }
10995 }
10996 return rcStrict;
10997}
10998
10999
11000/**
11001 * Fetches a descriptor table entry.
11002 *
11003 * @returns Strict VBox status code.
11004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11005 * @param pDesc Where to return the descriptor table entry.
11006 * @param uSel The selector which table entry to fetch.
11007 * @param uXcpt The exception to raise on table lookup error.
11008 */
11009IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11010{
11011 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11012}
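
/*
 * A minimal usage sketch of the descriptor fetcher above (documentation only,
 * not built).  The helper name and the particular check are hypothetical; it
 * just shows the typical fetch-then-inspect pattern using fields already
 * referenced elsewhere in this file.
 */
#if 0
static VBOXSTRICTRC iemExampleIsCodeOrDataSel(PVMCPU pVCpu, uint16_t uSel, bool *pfCodeOrData)
{
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* table lookup error already handled by iemMemFetchSelDescWithErr */
    *pfCodeOrData = RT_BOOL(Desc.Legacy.Gen.u1DescType); /* 1 = code/data, 0 = system. */
    return VINF_SUCCESS;
}
#endif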
11013
11014
11015/**
11016 * Fakes a long mode stack selector for SS = 0.
11017 *
11018 * @param pDescSs Where to return the fake stack descriptor.
11019 * @param uDpl The DPL we want.
11020 */
11021IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11022{
11023 pDescSs->Long.au64[0] = 0;
11024 pDescSs->Long.au64[1] = 0;
11025 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11026 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11027 pDescSs->Long.Gen.u2Dpl = uDpl;
11028 pDescSs->Long.Gen.u1Present = 1;
11029 pDescSs->Long.Gen.u1Long = 1;
11030}
11031
11032
11033/**
11034 * Marks the selector descriptor as accessed (only non-system descriptors).
11035 *
11036 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11037 * will therefore skip the limit checks.
11038 *
11039 * @returns Strict VBox status code.
11040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11041 * @param uSel The selector.
11042 */
11043IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11044{
11045 /*
11046 * Get the selector table base and calculate the entry address.
11047 */
11048 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11049 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11050 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11051 GCPtr += uSel & X86_SEL_MASK;
11052
11053 /*
11054 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11055 * ugly stuff to avoid this. This also makes sure the access is atomic
11056 * and more or less removes any question about 8-bit vs 32-bit accesses.
11057 */
11058 VBOXSTRICTRC rcStrict;
11059 uint32_t volatile *pu32;
11060 if ((GCPtr & 3) == 0)
11061 {
11062 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
11063 GCPtr += 2 + 2;
11064 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11065 if (rcStrict != VINF_SUCCESS)
11066 return rcStrict;
11067 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11068 }
11069 else
11070 {
11071 /* The misaligned GDT/LDT case, map the whole thing. */
11072 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11073 if (rcStrict != VINF_SUCCESS)
11074 return rcStrict;
11075 switch ((uintptr_t)pu32 & 3)
11076 {
11077 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11078 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11079 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11080 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11081 }
11082 }
11083
11084 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11085}
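
/*
 * Worked example of the offset arithmetic above (documentation only): the
 * accessed flag is bit 40 of the 8-byte descriptor, i.e. bit 0 of the type
 * byte at offset 5.  Aligned case: bytes 4..7 are mapped, so the flag sits at
 * bit 40 - 32 = 8 of that dword.  Misaligned case: the byte adjustment that
 * realigns the host pointer is subtracted as bits, e.g. for
 * ((uintptr_t)pu32 & 3) == 2 we advance 2 bytes and set bit 40 - 16 = 24.
 */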
11086
11087/** @} */
11088
11089
11090/*
11091 * Include the C/C++ implementation of the instructions.
11092 */
11093#include "IEMAllCImpl.cpp.h"
11094
11095
11096
11097/** @name "Microcode" macros.
11098 *
11099 * The idea is that we should be able to use the same code to interpret
11100 * instructions as well as to generate recompiler code, hence this obfuscation.
11101 *
11102 * @{
11103 */
11104#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11105#define IEM_MC_END() }
11106#define IEM_MC_PAUSE() do {} while (0)
11107#define IEM_MC_CONTINUE() do {} while (0)
11108
11109/** Internal macro. */
11110#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11111 do \
11112 { \
11113 VBOXSTRICTRC rcStrict2 = a_Expr; \
11114 if (rcStrict2 != VINF_SUCCESS) \
11115 return rcStrict2; \
11116 } while (0)
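
/*
 * A hedged sketch of how the microcode macros read once assembled into an
 * instruction body (documentation only, not built).  The function name and
 * shape are hypothetical and some of the IEM_MC_* macros used here are only
 * defined further down in this section; the real users are the opcode
 * implementation includes further down in this file.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleOp_mov_Gv_Ev_32(PVMCPU pVCpu, uint8_t iGRegDst, uint8_t iGRegSrc)
{
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint32_t, u32Value);
    IEM_MC_FETCH_GREG_U32(u32Value, iGRegSrc);
    IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif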
11117
11118
11119#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11120#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11121#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11122#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11123#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11124#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11125#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11126#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11127#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11128 do { \
11129 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11130 return iemRaiseDeviceNotAvailable(pVCpu); \
11131 } while (0)
11132#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11133 do { \
11134 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11135 return iemRaiseDeviceNotAvailable(pVCpu); \
11136 } while (0)
11137#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11138 do { \
11139 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11140 return iemRaiseMathFault(pVCpu); \
11141 } while (0)
11142#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11143 do { \
11144 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11145 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11146 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11147 return iemRaiseUndefinedOpcode(pVCpu); \
11148 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11149 return iemRaiseDeviceNotAvailable(pVCpu); \
11150 } while (0)
11151#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11152 do { \
11153 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11154 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11155 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11156 return iemRaiseUndefinedOpcode(pVCpu); \
11157 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11158 return iemRaiseDeviceNotAvailable(pVCpu); \
11159 } while (0)
11160#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11161 do { \
11162 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11163 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11164 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11165 return iemRaiseUndefinedOpcode(pVCpu); \
11166 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11167 return iemRaiseDeviceNotAvailable(pVCpu); \
11168 } while (0)
11169#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11170 do { \
11171 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11172 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11173 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11174 return iemRaiseUndefinedOpcode(pVCpu); \
11175 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11179 do { \
11180 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11181 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11182 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11183 return iemRaiseUndefinedOpcode(pVCpu); \
11184 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11185 return iemRaiseDeviceNotAvailable(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11188 do { \
11189 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11190 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11191 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11192 return iemRaiseUndefinedOpcode(pVCpu); \
11193 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11194 return iemRaiseDeviceNotAvailable(pVCpu); \
11195 } while (0)
11196#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11197 do { \
11198 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11199 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11200 return iemRaiseUndefinedOpcode(pVCpu); \
11201 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11202 return iemRaiseDeviceNotAvailable(pVCpu); \
11203 } while (0)
11204#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11205 do { \
11206 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11207 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11208 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11209 return iemRaiseUndefinedOpcode(pVCpu); \
11210 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11211 return iemRaiseDeviceNotAvailable(pVCpu); \
11212 } while (0)
11213#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11214 do { \
11215 if (pVCpu->iem.s.uCpl != 0) \
11216 return iemRaiseGeneralProtectionFault0(pVCpu); \
11217 } while (0)
11218#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11219 do { \
11220 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11221 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11222 } while (0)
11223#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11224 do { \
11225 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11226 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11227 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11228 return iemRaiseUndefinedOpcode(pVCpu); \
11229 } while (0)
11230#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11231 do { \
11232 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11233 return iemRaiseGeneralProtectionFault0(pVCpu); \
11234 } while (0)
11235
11236
11237#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11238#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11239#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11240#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11241#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11242#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11243#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11244 uint32_t a_Name; \
11245 uint32_t *a_pName = &a_Name
11246#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11247 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11248
11249#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11250#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11251
11252#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11253#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11254#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11255#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11256#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11257#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11258#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11259#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11260#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11261#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11262#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11266#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11267#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11268#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11269#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11270 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11271 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11272 } while (0)
11273#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11274 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11275 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11276 } while (0)
11277#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11278 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11279 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11280 } while (0)
11281/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11282#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11283 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11284 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11285 } while (0)
11286#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11287 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11288 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11289 } while (0)
11290/** @note Not for IOPL or IF testing or modification. */
11291#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11292#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11293#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11294#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11295
11296#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11297#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11298#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11299#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11300#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11301#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11302#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11303#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11304#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11305#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11306/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11307#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11308 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11309 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11310 } while (0)
11311#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11312 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11313 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11314 } while (0)
11315#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11316 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
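
/*
 * Hedged illustration (documentation only, not built) of why the 32-bit GPR
 * store goes through the 64-bit reference: in 64-bit mode a 32-bit write
 * zero-extends into bits 63:32, so assigning the uint32_t-cast value to the
 * 64-bit storage clears the high dword as a side effect.
 */
#if 0
IEM_MC_STORE_GREG_U32(X86_GREG_xAX, UINT32_C(0x11223344));
/* eax = 0x11223344 and rax = 0x0000000011223344, whatever rax held before. */
#endif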
11317
11318
11319#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11320#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11321/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11322 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11323#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11324#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11325/** @note Not for IOPL or IF testing or modification. */
11326#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11327
11328#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11329#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11330#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11331 do { \
11332 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11333 *pu32Reg += (a_u32Value); \
11334 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11335 } while (0)
11336#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11337
11338#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11339#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11340#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11341 do { \
11342 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11343 *pu32Reg -= (a_u32Value); \
11344 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11345 } while (0)
11346#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11347#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11348
11349#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11350#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11351#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11352#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11353#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11354#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11355#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11356
11357#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11358#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11359#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11360#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11361
11362#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11363#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11364#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11365
11366#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11367#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11368#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11369
11370#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11371#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11372#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11373
11374#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11375#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11376#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11377
11378#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11379
11380#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11381
11382#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11383#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11384#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11385 do { \
11386 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11387 *pu32Reg &= (a_u32Value); \
11388 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11389 } while (0)
11390#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11391
11392#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11393#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11394#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11395 do { \
11396 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11397 *pu32Reg |= (a_u32Value); \
11398 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11399 } while (0)
11400#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11401
11402
11403/** @note Not for IOPL or IF modification. */
11404#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11405/** @note Not for IOPL or IF modification. */
11406#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11407/** @note Not for IOPL or IF modification. */
11408#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11409
11410#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11411
11412/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0xff) if necessary. */
11413#define IEM_MC_FPU_TO_MMX_MODE() do { \
11414 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11415 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11416 } while (0)
11417
11418/** Switches the FPU state from MMX mode (FTW=0xffff). */
11419#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11420 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11421 } while (0)
11422
11423#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11424 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11425#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11426 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11427#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11428 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11429 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11430 } while (0)
11431#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11432 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11433 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11434 } while (0)
11435#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11436 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11437#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11438 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11439#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11440 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
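
/*
 * Hedged illustration (documentation only, not built): the MMX registers
 * alias the low 64 bits of the x87 registers, which is why the store macros
 * above also set au32[2] to 0xffff, i.e. the exponent/sign bits 79:64 of the
 * aliased 80-bit register become all ones, as on real hardware.
 */
#if 0
IEM_MC_STORE_MREG_U64(3 /*mm3*/, UINT64_C(0x8877665544332211));
/* The aliased x87 register now reads as 0xffff:8877665544332211 (80 bits). */
#endif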
11441
11442#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11443 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11444 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11445 } while (0)
11446#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11447 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11448#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11449 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11450#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11451 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11452#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11453 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11454 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11455 } while (0)
11456#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11457 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11458#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11459 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11460 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11461 } while (0)
11462#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11463 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11464#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11465 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11466 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11467 } while (0)
11468#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11469 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11470#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11471 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11472#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11473 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11474#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11475 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11476#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11477 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11478 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11479 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11480 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11481 } while (0)
11482
11483#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11484 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11485 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11486 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11487 } while (0)
11488#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11489 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11490 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11491 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11492 } while (0)
11493#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11494 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11495 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11496 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11497 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11498 } while (0)
11499#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11500 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11501 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11502 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11503 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11504 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11505 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11506 } while (0)
11507
11508#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11509#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11510 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11511 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11512 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11513 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11514 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11515 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11516 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11517 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11518 } while (0)
11519#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11520 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11521 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11523 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11524 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11525 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11526 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11527 } while (0)
11528#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11529 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11530 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11532 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11534 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11535 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11536 } while (0)
11537#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11538 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11539 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11544 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11545 } while (0)
11546
11547#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11548 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11549#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11550 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11551#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11552 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11553#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11554 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11555 uintptr_t const iYRegTmp = (a_iYReg); \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11557 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11558 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11559 } while (0)
11560
11561#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11562 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11563 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11564 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11569 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11570 } while (0)
11571#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11572 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11573 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11574 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11579 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11580 } while (0)
11581#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11582 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11583 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11584 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11589 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11590 } while (0)
11591
11592#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11593 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11594 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11595 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11596 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11597 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11598 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11599 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11600 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11601 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11602 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11603 } while (0)
11604#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11605 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11606 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11607 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11608 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11609 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11610 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11612 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11613 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11614 } while (0)
11615#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11616 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11617 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11618 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11619 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11621 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11623 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11624 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11625 } while (0)
11626#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11627 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11628 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11629 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11631 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11633 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11634 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11635 } while (0)
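
/*
 * Hedged illustration (documentation only, not built): the *_ZX_VLMAX
 * variants model the VEX rule that writing a 128-bit (or narrower)
 * destination zeroes the register above the written width up to VLMAX,
 * which is why each of them also clears u.YmmHi.aYmmHi[] for the register.
 */
#if 0
RTUINT128U uSrc;                                /* assume just loaded from memory */
IEM_MC_STORE_YREG_U128_ZX_VLMAX(2 /*ymm2*/, uSrc);
/* xmm2 = uSrc and ymm2 bits 255:128 are now zero. */
#endif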
11636
11637#ifndef IEM_WITH_SETJMP
11638# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11639 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11640# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11641 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11642# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11643 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11644#else
11645# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11646 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11647# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11648 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11649# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11650 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11651#endif
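
/*
 * Hedged illustration (documentation only, not built) of the two build modes
 * above: without IEM_WITH_SETJMP the fetcher reports failure via the strict
 * status code and IEM_MC_RETURN_ON_FAILURE propagates it, whereas with
 * IEM_WITH_SETJMP the *Jmp fetcher longjmps out on failure and the macro
 * reduces to a plain assignment.  The expansions roughly read:
 */
#if 0
/* Without IEM_WITH_SETJMP: */
do
{
    VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pVCpu, &u8Dst, iSegReg, GCPtrMem);
    if (rcStrict2 != VINF_SUCCESS)
        return rcStrict2;
} while (0);
/* With IEM_WITH_SETJMP: */
u8Dst = iemMemFetchDataU8Jmp(pVCpu, iSegReg, GCPtrMem);
#endif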
11652
11653#ifndef IEM_WITH_SETJMP
11654# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11656# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11657 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11658# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11660#else
11661# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11662 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11663# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11664 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11665# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11666 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11667#endif
11668
11669#ifndef IEM_WITH_SETJMP
11670# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11672# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11674# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11676#else
11677# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11678 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11679# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11680 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11681# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11682 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11683#endif
11684
11685#ifdef SOME_UNUSED_FUNCTION
11686# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11688#endif
11689
11690#ifndef IEM_WITH_SETJMP
11691# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11693# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11695# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11696 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11697# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11699#else
11700# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11701 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11702# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11703 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11704# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11705 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11706# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11707 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11708#endif
11709
11710#ifndef IEM_WITH_SETJMP
11711# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11713# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11715# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11717#else
11718# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11719 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11720# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11721 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11722# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11723 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11724#endif
11725
11726#ifndef IEM_WITH_SETJMP
11727# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11729# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11730 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11731#else
11732# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11733 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11734# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11735 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11736#endif
11737
11738#ifndef IEM_WITH_SETJMP
11739# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11743#else
11744# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11745 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11746# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11747 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11748#endif
11749
11750
11751
11752#ifndef IEM_WITH_SETJMP
11753# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11754 do { \
11755 uint8_t u8Tmp; \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11757 (a_u16Dst) = u8Tmp; \
11758 } while (0)
11759# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11760 do { \
11761 uint8_t u8Tmp; \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11763 (a_u32Dst) = u8Tmp; \
11764 } while (0)
11765# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint8_t u8Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u64Dst) = u8Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint16_t u16Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u32Dst) = u16Tmp; \
11776 } while (0)
11777# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11778 do { \
11779 uint16_t u16Tmp; \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11781 (a_u64Dst) = u16Tmp; \
11782 } while (0)
11783# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 do { \
11785 uint32_t u32Tmp; \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11787 (a_u64Dst) = u32Tmp; \
11788 } while (0)
11789#else /* IEM_WITH_SETJMP */
11790# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11791 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11792# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11793 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11794# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11795 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11796# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11797 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11798# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11799 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11800# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802#endif /* IEM_WITH_SETJMP */
11803
11804#ifndef IEM_WITH_SETJMP
11805# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11806 do { \
11807 uint8_t u8Tmp; \
11808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11809 (a_u16Dst) = (int8_t)u8Tmp; \
11810 } while (0)
11811# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 do { \
11813 uint8_t u8Tmp; \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11815 (a_u32Dst) = (int8_t)u8Tmp; \
11816 } while (0)
11817# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint8_t u8Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u64Dst) = (int8_t)u8Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint16_t u16Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u32Dst) = (int16_t)u16Tmp; \
11828 } while (0)
11829# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11830 do { \
11831 uint16_t u16Tmp; \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11833 (a_u64Dst) = (int16_t)u16Tmp; \
11834 } while (0)
11835# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 do { \
11837 uint32_t u32Tmp; \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11839 (a_u64Dst) = (int32_t)u32Tmp; \
11840 } while (0)
11841#else /* IEM_WITH_SETJMP */
11842# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11843 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11844# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11845 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11846# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11847 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11848# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11851 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11852# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854#endif /* IEM_WITH_SETJMP */
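/* Usage sketch for the zero- and sign-extending fetch wrappers above. This is a
 * hypothetical decoder fragment, not an instruction implementation from this file;
 * u64Dst and GCPtrEffSrc stand in for the usual MC-block locals, and each macro
 * expands to either the status-code or the longjmp flavour depending on
 * IEM_WITH_SETJMP:
 * @code
 *     uint64_t u64Dst;
 *     // movsx-like load: sign-extend a 16-bit memory operand into a 64-bit local.
 *     IEM_MC_FETCH_MEM_U16_SX_U64(u64Dst, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     // movzx-like load: zero-extend instead.
 *     IEM_MC_FETCH_MEM_U16_ZX_U64(u64Dst, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 * @endcode
 */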
11855
11856#ifndef IEM_WITH_SETJMP
11857# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11858 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11859# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11861# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11863# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11865#else
11866# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11867 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11868# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11869 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11870# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11871 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11872# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11873 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11874#endif
11875
11876#ifndef IEM_WITH_SETJMP
11877# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11878 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11879# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11881# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11882 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11883# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11885#else
11886# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11887 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11888# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11889 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11890# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11891 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11892# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11893 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11894#endif
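/* Usage sketch for the store wrappers above (hypothetical fragment; GCPtrEffDst is a
 * stand-in local). Note that the _CONST variants expand to the very same store
 * helpers; they appear to exist only to mark constant source operands:
 * @code
 *     IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
 *     IEM_MC_STORE_MEM_U8_CONST(pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0xff);
 * @endcode
 */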
11895
11896#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11897#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11898#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11899#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11900#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11901#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11902#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11903 do { \
11904 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11905 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11906 } while (0)
11907
11908#ifndef IEM_WITH_SETJMP
11909# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11911# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11913#else
11914# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11915 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11916# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11917 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11918#endif
11919
11920#ifndef IEM_WITH_SETJMP
11921# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11922 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11923# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11925#else
11926# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11927 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11928# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11929 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11930#endif
11931
11932
11933#define IEM_MC_PUSH_U16(a_u16Value) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11935#define IEM_MC_PUSH_U32(a_u32Value) \
11936 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11937#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11938 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11939#define IEM_MC_PUSH_U64(a_u64Value) \
11940 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11941
11942#define IEM_MC_POP_U16(a_pu16Value) \
11943 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11944#define IEM_MC_POP_U32(a_pu32Value) \
11945 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11946#define IEM_MC_POP_U64(a_pu64Value) \
11947 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
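/* Usage sketch for the guest stack push/pop wrappers (hypothetical fragment).
 * The pop macros take a pointer to the destination local:
 * @code
 *     uint16_t u16Value = 0x1234;      // stand-in for an MC-block local
 *     IEM_MC_PUSH_U16(u16Value);       // pushes onto the guest stack, may return on fault
 *     IEM_MC_POP_U16(&u16Value);       // pops back into the local
 * @endcode
 */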
11948
11949/** Maps guest memory for direct or bounce buffered access.
11950 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11951 * @remarks May return.
11952 */
11953#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11954 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11955
11956/** Maps guest memory for direct or bounce buffered access.
11957 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11958 * @remarks May return.
11959 */
11960#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11961 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11962
11963/** Commits the memory and unmaps the guest memory.
11964 * @remarks May return.
11965 */
11966#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11967 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11968
11969/** Commits the memory and unmaps the guest memory unless the FPU status word
11970 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11971 * that would prevent the store.

11972 *
11973 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11974 * store, while \#P will not.
11975 *
11976 * @remarks May in theory return - for now.
11977 */
11978#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11979 do { \
11980 if ( !(a_u16FSW & X86_FSW_ES) \
11981 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11982 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11983 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11984 } while (0)
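/* Hedged sketch of the typical map/modify/commit pattern (hypothetical fragment;
 * pu32Dst, GCPtrEffDst and the IEM_ACCESS_DATA_RW flag are illustrative stand-ins):
 * @code
 *     uint32_t *pu32Dst;
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0); // arg index 0
 *     *pu32Dst |= 1;                                        // operate on the mapped (or bounce buffered) memory
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 * @endcode
 */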
11985
11986/** Calculates the effective address from R/M. */
11987#ifndef IEM_WITH_SETJMP
11988# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11989 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11990#else
11991# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11992 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11993#endif
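/* Usage sketch (hypothetical fragment; in real decoder bodies GCPtrEffSrc would be an
 * MC-block local and bRm holds the ModR/M byte already fetched). The immediate size
 * argument must match any immediate bytes that follow, so RIP-relative forms resolve
 * to the end of the instruction:
 * @code
 *     RTGCPTR GCPtrEffSrc;
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);   // cbImm = 0, no immediate follows
 * @endcode
 */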
11994
11995#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11996#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11997#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11998#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11999#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12000#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12001#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
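/* Hedged sketch: the AIMPL call macros are plain function invocations, typically fed
 * with operand pointers set up earlier in the MC block (hypothetical fragment;
 * pfnWorker, pu32Dst and pEFlags are stand-ins, not names from this file):
 * @code
 *     IEM_MC_CALL_VOID_AIMPL_2(pfnWorker, pu32Dst, pEFlags);   // expands to pfnWorker(pu32Dst, pEFlags)
 * @endcode
 */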
12002
12003/**
12004 * Defers the rest of the instruction emulation to a C implementation routine
12005 * and returns, only taking the standard parameters.
12006 *
12007 * @param a_pfnCImpl The pointer to the C routine.
12008 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12009 */
12010#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12011
12012/**
12013 * Defers the rest of the instruction emulation to a C implementation routine and

12014 * returns, taking one argument in addition to the standard ones.
12015 *
12016 * @param a_pfnCImpl The pointer to the C routine.
12017 * @param a0 The argument.
12018 */
12019#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12020
12021/**
12022 * Defers the rest of the instruction emulation to a C implementation routine
12023 * and returns, taking two arguments in addition to the standard ones.
12024 *
12025 * @param a_pfnCImpl The pointer to the C routine.
12026 * @param a0 The first extra argument.
12027 * @param a1 The second extra argument.
12028 */
12029#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12030
12031/**
12032 * Defers the rest of the instruction emulation to a C implementation routine
12033 * and returns, taking three arguments in addition to the standard ones.
12034 *
12035 * @param a_pfnCImpl The pointer to the C routine.
12036 * @param a0 The first extra argument.
12037 * @param a1 The second extra argument.
12038 * @param a2 The third extra argument.
12039 */
12040#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12041
12042/**
12043 * Defers the rest of the instruction emulation to a C implementation routine
12044 * and returns, taking four arguments in addition to the standard ones.
12045 *
12046 * @param a_pfnCImpl The pointer to the C routine.
12047 * @param a0 The first extra argument.
12048 * @param a1 The second extra argument.
12049 * @param a2 The third extra argument.
12050 * @param a3 The fourth extra argument.
12051 */
12052#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12053
12054/**
12055 * Defers the rest of the instruction emulation to a C implementation routine
12056 * and returns, taking five arguments in addition to the standard ones.
12057 *
12058 * @param a_pfnCImpl The pointer to the C routine.
12059 * @param a0 The first extra argument.
12060 * @param a1 The second extra argument.
12061 * @param a2 The third extra argument.
12062 * @param a3 The fourth extra argument.
12063 * @param a4 The fifth extra argument.
12064 */
12065#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12066
12067/**
12068 * Defers the entire instruction emulation to a C implementation routine and
12069 * returns, only taking the standard parameters.
12070 *
12071 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12072 *
12073 * @param a_pfnCImpl The pointer to the C routine.
12074 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12075 */
12076#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12077
12078/**
12079 * Defers the entire instruction emulation to a C implementation routine and
12080 * returns, taking one argument in addition to the standard ones.
12081 *
12082 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12083 *
12084 * @param a_pfnCImpl The pointer to the C routine.
12085 * @param a0 The argument.
12086 */
12087#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12088
12089/**
12090 * Defers the entire instruction emulation to a C implementation routine and
12091 * returns, taking two arguments in addition to the standard ones.
12092 *
12093 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12094 *
12095 * @param a_pfnCImpl The pointer to the C routine.
12096 * @param a0 The first extra argument.
12097 * @param a1 The second extra argument.
12098 */
12099#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12100
12101/**
12102 * Defers the entire instruction emulation to a C implementation routine and
12103 * returns, taking three arguments in addition to the standard ones.
12104 *
12105 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12106 *
12107 * @param a_pfnCImpl The pointer to the C routine.
12108 * @param a0 The first extra argument.
12109 * @param a1 The second extra argument.
12110 * @param a2 The third extra argument.
12111 */
12112#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
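/* Hedged sketch of the difference between the two families above: IEM_MC_CALL_CIMPL_n
 * ends an IEM_MC_BEGIN/IEM_MC_END block once operands have been decoded, while
 * IEM_MC_DEFER_TO_CIMPL_n replaces the block entirely (hypothetical fragments; both
 * worker names are stand-ins, not routines from this file):
 * @code
 *     // Inside an MC block, after operand decoding (the macro itself returns):
 *     IEM_MC_CALL_CIMPL_1(iemCImpl_SomeWorker, uArg);
 *
 *     // As the entire instruction body, with no MC block at all:
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeOtherWorker);
 * @endcode
 */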
12113
12114/**
12115 * Calls a FPU assembly implementation taking one visible argument.
12116 *
12117 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12118 * @param a0 The first extra argument.
12119 */
12120#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12121 do { \
12122 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12123 } while (0)
12124
12125/**
12126 * Calls a FPU assembly implementation taking two visible arguments.
12127 *
12128 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12129 * @param a0 The first extra argument.
12130 * @param a1 The second extra argument.
12131 */
12132#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12133 do { \
12134 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12135 } while (0)
12136
12137/**
12138 * Calls a FPU assembly implementation taking three visible arguments.
12139 *
12140 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12141 * @param a0 The first extra argument.
12142 * @param a1 The second extra argument.
12143 * @param a2 The third extra argument.
12144 */
12145#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12146 do { \
12147 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12148 } while (0)
12149
12150#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12151 do { \
12152 (a_FpuData).FSW = (a_FSW); \
12153 (a_FpuData).r80Result = *(a_pr80Value); \
12154 } while (0)
12155
12156/** Pushes FPU result onto the stack. */
12157#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12158 iemFpuPushResult(pVCpu, &a_FpuData)
12159/** Pushes FPU result onto the stack and sets the FPUDP. */
12160#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12161 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12162
12163/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12164#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12165 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12166
12167/** Stores FPU result in a stack register. */
12168#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12169 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12170/** Stores FPU result in a stack register and pops the stack. */
12171#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12172 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12173/** Stores FPU result in a stack register and sets the FPUDP. */
12174#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12175 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12176/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12177 * stack. */
12178#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12179 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
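/* Hedged sketch of the usual FPU result flow (hypothetical fragment; FpuRes and
 * pr80Value2 are stand-ins for MC-block locals, pfnFpuWorker is a stand-in worker,
 * and IEMFPURESULT is the result type implied by IEM_MC_SET_FPU_RESULT above):
 * @code
 *     IEMFPURESULT FpuRes;
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_CALL_FPU_AIMPL_2(pfnFpuWorker, &FpuRes, pr80Value2);  // worker gets (&x87, &FpuRes, pr80Value2)
 *     IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, 0);                 // iStReg = 0, i.e. ST(0)
 * @endcode
 */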
12180
12181/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12182#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12183 iemFpuUpdateOpcodeAndIp(pVCpu)
12184/** Free a stack register (for FFREE and FFREEP). */
12185#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12186 iemFpuStackFree(pVCpu, a_iStReg)
12187/** Increment the FPU stack pointer. */
12188#define IEM_MC_FPU_STACK_INC_TOP() \
12189 iemFpuStackIncTop(pVCpu)
12190/** Decrement the FPU stack pointer. */
12191#define IEM_MC_FPU_STACK_DEC_TOP() \
12192 iemFpuStackDecTop(pVCpu)
12193
12194/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12195#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12196 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12197/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12198#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12199 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12200/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12201#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12202 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12203/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12204#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12205 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12206/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12207 * stack. */
12208#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12209 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12210/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12211#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12212 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12213
12214/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12215#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12216 iemFpuStackUnderflow(pVCpu, a_iStDst)
12217/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12218 * stack. */
12219#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12220 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12221/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12222 * FPUDS. */
12223#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12224 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12225/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12226 * FPUDS. Pops stack. */
12227#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12228 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12229/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12230 * stack twice. */
12231#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12232 iemFpuStackUnderflowThenPopPop(pVCpu)
12233/** Raises a FPU stack underflow exception for an instruction pushing a result
12234 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12235#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12236 iemFpuStackPushUnderflow(pVCpu)
12237/** Raises a FPU stack underflow exception for an instruction pushing a result
12238 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12239#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12240 iemFpuStackPushUnderflowTwo(pVCpu)
12241
12242/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12243 * FPUIP, FPUCS and FOP. */
12244#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12245 iemFpuStackPushOverflow(pVCpu)
12246/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12247 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12248#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12249 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12250/** Prepares for using the FPU state.
12251 * Ensures that we can use the host FPU in the current context (RC+R0).
12252 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12253#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12254/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12255#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12256/** Actualizes the guest FPU state so it can be accessed and modified. */
12257#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12258
12259/** Prepares for using the SSE state.
12260 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12261 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12262#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12263/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12264#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12265/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12266#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12267
12268/** Prepares for using the AVX state.
12269 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12270 * Ensures the guest AVX state in the CPUMCTX is up to date.
12271 * @note This will include the AVX512 state too when support for it is added,
12272 *       due to the zero-extending behaviour of VEX instructions. */
12273#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12274/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12275#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12276/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12277#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
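/* Hedged usage note: an instruction body actualizes or prepares the relevant state
 * before touching the corresponding registers, e.g. for a read-only SSE access
 * (hypothetical fragment; the register fetch macro is assumed from elsewhere in IEM):
 * @code
 *     IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *     IEM_MC_FETCH_XREG_U128(u128Tmp, iXRegSrc);   // assumed companion macro, stand-in locals
 * @endcode
 */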
12278
12279/**
12280 * Calls an MMX assembly implementation taking two visible arguments.
12281 *
12282 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12283 * @param a0 The first extra argument.
12284 * @param a1 The second extra argument.
12285 */
12286#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12287 do { \
12288 IEM_MC_PREPARE_FPU_USAGE(); \
12289 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12290 } while (0)
12291
12292/**
12293 * Calls an MMX assembly implementation taking three visible arguments.
12294 *
12295 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12296 * @param a0 The first extra argument.
12297 * @param a1 The second extra argument.
12298 * @param a2 The third extra argument.
12299 */
12300#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12301 do { \
12302 IEM_MC_PREPARE_FPU_USAGE(); \
12303 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12304 } while (0)
12305
12306
12307/**
12308 * Calls an SSE assembly implementation taking two visible arguments.
12309 *
12310 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12311 * @param a0 The first extra argument.
12312 * @param a1 The second extra argument.
12313 */
12314#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12315 do { \
12316 IEM_MC_PREPARE_SSE_USAGE(); \
12317 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12318 } while (0)
12319
12320/**
12321 * Calls an SSE assembly implementation taking three visible arguments.
12322 *
12323 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12324 * @param a0 The first extra argument.
12325 * @param a1 The second extra argument.
12326 * @param a2 The third extra argument.
12327 */
12328#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12329 do { \
12330 IEM_MC_PREPARE_SSE_USAGE(); \
12331 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12332 } while (0)
12333
12334
12335/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12336 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12337#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12338 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12339
12340/**
12341 * Calls an AVX assembly implementation taking two visible arguments.
12342 *
12343 * There is one implicit zero'th argument, a pointer to the extended state.
12344 *
12345 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12346 * @param a1 The first extra argument.
12347 * @param a2 The second extra argument.
12348 */
12349#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12350 do { \
12351 IEM_MC_PREPARE_AVX_USAGE(); \
12352 a_pfnAImpl(pXState, (a1), (a2)); \
12353 } while (0)
12354
12355/**
12356 * Calls an AVX assembly implementation taking three visible arguments.
12357 *
12358 * There is one implicit zero'th argument, a pointer to the extended state.
12359 *
12360 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12361 * @param a1 The first extra argument.
12362 * @param a2 The second extra argument.
12363 * @param a3 The third extra argument.
12364 */
12365#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12366 do { \
12367 IEM_MC_PREPARE_AVX_USAGE(); \
12368 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12369 } while (0)
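/* Hedged sketch of the implicit-argument mechanism above: IEM_MC_IMPLICIT_AVX_AIMPL_ARGS
 * declares the pXState argument that IEM_MC_CALL_AVX_AIMPL_n then passes as the hidden
 * zero'th parameter (hypothetical fragment; pfnAvxWorker, a1 and a2 are stand-ins):
 * @code
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                 // defines 'pXState' as argument 0
 *     IEM_MC_CALL_AVX_AIMPL_2(pfnAvxWorker, a1, a2);    // expands to pfnAvxWorker(pXState, a1, a2)
 * @endcode
 */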
12370
12371/** @note Not for IOPL or IF testing. */
12372#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12373/** @note Not for IOPL or IF testing. */
12374#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12375/** @note Not for IOPL or IF testing. */
12376#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12377/** @note Not for IOPL or IF testing. */
12378#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12379/** @note Not for IOPL or IF testing. */
12380#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12381 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12382 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12383/** @note Not for IOPL or IF testing. */
12384#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12385 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12386 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12387/** @note Not for IOPL or IF testing. */
12388#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12389 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12390 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12391 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12392/** @note Not for IOPL or IF testing. */
12393#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12394 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12395 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12396 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12397#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12398#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12399#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12400/** @note Not for IOPL or IF testing. */
12401#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12402 if ( pVCpu->cpum.GstCtx.cx != 0 \
12403 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12404/** @note Not for IOPL or IF testing. */
12405#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12406 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12407 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12408/** @note Not for IOPL or IF testing. */
12409#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12410 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12411 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12412/** @note Not for IOPL or IF testing. */
12413#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12414 if ( pVCpu->cpum.GstCtx.cx != 0 \
12415 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12416/** @note Not for IOPL or IF testing. */
12417#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12418 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12419 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12422 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12423 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12424#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12425#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12426
12427#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12428 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12429#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12430 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12431#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12432 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12433#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12434 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12435#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12436 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12437#define IEM_MC_IF_FCW_IM() \
12438 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12439
12440#define IEM_MC_ELSE() } else {
12441#define IEM_MC_ENDIF() } do {} while (0)
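/* Hedged note on the conditional macros above: each IEM_MC_IF_* supplies its own opening
 * brace, which IEM_MC_ELSE and IEM_MC_ENDIF close again, so decoder bodies read like
 * ordinary C (hypothetical fragment):
 * @code
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         // statements for the taken path
 *     IEM_MC_ELSE()
 *         // statements for the not-taken path
 *     IEM_MC_ENDIF();
 * @endcode
 * The trailing 'do {} while (0)' in IEM_MC_ENDIF is what makes the closing ';' legal.
 */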
12442
12443/** @} */
12444
12445
12446/** @name Opcode Debug Helpers.
12447 * @{
12448 */
12449#ifdef VBOX_WITH_STATISTICS
12450# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12451#else
12452# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12453#endif
12454
12455#ifdef DEBUG
12456# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12457 do { \
12458 IEMOP_INC_STATS(a_Stats); \
12459 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12460 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12461 } while (0)
12462
12463# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12464 do { \
12465 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12466 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12467 (void)RT_CONCAT(OP_,a_Upper); \
12468 (void)(a_fDisHints); \
12469 (void)(a_fIemHints); \
12470 } while (0)
12471
12472# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12473 do { \
12474 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12475 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12476 (void)RT_CONCAT(OP_,a_Upper); \
12477 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12478 (void)(a_fDisHints); \
12479 (void)(a_fIemHints); \
12480 } while (0)
12481
12482# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12483 do { \
12484 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12485 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12486 (void)RT_CONCAT(OP_,a_Upper); \
12487 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12488 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12489 (void)(a_fDisHints); \
12490 (void)(a_fIemHints); \
12491 } while (0)
12492
12493# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12494 do { \
12495 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12496 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12497 (void)RT_CONCAT(OP_,a_Upper); \
12498 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12499 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12500 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12501 (void)(a_fDisHints); \
12502 (void)(a_fIemHints); \
12503 } while (0)
12504
12505# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12506 do { \
12507 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12508 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12509 (void)RT_CONCAT(OP_,a_Upper); \
12510 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12511 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12512 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12513 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12514 (void)(a_fDisHints); \
12515 (void)(a_fIemHints); \
12516 } while (0)
12517
12518#else
12519# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12520
12521# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12522 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12523# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12524 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12525# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12526 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12527# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12528 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12529# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12530 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12531
12532#endif
12533
12534#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12535 IEMOP_MNEMONIC0EX(a_Lower, \
12536 #a_Lower, \
12537 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12538#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12539 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12540 #a_Lower " " #a_Op1, \
12541 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12542#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12543 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12544 #a_Lower " " #a_Op1 "," #a_Op2, \
12545 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12546#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12547 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12548 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12549 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12550#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12551 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12552 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12553 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
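/* Hedged sketch: the numbered wrappers build both the statistics member name and the
 * mnemonic string from their arguments. The tokens below are illustrative; the
 * referenced IEMOPFORM_/OP_/OP_PARM_ constants and the statistics member must already
 * exist for this to compile:
 * @code
 *     IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
 *     // -> statistics member 'movzx_Gv_Eb', mnemonic string "movzx Gv,Eb"
 * @endcode
 */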
12554
12555/** @} */
12556
12557
12558/** @name Opcode Helpers.
12559 * @{
12560 */
12561
12562#ifdef IN_RING3
12563# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12564 do { \
12565 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12566 else \
12567 { \
12568 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12569 return IEMOP_RAISE_INVALID_OPCODE(); \
12570 } \
12571 } while (0)
12572#else
12573# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12574 do { \
12575 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12576 else return IEMOP_RAISE_INVALID_OPCODE(); \
12577 } while (0)
12578#endif
12579
12580/** The instruction requires a 186 or later. */
12581#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12582# define IEMOP_HLP_MIN_186() do { } while (0)
12583#else
12584# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12585#endif
12586
12587/** The instruction requires a 286 or later. */
12588#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12589# define IEMOP_HLP_MIN_286() do { } while (0)
12590#else
12591# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12592#endif
12593
12594/** The instruction requires a 386 or later. */
12595#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12596# define IEMOP_HLP_MIN_386() do { } while (0)
12597#else
12598# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12599#endif
12600
12601/** The instruction requires a 386 or later if the given expression is true. */
12602#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12603# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12604#else
12605# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12606#endif
12607
12608/** The instruction requires a 486 or later. */
12609#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12610# define IEMOP_HLP_MIN_486() do { } while (0)
12611#else
12612# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12613#endif
12614
12615/** The instruction requires a Pentium (586) or later. */
12616#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12617# define IEMOP_HLP_MIN_586() do { } while (0)
12618#else
12619# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12620#endif
12621
12622/** The instruction requires a PentiumPro (686) or later. */
12623#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12624# define IEMOP_HLP_MIN_686() do { } while (0)
12625#else
12626# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12627#endif
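/* Hedged usage note: these checks go at the top of an opcode body so that older target
 * CPU profiles raise \#UD instead of decoding the instruction; they compile to nothing
 * when the configured target CPU is new enough (hypothetical fragment; the statistics
 * member name is a stand-in):
 * @code
 *     IEMOP_MNEMONIC(bswap_rAX_r8, "bswap rAX/r8");
 *     IEMOP_HLP_MIN_486();                             // BSWAP is a 486+ instruction
 * @endcode
 */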
12628
12629
12630/** The instruction raises an \#UD in real and V8086 mode. */
12631#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12632 do \
12633 { \
12634 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12635 else return IEMOP_RAISE_INVALID_OPCODE(); \
12636 } while (0)
12637
12638#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12639/** This instruction raises an \#UD in real and V8086 mode, or, when in long
12640 * mode, if not using a 64-bit code segment (applicable to all VMX instructions
12641 * except VMCALL).
12642 */
12643#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12644 do \
12645 { \
12646 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12647 && ( !IEM_IS_LONG_MODE(pVCpu) \
12648 || IEM_IS_64BIT_CODE(pVCpu))) \
12649 { /* likely */ } \
12650 else \
12651 { \
12652 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12653 { \
12654 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12655 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12656 return IEMOP_RAISE_INVALID_OPCODE(); \
12657 } \
12658 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12659 { \
12660 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12661 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12662 return IEMOP_RAISE_INVALID_OPCODE(); \
12663 } \
12664 } \
12665 } while (0)
12666
12667/** The instruction can only be executed in VMX operation (VMX root mode and
12668 * non-root mode).
12669 *
12670 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12671 */
12672# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12673 do \
12674 { \
12675 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12676 else \
12677 { \
12678 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12679 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12680 return IEMOP_RAISE_INVALID_OPCODE(); \
12681 } \
12682 } while (0)
12683#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12684
12685/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12686 * 64-bit mode. */
12687#define IEMOP_HLP_NO_64BIT() \
12688 do \
12689 { \
12690 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12691 return IEMOP_RAISE_INVALID_OPCODE(); \
12692 } while (0)
12693
12694/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12695 * 64-bit mode. */
12696#define IEMOP_HLP_ONLY_64BIT() \
12697 do \
12698 { \
12699 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12700 return IEMOP_RAISE_INVALID_OPCODE(); \
12701 } while (0)
12702
12703/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12704#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12705 do \
12706 { \
12707 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12708 iemRecalEffOpSize64Default(pVCpu); \
12709 } while (0)
12710
12711/** The instruction has 64-bit operand size if 64-bit mode. */
12712#define IEMOP_HLP_64BIT_OP_SIZE() \
12713 do \
12714 { \
12715 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12716 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12717 } while (0)
12718
12719/** Only a REX prefix immediately preceding the first opcode byte takes
12720 * effect. This macro helps ensuring this as well as logging bad guest code. */
12721#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12722 do \
12723 { \
12724 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12725 { \
12726 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12727 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12728 pVCpu->iem.s.uRexB = 0; \
12729 pVCpu->iem.s.uRexIndex = 0; \
12730 pVCpu->iem.s.uRexReg = 0; \
12731 iemRecalEffOpSize(pVCpu); \
12732 } \
12733 } while (0)
12734
12735/**
12736 * Done decoding.
12737 */
12738#define IEMOP_HLP_DONE_DECODING() \
12739 do \
12740 { \
12741 /*nothing for now, maybe later... */ \
12742 } while (0)
12743
12744/**
12745 * Done decoding, raise \#UD exception if lock prefix present.
12746 */
12747#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12748 do \
12749 { \
12750 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12751 { /* likely */ } \
12752 else \
12753 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12754 } while (0)
12755
12756
12757/**
12758 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12759 * repnz or size prefixes are present, or if in real or v8086 mode.
12760 */
12761#define IEMOP_HLP_DONE_VEX_DECODING() \
12762 do \
12763 { \
12764 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12765 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12766 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12767 { /* likely */ } \
12768 else \
12769 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12770 } while (0)
12771
12772/**
12773 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12774 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12775 */
12776#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12777 do \
12778 { \
12779 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12780 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12781 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12782 && pVCpu->iem.s.uVexLength == 0)) \
12783 { /* likely */ } \
12784 else \
12785 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12786 } while (0)
12787
12788
12789/**
12790 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12791 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12792 * register 0, or if in real or v8086 mode.
12793 */
12794#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12795 do \
12796 { \
12797 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12798 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12799 && !pVCpu->iem.s.uVex3rdReg \
12800 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12801 { /* likely */ } \
12802 else \
12803 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12804 } while (0)
12805
12806/**
12807 * Done decoding VEX, no V, L=0.
12808 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12809 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12810 */
12811#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12812 do \
12813 { \
12814 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12815 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12816 && pVCpu->iem.s.uVexLength == 0 \
12817 && pVCpu->iem.s.uVex3rdReg == 0 \
12818 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12819 { /* likely */ } \
12820 else \
12821 return IEMOP_RAISE_INVALID_OPCODE(); \
12822 } while (0)
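/* Hedged usage note: exactly one of the IEMOP_HLP_DONE_VEX_DECODING* checks is expected
 * once the last opcode/operand byte of a VEX-encoded instruction has been fetched,
 * picking the variant that matches the instruction's constraints (hypothetical fragment):
 * @code
 *     IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV();   // a VEX.L0 form that must leave VEX.vvvv unused
 * @endcode
 */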
12823
12824#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12825 do \
12826 { \
12827 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12828 { /* likely */ } \
12829 else \
12830 { \
12831 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12832 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12833 } \
12834 } while (0)
12835#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12836 do \
12837 { \
12838 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12839 { /* likely */ } \
12840 else \
12841 { \
12842 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12843 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12844 } \
12845 } while (0)
12846
12847/**
12848 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12849 * are present.
12850 */
12851#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12852 do \
12853 { \
12854 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12855 { /* likely */ } \
12856 else \
12857 return IEMOP_RAISE_INVALID_OPCODE(); \
12858 } while (0)
12859
12860/**
12861 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12862 * prefixes are present.
12863 */
12864#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12865 do \
12866 { \
12867 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12868 { /* likely */ } \
12869 else \
12870 return IEMOP_RAISE_INVALID_OPCODE(); \
12871 } while (0)
12872
12873
12874/**
12875 * Calculates the effective address of a ModR/M memory operand.
12876 *
12877 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12878 *
12879 * @return Strict VBox status code.
12880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12881 * @param bRm The ModRM byte.
12882 * @param cbImm The size of any immediate following the
12883 * effective address opcode bytes. Important for
12884 * RIP relative addressing.
12885 * @param pGCPtrEff Where to return the effective address.
12886 */
12887IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12888{
12889 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12890# define SET_SS_DEF() \
12891 do \
12892 { \
12893 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12894 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12895 } while (0)
12896
12897 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12898 {
12899/** @todo Check the effective address size crap! */
12900 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12901 {
12902 uint16_t u16EffAddr;
12903
12904 /* Handle the disp16 form with no registers first. */
12905 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12906 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12907 else
12908 {
12909                /* Get the displacement. */
12910 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12911 {
12912 case 0: u16EffAddr = 0; break;
12913 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12914 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12915 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12916 }
12917
12918 /* Add the base and index registers to the disp. */
12919 switch (bRm & X86_MODRM_RM_MASK)
12920 {
12921 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12922 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12923 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12924 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12925 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12926 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12927 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12928 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12929 }
12930 }
12931
12932 *pGCPtrEff = u16EffAddr;
12933 }
12934 else
12935 {
12936 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12937 uint32_t u32EffAddr;
12938
12939 /* Handle the disp32 form with no registers first. */
12940 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12941 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12942 else
12943 {
12944 /* Get the register (or SIB) value. */
12945 switch ((bRm & X86_MODRM_RM_MASK))
12946 {
12947 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12948 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12949 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12950 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12951 case 4: /* SIB */
12952 {
12953 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12954
12955 /* Get the index and scale it. */
12956 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12957 {
12958 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12959 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12960 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12961 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12962 case 4: u32EffAddr = 0; /*none */ break;
12963 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12964 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12965 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12966 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12967 }
12968 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12969
12970 /* add base */
12971 switch (bSib & X86_SIB_BASE_MASK)
12972 {
12973 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12974 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12975 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12976 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12977 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12978 case 5:
12979 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12980 {
12981 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12982 SET_SS_DEF();
12983 }
12984 else
12985 {
12986 uint32_t u32Disp;
12987 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12988 u32EffAddr += u32Disp;
12989 }
12990 break;
12991 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12992 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12993 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12994 }
12995 break;
12996 }
12997 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12998 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12999 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13000 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13001 }
13002
13003 /* Get and add the displacement. */
13004 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13005 {
13006 case 0:
13007 break;
13008 case 1:
13009 {
13010 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13011 u32EffAddr += i8Disp;
13012 break;
13013 }
13014 case 2:
13015 {
13016 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13017 u32EffAddr += u32Disp;
13018 break;
13019 }
13020 default:
13021 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13022 }
13023
13024 }
13025 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13026 *pGCPtrEff = u32EffAddr;
13027 else
13028 {
13029 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13030 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13031 }
13032 }
13033 }
13034 else
13035 {
13036 uint64_t u64EffAddr;
13037
13038 /* Handle the rip+disp32 form with no registers first. */
13039 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13040 {
13041 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13042 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13043 }
13044 else
13045 {
13046 /* Get the register (or SIB) value. */
13047 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13048 {
13049 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13050 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13051 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13052 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13053 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13054 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13055 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13056 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13057 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13058 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13059 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13060 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13061 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13062 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13063 /* SIB */
13064 case 4:
13065 case 12:
13066 {
13067 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13068
13069 /* Get the index and scale it. */
13070 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13071 {
13072 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13073 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13074 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13075 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13076 case 4: u64EffAddr = 0; /*none */ break;
13077 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13078 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13079 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13080 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13081 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13082 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13083 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13084 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13085 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13086 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13087 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13088 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13089 }
13090 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13091
13092 /* add base */
13093 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13094 {
13095 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13096 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13097 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13098 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13099 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13100 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13101 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13102 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13103 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13104 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13105 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13106 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13107 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13108 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13109 /* complicated encodings */
13110 case 5:
13111 case 13:
13112 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13113 {
13114 if (!pVCpu->iem.s.uRexB)
13115 {
13116 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13117 SET_SS_DEF();
13118 }
13119 else
13120 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13121 }
13122 else
13123 {
13124 uint32_t u32Disp;
13125 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13126 u64EffAddr += (int32_t)u32Disp;
13127 }
13128 break;
13129 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13130 }
13131 break;
13132 }
13133 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13134 }
13135
13136 /* Get and add the displacement. */
13137 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13138 {
13139 case 0:
13140 break;
13141 case 1:
13142 {
13143 int8_t i8Disp;
13144 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13145 u64EffAddr += i8Disp;
13146 break;
13147 }
13148 case 2:
13149 {
13150 uint32_t u32Disp;
13151 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13152 u64EffAddr += (int32_t)u32Disp;
13153 break;
13154 }
13155 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13156 }
13157
13158 }
13159
13160 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13161 *pGCPtrEff = u64EffAddr;
13162 else
13163 {
13164 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13165 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13166 }
13167 }
13168
13169 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13170 return VINF_SUCCESS;
13171}
13172
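/*
 * Worked example for the 64-bit ModR/M + SIB decoding above (illustrative, not part of
 * the original source): for mov rax, [rbx+rsi*8+0x10] without REX.B/REX.X the decoder
 * sees bRm = 0x44 (mod=1, reg=0, rm=4 -> SIB follows), bSib = 0xF3 (scale=3 -> *8,
 * index=6 -> rsi, base=3 -> rbx) and an 8-bit displacement of 0x10, so the code ends
 * up with u64EffAddr = rsi*8 + rbx + 0x10.
 */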
13173
13174/**
13175 * Calculates the effective address of a ModR/M memory operand.
13176 *
13177 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13178 *
13179 * @return Strict VBox status code.
13180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13181 * @param bRm The ModRM byte.
13182 * @param cbImm The size of any immediate following the
13183 * effective address opcode bytes. Important for
13184 * RIP relative addressing.
13185 * @param pGCPtrEff Where to return the effective address.
13186 * @param offRsp RSP displacement.
13187 */
13188IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13189{
13190 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13191# define SET_SS_DEF() \
13192 do \
13193 { \
13194 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13195 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13196 } while (0)
13197
13198 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13199 {
13200/** @todo Check the effective address size crap! */
13201 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13202 {
13203 uint16_t u16EffAddr;
13204
13205 /* Handle the disp16 form with no registers first. */
13206 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13207 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13208 else
13209 {
13210 /* Get the displacement. */
13211 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13212 {
13213 case 0: u16EffAddr = 0; break;
13214 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13215 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13216 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13217 }
13218
13219 /* Add the base and index registers to the disp. */
13220 switch (bRm & X86_MODRM_RM_MASK)
13221 {
13222 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13223 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13224 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13225 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13226 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13227 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13228 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13229 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13230 }
13231 }
13232
13233 *pGCPtrEff = u16EffAddr;
13234 }
13235 else
13236 {
13237 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13238 uint32_t u32EffAddr;
13239
13240 /* Handle the disp32 form with no registers first. */
13241 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13242 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13243 else
13244 {
13245 /* Get the register (or SIB) value. */
13246 switch ((bRm & X86_MODRM_RM_MASK))
13247 {
13248 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13249 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13250 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13251 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13252 case 4: /* SIB */
13253 {
13254 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13255
13256 /* Get the index and scale it. */
13257 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13258 {
13259 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13260 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13261 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13262 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13263 case 4: u32EffAddr = 0; /* none */ break;
13264 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13265 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13266 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13267 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13268 }
13269 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13270
13271 /* add base */
13272 switch (bSib & X86_SIB_BASE_MASK)
13273 {
13274 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13275 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13276 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13277 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13278 case 4:
13279 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13280 SET_SS_DEF();
13281 break;
13282 case 5:
13283 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13284 {
13285 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13286 SET_SS_DEF();
13287 }
13288 else
13289 {
13290 uint32_t u32Disp;
13291 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13292 u32EffAddr += u32Disp;
13293 }
13294 break;
13295 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13296 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13297 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13298 }
13299 break;
13300 }
13301 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13302 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13303 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13304 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13305 }
13306
13307 /* Get and add the displacement. */
13308 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13309 {
13310 case 0:
13311 break;
13312 case 1:
13313 {
13314 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13315 u32EffAddr += i8Disp;
13316 break;
13317 }
13318 case 2:
13319 {
13320 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13321 u32EffAddr += u32Disp;
13322 break;
13323 }
13324 default:
13325 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13326 }
13327
13328 }
13329 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13330 *pGCPtrEff = u32EffAddr;
13331 else
13332 {
13333 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13334 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13335 }
13336 }
13337 }
13338 else
13339 {
13340 uint64_t u64EffAddr;
13341
13342 /* Handle the rip+disp32 form with no registers first. */
13343 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13344 {
13345 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13346 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13347 }
13348 else
13349 {
13350 /* Get the register (or SIB) value. */
13351 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13352 {
13353 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13354 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13355 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13356 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13357 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13358 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13359 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13360 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13361 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13362 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13363 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13364 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13365 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13366 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13367 /* SIB */
13368 case 4:
13369 case 12:
13370 {
13371 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13372
13373 /* Get the index and scale it. */
13374 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13375 {
13376 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13377 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13378 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13379 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13380 case 4: u64EffAddr = 0; /* none */ break;
13381 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13382 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13383 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13384 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13385 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13386 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13387 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13388 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13389 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13390 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13391 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13392 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13393 }
13394 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13395
13396 /* add base */
13397 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13398 {
13399 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13400 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13401 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13402 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13403 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13404 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13405 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13406 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13407 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13408 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13409 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13410 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13411 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13412 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13413 /* complicated encodings */
13414 case 5:
13415 case 13:
13416 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13417 {
13418 if (!pVCpu->iem.s.uRexB)
13419 {
13420 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13421 SET_SS_DEF();
13422 }
13423 else
13424 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13425 }
13426 else
13427 {
13428 uint32_t u32Disp;
13429 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13430 u64EffAddr += (int32_t)u32Disp;
13431 }
13432 break;
13433 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13434 }
13435 break;
13436 }
13437 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13438 }
13439
13440 /* Get and add the displacement. */
13441 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13442 {
13443 case 0:
13444 break;
13445 case 1:
13446 {
13447 int8_t i8Disp;
13448 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13449 u64EffAddr += i8Disp;
13450 break;
13451 }
13452 case 2:
13453 {
13454 uint32_t u32Disp;
13455 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13456 u64EffAddr += (int32_t)u32Disp;
13457 break;
13458 }
13459 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13460 }
13461
13462 }
13463
13464 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13465 *pGCPtrEff = u64EffAddr;
13466 else
13467 {
13468 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13469 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13470 }
13471 }
13472
13473 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13474 return VINF_SUCCESS;
13475}
13476
13477
13478#ifdef IEM_WITH_SETJMP
13479/**
13480 * Calculates the effective address of a ModR/M memory operand.
13481 *
13482 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13483 *
13484 * May longjmp on internal error.
13485 *
13486 * @return The effective address.
13487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13488 * @param bRm The ModRM byte.
13489 * @param cbImm The size of any immediate following the
13490 * effective address opcode bytes. Important for
13491 * RIP relative addressing.
13492 */
13493IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13494{
13495 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13496# define SET_SS_DEF() \
13497 do \
13498 { \
13499 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13500 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13501 } while (0)
13502
13503 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13504 {
13505/** @todo Check the effective address size crap! */
13506 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13507 {
13508 uint16_t u16EffAddr;
13509
13510 /* Handle the disp16 form with no registers first. */
13511 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13512 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13513 else
13514 {
13515 /* Get the displacement. */
13516 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13517 {
13518 case 0: u16EffAddr = 0; break;
13519 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13520 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13521 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13522 }
13523
13524 /* Add the base and index registers to the disp. */
13525 switch (bRm & X86_MODRM_RM_MASK)
13526 {
13527 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13528 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13529 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13530 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13531 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13532 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13533 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13534 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13535 }
13536 }
13537
13538 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13539 return u16EffAddr;
13540 }
13541
13542 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13543 uint32_t u32EffAddr;
13544
13545 /* Handle the disp32 form with no registers first. */
13546 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13547 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13548 else
13549 {
13550 /* Get the register (or SIB) value. */
13551 switch ((bRm & X86_MODRM_RM_MASK))
13552 {
13553 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13554 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13555 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13556 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13557 case 4: /* SIB */
13558 {
13559 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13560
13561 /* Get the index and scale it. */
13562 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13563 {
13564 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13565 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13566 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13567 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13568 case 4: u32EffAddr = 0; /* none */ break;
13569 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13570 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13571 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13572 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13573 }
13574 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13575
13576 /* add base */
13577 switch (bSib & X86_SIB_BASE_MASK)
13578 {
13579 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13580 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13581 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13582 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13583 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13584 case 5:
13585 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13586 {
13587 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13588 SET_SS_DEF();
13589 }
13590 else
13591 {
13592 uint32_t u32Disp;
13593 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13594 u32EffAddr += u32Disp;
13595 }
13596 break;
13597 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13598 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13599 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13600 }
13601 break;
13602 }
13603 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13604 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13605 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13606 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13607 }
13608
13609 /* Get and add the displacement. */
13610 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13611 {
13612 case 0:
13613 break;
13614 case 1:
13615 {
13616 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13617 u32EffAddr += i8Disp;
13618 break;
13619 }
13620 case 2:
13621 {
13622 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13623 u32EffAddr += u32Disp;
13624 break;
13625 }
13626 default:
13627 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13628 }
13629 }
13630
13631 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13632 {
13633 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13634 return u32EffAddr;
13635 }
13636 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13637 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13638 return u32EffAddr & UINT16_MAX;
13639 }
13640
13641 uint64_t u64EffAddr;
13642
13643 /* Handle the rip+disp32 form with no registers first. */
13644 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13645 {
13646 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13647 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13648 }
13649 else
13650 {
13651 /* Get the register (or SIB) value. */
13652 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13653 {
13654 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13655 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13656 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13657 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13658 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13659 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13660 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13661 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13662 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13663 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13664 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13665 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13666 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13667 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13668 /* SIB */
13669 case 4:
13670 case 12:
13671 {
13672 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13673
13674 /* Get the index and scale it. */
13675 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13676 {
13677 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13678 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13679 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13680 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13681 case 4: u64EffAddr = 0; /* none */ break;
13682 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13683 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13684 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13685 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13686 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13687 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13688 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13689 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13690 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13691 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13692 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13693 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13694 }
13695 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13696
13697 /* add base */
13698 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13699 {
13700 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13701 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13702 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13703 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13704 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13705 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13706 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13707 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13708 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13709 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13710 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13711 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13712 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13713 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13714 /* complicated encodings */
13715 case 5:
13716 case 13:
13717 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13718 {
13719 if (!pVCpu->iem.s.uRexB)
13720 {
13721 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13722 SET_SS_DEF();
13723 }
13724 else
13725 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13726 }
13727 else
13728 {
13729 uint32_t u32Disp;
13730 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13731 u64EffAddr += (int32_t)u32Disp;
13732 }
13733 break;
13734 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13735 }
13736 break;
13737 }
13738 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13739 }
13740
13741 /* Get and add the displacement. */
13742 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13743 {
13744 case 0:
13745 break;
13746 case 1:
13747 {
13748 int8_t i8Disp;
13749 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13750 u64EffAddr += i8Disp;
13751 break;
13752 }
13753 case 2:
13754 {
13755 uint32_t u32Disp;
13756 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13757 u64EffAddr += (int32_t)u32Disp;
13758 break;
13759 }
13760 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13761 }
13762
13763 }
13764
13765 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13766 {
13767 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13768 return u64EffAddr;
13769 }
13770 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13771 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13772 return u64EffAddr & UINT32_MAX;
13773}
13774#endif /* IEM_WITH_SETJMP */
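/*
 * Illustrative note (not from the original source): unlike the status-code returning
 * variants above, the Jmp variant reports internal decoding errors through the longjmp
 * frame established by the outer execution loop (see iemExecOneInner further down), so
 * a caller can use the returned address directly.  Hypothetical use, assuming an
 * instruction with a 1 byte immediate:
 */
#if 0
    RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 1 /* cbImm */);
#endif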
13775
13776/** @} */
13777
13778
13779
13780/*
13781 * Include the instructions
13782 */
13783#include "IEMAllInstructions.cpp.h"
13784
13785
13786
13787#ifdef LOG_ENABLED
13788/**
13789 * Logs the current instruction.
13790 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13791 * @param fSameCtx Set if we have the same context information as the VMM,
13792 * clear if we may have already executed an instruction in
13793 * our debug context. When clear, we assume IEMCPU holds
13794 * valid CPU mode info.
13795 *
13796 * The @a fSameCtx parameter is now misleading and obsolete.
13797 * @param pszFunction The IEM function doing the execution.
13798 */
13799IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13800{
13801# ifdef IN_RING3
13802 if (LogIs2Enabled())
13803 {
13804 char szInstr[256];
13805 uint32_t cbInstr = 0;
13806 if (fSameCtx)
13807 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13808 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13809 szInstr, sizeof(szInstr), &cbInstr);
13810 else
13811 {
13812 uint32_t fFlags = 0;
13813 switch (pVCpu->iem.s.enmCpuMode)
13814 {
13815 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13816 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13817 case IEMMODE_16BIT:
13818 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13819 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13820 else
13821 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13822 break;
13823 }
13824 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13825 szInstr, sizeof(szInstr), &cbInstr);
13826 }
13827
13828 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13829 Log2(("**** %s\n"
13830 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13831 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13832 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13833 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13834 " %s\n"
13835 , pszFunction,
13836 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13837 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13838 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13839 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13840 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13841 szInstr));
13842
13843 if (LogIs3Enabled())
13844 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13845 }
13846 else
13847# endif
13848 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13849 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13850 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13851}
13852#endif /* LOG_ENABLED */
13853
13854
13855/**
13856 * Makes status code adjustments (pass up from I/O and access handlers)
13857 * as well as maintaining statistics.
13858 *
13859 * @returns Strict VBox status code to pass up.
13860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13861 * @param rcStrict The status from executing an instruction.
13862 */
13863DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13864{
13865 if (rcStrict != VINF_SUCCESS)
13866 {
13867 if (RT_SUCCESS(rcStrict))
13868 {
13869 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13870 || rcStrict == VINF_IOM_R3_IOPORT_READ
13871 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13872 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13873 || rcStrict == VINF_IOM_R3_MMIO_READ
13874 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13875 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13876 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13877 || rcStrict == VINF_CPUM_R3_MSR_READ
13878 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13879 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13880 || rcStrict == VINF_EM_RAW_TO_R3
13881 || rcStrict == VINF_EM_TRIPLE_FAULT
13882 || rcStrict == VINF_GIM_R3_HYPERCALL
13883 /* raw-mode / virt handlers only: */
13884 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13885 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13886 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13887 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13888 || rcStrict == VINF_SELM_SYNC_GDT
13889 || rcStrict == VINF_CSAM_PENDING_ACTION
13890 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13891 /* nested hw.virt codes: */
13892 || rcStrict == VINF_VMX_VMEXIT
13893 || rcStrict == VINF_SVM_VMEXIT
13894 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13895/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13896 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13897#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13898 if ( rcStrict == VINF_VMX_VMEXIT
13899 && rcPassUp == VINF_SUCCESS)
13900 rcStrict = VINF_SUCCESS;
13901 else
13902#endif
13903#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13904 if ( rcStrict == VINF_SVM_VMEXIT
13905 && rcPassUp == VINF_SUCCESS)
13906 rcStrict = VINF_SUCCESS;
13907 else
13908#endif
13909 if (rcPassUp == VINF_SUCCESS)
13910 pVCpu->iem.s.cRetInfStatuses++;
13911 else if ( rcPassUp < VINF_EM_FIRST
13912 || rcPassUp > VINF_EM_LAST
13913 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13914 {
13915 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13916 pVCpu->iem.s.cRetPassUpStatus++;
13917 rcStrict = rcPassUp;
13918 }
13919 else
13920 {
13921 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13922 pVCpu->iem.s.cRetInfStatuses++;
13923 }
13924 }
13925 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13926 pVCpu->iem.s.cRetAspectNotImplemented++;
13927 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13928 pVCpu->iem.s.cRetInstrNotImplemented++;
13929 else
13930 pVCpu->iem.s.cRetErrStatuses++;
13931 }
13932 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13933 {
13934 pVCpu->iem.s.cRetPassUpStatus++;
13935 rcStrict = pVCpu->iem.s.rcPassUp;
13936 }
13937
13938 return rcStrict;
13939}
13940
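/*
 * Illustrative summary of the pass-up rule above (not from the original source): when
 * rcStrict is VINF_SUCCESS any pending rcPassUp is taken as-is; when rcStrict is an
 * informational status, a pending rcPassUp only replaces it if it lies outside the
 * VINF_EM_FIRST..VINF_EM_LAST window or is numerically lower (i.e. more urgent) than
 * rcStrict, otherwise it is merely reflected in the statistics counters.
 */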
13941
13942/**
13943 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13944 * IEMExecOneWithPrefetchedByPC.
13945 *
13946 * Similar code is found in IEMExecLots.
13947 *
13948 * @return Strict VBox status code.
13949 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13950 * @param fExecuteInhibit If set, execute the instruction following CLI,
13951 * POP SS and MOV SS,GR.
13952 * @param pszFunction The calling function name.
13953 */
13954DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13955{
13956 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13957 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13958 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13959 RT_NOREF_PV(pszFunction);
13960
13961#ifdef IEM_WITH_SETJMP
13962 VBOXSTRICTRC rcStrict;
13963 jmp_buf JmpBuf;
13964 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13965 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13966 if ((rcStrict = setjmp(JmpBuf)) == 0)
13967 {
13968 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13969 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13970 }
13971 else
13972 pVCpu->iem.s.cLongJumps++;
13973 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13974#else
13975 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13976 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13977#endif
13978 if (rcStrict == VINF_SUCCESS)
13979 pVCpu->iem.s.cInstructions++;
13980 if (pVCpu->iem.s.cActiveMappings > 0)
13981 {
13982 Assert(rcStrict != VINF_SUCCESS);
13983 iemMemRollback(pVCpu);
13984 }
13985 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13986 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13987 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13988
13989//#ifdef DEBUG
13990// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13991//#endif
13992
13993 /* Execute the next instruction as well if a cli, pop ss or
13994 mov ss, Gr has just completed successfully. */
13995 if ( fExecuteInhibit
13996 && rcStrict == VINF_SUCCESS
13997 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13998 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13999 {
14000 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14001 if (rcStrict == VINF_SUCCESS)
14002 {
14003#ifdef LOG_ENABLED
14004 iemLogCurInstr(pVCpu, false, pszFunction);
14005#endif
14006#ifdef IEM_WITH_SETJMP
14007 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14008 if ((rcStrict = setjmp(JmpBuf)) == 0)
14009 {
14010 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14011 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14012 }
14013 else
14014 pVCpu->iem.s.cLongJumps++;
14015 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14016#else
14017 IEM_OPCODE_GET_NEXT_U8(&b);
14018 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14019#endif
14020 if (rcStrict == VINF_SUCCESS)
14021 pVCpu->iem.s.cInstructions++;
14022 if (pVCpu->iem.s.cActiveMappings > 0)
14023 {
14024 Assert(rcStrict != VINF_SUCCESS);
14025 iemMemRollback(pVCpu);
14026 }
14027 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14028 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14029 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14030 }
14031 else if (pVCpu->iem.s.cActiveMappings > 0)
14032 iemMemRollback(pVCpu);
14033 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14034 }
14035
14036 /*
14037 * Return value fiddling, statistics and sanity assertions.
14038 */
14039 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14040
14041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14043 return rcStrict;
14044}
14045
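/*
 * Illustrative note (not from the original source): the fExecuteInhibit handling above
 * is what lets a guest sequence like
 *
 *      mov  ss, ax
 *      mov  esp, ebx
 *
 * run back-to-back without an interrupt being delivered in between, mirroring the
 * one-instruction interrupt shadow real hardware applies after MOV SS and POP SS.
 */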
14046
14047#ifdef IN_RC
14048/**
14049 * Re-enters raw-mode or ensure we return to ring-3.
14050 *
14051 * @returns rcStrict, maybe modified.
14052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14053 * @param rcStrict The status code returned by the interpreter.
14054 */
14055DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14056{
14057 if ( !pVCpu->iem.s.fInPatchCode
14058 && ( rcStrict == VINF_SUCCESS
14059 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14060 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14061 {
14062 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14063 CPUMRawEnter(pVCpu);
14064 else
14065 {
14066 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14067 rcStrict = VINF_EM_RESCHEDULE;
14068 }
14069 }
14070 return rcStrict;
14071}
14072#endif
14073
14074
14075/**
14076 * Execute one instruction.
14077 *
14078 * @return Strict VBox status code.
14079 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14080 */
14081VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14082{
14083#ifdef LOG_ENABLED
14084 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14085#endif
14086
14087 /*
14088 * Do the decoding and emulation.
14089 */
14090 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14091 if (rcStrict == VINF_SUCCESS)
14092 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14093 else if (pVCpu->iem.s.cActiveMappings > 0)
14094 iemMemRollback(pVCpu);
14095
14096#ifdef IN_RC
14097 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14098#endif
14099 if (rcStrict != VINF_SUCCESS)
14100 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14101 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14102 return rcStrict;
14103}
14104
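/*
 * Minimal usage sketch for IEMExecOne (not from the original source; assumes it is
 * called on the EMT owning pVCpu with an up-to-date guest context):
 */
#if 0
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict == VINF_SUCCESS)
    { /* the instruction at CS:RIP was emulated and the guest state advanced */ }
    else
    { /* informational or error status, typically handed back to EM to act upon */ }
#endif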
14105
14106VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14107{
14108 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14109
14110 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14111 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14112 if (rcStrict == VINF_SUCCESS)
14113 {
14114 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14115 if (pcbWritten)
14116 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14117 }
14118 else if (pVCpu->iem.s.cActiveMappings > 0)
14119 iemMemRollback(pVCpu);
14120
14121#ifdef IN_RC
14122 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14123#endif
14124 return rcStrict;
14125}
14126
14127
14128VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14129 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14130{
14131 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14132
14133 VBOXSTRICTRC rcStrict;
14134 if ( cbOpcodeBytes
14135 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14136 {
14137 iemInitDecoder(pVCpu, false);
14138#ifdef IEM_WITH_CODE_TLB
14139 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14140 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14141 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14142 pVCpu->iem.s.offCurInstrStart = 0;
14143 pVCpu->iem.s.offInstrNextByte = 0;
14144#else
14145 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14146 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14147#endif
14148 rcStrict = VINF_SUCCESS;
14149 }
14150 else
14151 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14152 if (rcStrict == VINF_SUCCESS)
14153 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14154 else if (pVCpu->iem.s.cActiveMappings > 0)
14155 iemMemRollback(pVCpu);
14156
14157#ifdef IN_RC
14158 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14159#endif
14160 return rcStrict;
14161}
14162
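/*
 * Usage sketch (not from the original source; pCtx is assumed to be pVCpu's guest
 * context and the opcode bytes are made up): emulating one instruction whose opcode
 * bytes the caller has already fetched.
 */
#if 0
    uint8_t const abOpcode[] = { 0x0f, 0xa2 }; /* cpuid */
    VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                                         abOpcode, sizeof(abOpcode));
#endif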
14163
14164VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14165{
14166 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14167
14168 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14169 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14170 if (rcStrict == VINF_SUCCESS)
14171 {
14172 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14173 if (pcbWritten)
14174 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14175 }
14176 else if (pVCpu->iem.s.cActiveMappings > 0)
14177 iemMemRollback(pVCpu);
14178
14179#ifdef IN_RC
14180 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14181#endif
14182 return rcStrict;
14183}
14184
14185
14186VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14187 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14188{
14189 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14190
14191 VBOXSTRICTRC rcStrict;
14192 if ( cbOpcodeBytes
14193 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14194 {
14195 iemInitDecoder(pVCpu, true);
14196#ifdef IEM_WITH_CODE_TLB
14197 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14198 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14199 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14200 pVCpu->iem.s.offCurInstrStart = 0;
14201 pVCpu->iem.s.offInstrNextByte = 0;
14202#else
14203 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14204 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14205#endif
14206 rcStrict = VINF_SUCCESS;
14207 }
14208 else
14209 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14210 if (rcStrict == VINF_SUCCESS)
14211 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14212 else if (pVCpu->iem.s.cActiveMappings > 0)
14213 iemMemRollback(pVCpu);
14214
14215#ifdef IN_RC
14216 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14217#endif
14218 return rcStrict;
14219}
14220
14221
14222/**
14223 * For debugging DISGetParamSize, may come in handy.
14224 *
14225 * @returns Strict VBox status code.
14226 * @param pVCpu The cross context virtual CPU structure of the
14227 * calling EMT.
14228 * @param pCtxCore The context core structure.
14229 * @param OpcodeBytesPC The PC of the opcode bytes.
14230 * @param pvOpcodeBytes Prefetched opcode bytes.
14231 * @param cbOpcodeBytes Number of prefetched bytes.
14232 * @param pcbWritten Where to return the number of bytes written.
14233 * Optional.
14234 */
14235VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14236 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14237 uint32_t *pcbWritten)
14238{
14239 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14240
14241 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14242 VBOXSTRICTRC rcStrict;
14243 if ( cbOpcodeBytes
14244 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14245 {
14246 iemInitDecoder(pVCpu, true);
14247#ifdef IEM_WITH_CODE_TLB
14248 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14249 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14250 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14251 pVCpu->iem.s.offCurInstrStart = 0;
14252 pVCpu->iem.s.offInstrNextByte = 0;
14253#else
14254 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14255 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14256#endif
14257 rcStrict = VINF_SUCCESS;
14258 }
14259 else
14260 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14261 if (rcStrict == VINF_SUCCESS)
14262 {
14263 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14264 if (pcbWritten)
14265 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14266 }
14267 else if (pVCpu->iem.s.cActiveMappings > 0)
14268 iemMemRollback(pVCpu);
14269
14270#ifdef IN_RC
14271 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14272#endif
14273 return rcStrict;
14274}
14275
14276
14277VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14278{
14279 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14280
14281 /*
14282 * See if there is an interrupt pending in TRPM, inject it if we can.
14283 */
14284 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14285#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14286 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14287 if (fIntrEnabled)
14288 {
14289 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14290 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14291 else
14292 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14293 }
14294#else
14295 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14296#endif
14297 if ( fIntrEnabled
14298 && TRPMHasTrap(pVCpu)
14299 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14300 {
14301 uint8_t u8TrapNo;
14302 TRPMEVENT enmType;
14303 RTGCUINT uErrCode;
14304 RTGCPTR uCr2;
14305 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14306 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14307 TRPMResetTrap(pVCpu);
14308 }
14309
14310 /*
14311 * Initial decoder init w/ prefetch, then setup setjmp.
14312 */
14313 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14314 if (rcStrict == VINF_SUCCESS)
14315 {
14316#ifdef IEM_WITH_SETJMP
14317 jmp_buf JmpBuf;
14318 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14319 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14320 pVCpu->iem.s.cActiveMappings = 0;
14321 if ((rcStrict = setjmp(JmpBuf)) == 0)
14322#endif
14323 {
14324 /*
14325 * The run loop. We limit ourselves to 4096 instructions right now.
14326 */
14327 PVM pVM = pVCpu->CTX_SUFF(pVM);
14328 uint32_t cInstr = 4096;
14329 for (;;)
14330 {
14331 /*
14332 * Log the state.
14333 */
14334#ifdef LOG_ENABLED
14335 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14336#endif
14337
14338 /*
14339 * Do the decoding and emulation.
14340 */
14341 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14342 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14343 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14344 {
14345 Assert(pVCpu->iem.s.cActiveMappings == 0);
14346 pVCpu->iem.s.cInstructions++;
14347 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14348 {
14349 uint64_t fCpu = pVCpu->fLocalForcedActions
14350 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14351 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14352 | VMCPU_FF_TLB_FLUSH
14353#ifdef VBOX_WITH_RAW_MODE
14354 | VMCPU_FF_TRPM_SYNC_IDT
14355 | VMCPU_FF_SELM_SYNC_TSS
14356 | VMCPU_FF_SELM_SYNC_GDT
14357 | VMCPU_FF_SELM_SYNC_LDT
14358#endif
14359 | VMCPU_FF_INHIBIT_INTERRUPTS
14360 | VMCPU_FF_BLOCK_NMIS
14361 | VMCPU_FF_UNHALT ));
14362
14363 if (RT_LIKELY( ( !fCpu
14364 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14365 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14366 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14367 {
14368 if (cInstr-- > 0)
14369 {
14370 Assert(pVCpu->iem.s.cActiveMappings == 0);
14371 iemReInitDecoder(pVCpu);
14372 continue;
14373 }
14374 }
14375 }
14376 Assert(pVCpu->iem.s.cActiveMappings == 0);
14377 }
14378 else if (pVCpu->iem.s.cActiveMappings > 0)
14379 iemMemRollback(pVCpu);
14380 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14381 break;
14382 }
14383 }
14384#ifdef IEM_WITH_SETJMP
14385 else
14386 {
14387 if (pVCpu->iem.s.cActiveMappings > 0)
14388 iemMemRollback(pVCpu);
14389 pVCpu->iem.s.cLongJumps++;
14390 }
14391 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14392#endif
14393
14394 /*
14395 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14396 */
14397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14399 }
14400 else
14401 {
14402 if (pVCpu->iem.s.cActiveMappings > 0)
14403 iemMemRollback(pVCpu);
14404
14405#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14406 /*
14407 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14408 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14409 */
14410 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14411#endif
14412 }
14413
14414 /*
14415 * Maybe re-enter raw-mode and log.
14416 */
14417#ifdef IN_RC
14418 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14419#endif
14420 if (rcStrict != VINF_SUCCESS)
14421 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14422 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14423 if (pcInstructions)
14424 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14425 return rcStrict;
14426}
14427
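/*
 * Usage sketch (not from the original source): an EM-style caller letting IEM execute
 * a batch of instructions and collecting the instruction count.
 */
#if 0
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("IEM executed %u instructions, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
#endif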
14428
14429/**
14430 * Interface used by EMExecuteExec; gathers exit statistics and enforces execution limits.
14431 *
14432 * @returns Strict VBox status code.
14433 * @param pVCpu The cross context virtual CPU structure.
14434 * @param fWillExit To be defined.
14435 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14436 * @param cMaxInstructions Maximum number of instructions to execute.
14437 * @param cMaxInstructionsWithoutExits
14438 * The max number of instructions without exits.
14439 * @param pStats Where to return statistics.
14440 */
14441VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14442 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14443{
14444 NOREF(fWillExit); /** @todo define flexible exit crits */
14445
14446 /*
14447 * Initialize return stats.
14448 */
14449 pStats->cInstructions = 0;
14450 pStats->cExits = 0;
14451 pStats->cMaxExitDistance = 0;
14452 pStats->cReserved = 0;
14453
14454 /*
14455 * Initial decoder init w/ prefetch, then setup setjmp.
14456 */
14457 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14458 if (rcStrict == VINF_SUCCESS)
14459 {
14460#ifdef IEM_WITH_SETJMP
14461 jmp_buf JmpBuf;
14462 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14463 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14464 pVCpu->iem.s.cActiveMappings = 0;
14465 if ((rcStrict = setjmp(JmpBuf)) == 0)
14466#endif
14467 {
14468#ifdef IN_RING0
14469 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14470#endif
14471 uint32_t cInstructionSinceLastExit = 0;
14472
14473 /*
14474 * The run loop. The caller-supplied instruction and exit-distance limits are enforced below.
14475 */
14476 PVM pVM = pVCpu->CTX_SUFF(pVM);
14477 for (;;)
14478 {
14479 /*
14480 * Log the state.
14481 */
14482#ifdef LOG_ENABLED
14483 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14484#endif
14485
14486 /*
14487 * Do the decoding and emulation.
14488 */
14489 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14490
14491 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14492 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14493
14494 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14495 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14496 {
14497 pStats->cExits += 1;
14498 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14499 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14500 cInstructionSinceLastExit = 0;
14501 }
14502
14503 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14504 {
14505 Assert(pVCpu->iem.s.cActiveMappings == 0);
14506 pVCpu->iem.s.cInstructions++;
14507 pStats->cInstructions++;
14508 cInstructionSinceLastExit++;
14509 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14510 {
14511 uint64_t fCpu = pVCpu->fLocalForcedActions
14512 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14513 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14514 | VMCPU_FF_TLB_FLUSH
14515#ifdef VBOX_WITH_RAW_MODE
14516 | VMCPU_FF_TRPM_SYNC_IDT
14517 | VMCPU_FF_SELM_SYNC_TSS
14518 | VMCPU_FF_SELM_SYNC_GDT
14519 | VMCPU_FF_SELM_SYNC_LDT
14520#endif
14521 | VMCPU_FF_INHIBIT_INTERRUPTS
14522 | VMCPU_FF_BLOCK_NMIS
14523 | VMCPU_FF_UNHALT ));
14524
14525 if (RT_LIKELY( ( ( !fCpu
14526 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14527 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14528 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14529 || pStats->cInstructions < cMinInstructions))
14530 {
14531 if (pStats->cInstructions < cMaxInstructions)
14532 {
14533 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14534 {
14535#ifdef IN_RING0
14536 if ( !fCheckPreemptionPending
14537 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14538#endif
14539 {
14540 Assert(pVCpu->iem.s.cActiveMappings == 0);
14541 iemReInitDecoder(pVCpu);
14542 continue;
14543 }
14544#ifdef IN_RING0
14545 rcStrict = VINF_EM_RAW_INTERRUPT;
14546 break;
14547#endif
14548 }
14549 }
14550 }
14551 Assert(!(fCpu & VMCPU_FF_IEM));
14552 }
14553 Assert(pVCpu->iem.s.cActiveMappings == 0);
14554 }
14555 else if (pVCpu->iem.s.cActiveMappings > 0)
14556 iemMemRollback(pVCpu);
14557 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14558 break;
14559 }
14560 }
14561#ifdef IEM_WITH_SETJMP
14562 else
14563 {
14564 if (pVCpu->iem.s.cActiveMappings > 0)
14565 iemMemRollback(pVCpu);
14566 pVCpu->iem.s.cLongJumps++;
14567 }
14568 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14569#endif
14570
14571 /*
14572 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14573 */
14574 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14575 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14576 }
14577 else
14578 {
14579 if (pVCpu->iem.s.cActiveMappings > 0)
14580 iemMemRollback(pVCpu);
14581
14582#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14583 /*
14584 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14585 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14586 */
14587 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14588#endif
14589 }
14590
14591 /*
14592 * Maybe re-enter raw-mode and log.
14593 */
14594#ifdef IN_RC
14595 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14596#endif
14597 if (rcStrict != VINF_SUCCESS)
14598 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14599 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14600 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14601 return rcStrict;
14602}
14603
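/*
 * Usage sketch (not from the original source; the limit values are arbitrary):
 */
#if 0
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /* fWillExit, currently unused */,
                                            32 /* cMinInstructions */, 2048 /* cMaxInstructions */,
                                            512 /* cMaxInstructionsWithoutExits */, &Stats);
#endif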
14604
14605/**
14606 * Injects a trap, fault, abort, software interrupt or external interrupt.
14607 *
14608 * The parameter list matches TRPMQueryTrapAll pretty closely.
14609 *
14610 * @returns Strict VBox status code.
14611 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14612 * @param u8TrapNo The trap number.
14613 * @param enmType What type is it (trap/fault/abort), software
14614 * interrupt or hardware interrupt.
14615 * @param uErrCode The error code if applicable.
14616 * @param uCr2 The CR2 value if applicable.
14617 * @param cbInstr The instruction length (only relevant for
14618 * software interrupts).
14619 */
14620VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14621 uint8_t cbInstr)
14622{
14623 iemInitDecoder(pVCpu, false);
14624#ifdef DBGFTRACE_ENABLED
14625 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14626 u8TrapNo, enmType, uErrCode, uCr2);
14627#endif
14628
14629 uint32_t fFlags;
14630 switch (enmType)
14631 {
14632 case TRPM_HARDWARE_INT:
14633 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14634 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14635 uErrCode = uCr2 = 0;
14636 break;
14637
14638 case TRPM_SOFTWARE_INT:
14639 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14640 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14641 uErrCode = uCr2 = 0;
14642 break;
14643
14644 case TRPM_TRAP:
14645 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14646 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14647 if (u8TrapNo == X86_XCPT_PF)
14648 fFlags |= IEM_XCPT_FLAGS_CR2;
14649 switch (u8TrapNo)
14650 {
14651 case X86_XCPT_DF:
14652 case X86_XCPT_TS:
14653 case X86_XCPT_NP:
14654 case X86_XCPT_SS:
14655 case X86_XCPT_PF:
14656 case X86_XCPT_AC:
14657 fFlags |= IEM_XCPT_FLAGS_ERR;
14658 break;
14659
14660 case X86_XCPT_NMI:
14661 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14662 break;
14663 }
14664 break;
14665
14666 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14667 }
14668
14669 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14670
14671 if (pVCpu->iem.s.cActiveMappings > 0)
14672 iemMemRollback(pVCpu);
14673
14674 return rcStrict;
14675}
14676
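/*
 * Usage sketch (not from the original source; the vector, error code and CR2 values are
 * placeholders): injecting a page fault and an external interrupt.  Note that for
 * TRPM_HARDWARE_INT and TRPM_SOFTWARE_INT the error code and CR2 arguments are ignored.
 */
#if 0
    rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0 /* cbInstr */);
    rcStrict = IEMInjectTrap(pVCpu, 0x20, TRPM_HARDWARE_INT, 0, 0, 0);
#endif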
14677
14678/**
14679 * Injects the active TRPM event.
14680 *
14681 * @returns Strict VBox status code.
14682 * @param pVCpu The cross context virtual CPU structure.
14683 */
14684VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14685{
14686#ifndef IEM_IMPLEMENTS_TASKSWITCH
14687 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14688#else
14689 uint8_t u8TrapNo;
14690 TRPMEVENT enmType;
14691 RTGCUINT uErrCode;
14692 RTGCUINTPTR uCr2;
14693 uint8_t cbInstr;
14694 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14695 if (RT_FAILURE(rc))
14696 return rc;
14697
14698 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14699# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14700 if (rcStrict == VINF_SVM_VMEXIT)
14701 rcStrict = VINF_SUCCESS;
14702# endif
14703
14704 /** @todo Are there any other codes that imply the event was successfully
14705 * delivered to the guest? See @bugref{6607}. */
14706 if ( rcStrict == VINF_SUCCESS
14707 || rcStrict == VINF_IEM_RAISED_XCPT)
14708 TRPMResetTrap(pVCpu);
14709
14710 return rcStrict;
14711#endif
14712}
14713
14714
14715VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14716{
14717 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14718 return VERR_NOT_IMPLEMENTED;
14719}
14720
14721
14722VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14723{
14724 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14725 return VERR_NOT_IMPLEMENTED;
14726}
14727
14728
14729#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14730/**
14731 * Executes an IRET instruction with the default operand size.
14732 *
14733 * This is for PATM.
14734 *
14735 * @returns VBox status code.
14736 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14737 * @param pCtxCore The register frame.
14738 */
14739VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14740{
14741 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14742
14743 iemCtxCoreToCtx(pCtx, pCtxCore);
14744 iemInitDecoder(pVCpu);
14745 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14746 if (rcStrict == VINF_SUCCESS)
14747 iemCtxToCtxCore(pCtxCore, pCtx);
14748 else
14749 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14750 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14751 return rcStrict;
14752}
14753#endif
14754
14755
14756/**
14757 * Macro used by the IEMExec* method to check the given instruction length.
14758 *
14759 * Will return on failure!
14760 *
14761 * @param a_cbInstr The given instruction length.
14762 * @param a_cbMin The minimum length.
14763 */
14764#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14765 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14766 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14767
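/*
 * The single unsigned comparison in the macro above is equivalent to the pair of
 * checks cbMin <= cbInstr && cbInstr <= 15: when cbInstr < cbMin the subtraction
 * wraps around to a huge unsigned value and the comparison fails.  Minimal sketch
 * of the expanded form (illustrative only, not used by the code; the helper name
 * is hypothetical):
 */
#if 0 /* example only */
DECLINLINE(bool) iemExampleIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    return cbInstr - cbMin <= 15u - cbMin; /* same as: cbMin <= cbInstr && cbInstr <= 15 */
}
#endif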
14768
14769/**
14770 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14771 *
14772 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14773 *
14774 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14776 * @param rcStrict The status code to fiddle.
14777 */
14778DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14779{
14780 iemUninitExec(pVCpu);
14781#ifdef IN_RC
14782 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14783#else
14784 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14785#endif
14786}
14787
14788
14789/**
14790 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14791 *
14792 * This API ASSUMES that the caller has already verified that the guest code is
14793 * allowed to access the I/O port. (The I/O port is in the DX register in the
14794 * guest state.)
14795 *
14796 * @returns Strict VBox status code.
14797 * @param pVCpu The cross context virtual CPU structure.
14798 * @param cbValue The size of the I/O port access (1, 2, or 4).
14799 * @param enmAddrMode The addressing mode.
14800 * @param fRepPrefix Indicates whether a repeat prefix is used
14801 * (doesn't matter which for this instruction).
14802 * @param cbInstr The instruction length in bytes.
14803 * @param iEffSeg The effective segment address.
14804 * @param fIoChecked Whether the access to the I/O port has been
14805 * checked or not. It's typically checked in the
14806 * HM scenario.
14807 */
14808VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14809 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14810{
14811 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14812 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14813
14814 /*
14815 * State init.
14816 */
14817 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14818
14819 /*
14820 * Switch orgy for getting to the right handler.
14821 */
14822 VBOXSTRICTRC rcStrict;
14823 if (fRepPrefix)
14824 {
14825 switch (enmAddrMode)
14826 {
14827 case IEMMODE_16BIT:
14828 switch (cbValue)
14829 {
14830 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14831 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14832 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14833 default:
14834 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14835 }
14836 break;
14837
14838 case IEMMODE_32BIT:
14839 switch (cbValue)
14840 {
14841 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14842 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14843 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14844 default:
14845 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14846 }
14847 break;
14848
14849 case IEMMODE_64BIT:
14850 switch (cbValue)
14851 {
14852 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14853 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14854 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14855 default:
14856 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14857 }
14858 break;
14859
14860 default:
14861 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14862 }
14863 }
14864 else
14865 {
14866 switch (enmAddrMode)
14867 {
14868 case IEMMODE_16BIT:
14869 switch (cbValue)
14870 {
14871 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14872 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14873 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14874 default:
14875 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14876 }
14877 break;
14878
14879 case IEMMODE_32BIT:
14880 switch (cbValue)
14881 {
14882 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14883 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14884 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14885 default:
14886 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14887 }
14888 break;
14889
14890 case IEMMODE_64BIT:
14891 switch (cbValue)
14892 {
14893 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14894 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14895 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14896 default:
14897 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14898 }
14899 break;
14900
14901 default:
14902 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14903 }
14904 }
14905
14906 if (pVCpu->iem.s.cActiveMappings)
14907 iemMemRollback(pVCpu);
14908
14909 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14910}
14911
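/*
 * Usage sketch (illustrative assumption): how a VM-exit handler might hand a
 * "rep outsb" to IEM.  The concrete values (byte access, 32-bit address size, DS
 * segment, 2 byte instruction f3 6e) would normally be decoded from the exit
 * qualification / instruction information; the helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr*/, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif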
14912
14913/**
14914 * Interface for HM and EM for executing string I/O IN (read) instructions.
14915 *
14916 * This API ASSUMES that the caller has already verified that the guest code is
14917 * allowed to access the I/O port. (The I/O port is in the DX register in the
14918 * guest state.)
14919 *
14920 * @returns Strict VBox status code.
14921 * @param pVCpu The cross context virtual CPU structure.
14922 * @param cbValue The size of the I/O port access (1, 2, or 4).
14923 * @param enmAddrMode The addressing mode.
14924 * @param fRepPrefix Indicates whether a repeat prefix is used
14925 * (doesn't matter which for this instruction).
14926 * @param cbInstr The instruction length in bytes.
14927 * @param fIoChecked Whether the access to the I/O port has been
14928 * checked or not. It's typically checked in the
14929 * HM scenario.
14930 */
14931VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14932 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14933{
14934 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14935
14936 /*
14937 * State init.
14938 */
14939 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14940
14941 /*
14942 * Switch orgy for getting to the right handler.
14943 */
14944 VBOXSTRICTRC rcStrict;
14945 if (fRepPrefix)
14946 {
14947 switch (enmAddrMode)
14948 {
14949 case IEMMODE_16BIT:
14950 switch (cbValue)
14951 {
14952 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14953 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14954 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14955 default:
14956 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14957 }
14958 break;
14959
14960 case IEMMODE_32BIT:
14961 switch (cbValue)
14962 {
14963 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14964 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14965 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14966 default:
14967 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14968 }
14969 break;
14970
14971 case IEMMODE_64BIT:
14972 switch (cbValue)
14973 {
14974 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14975 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14976 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14977 default:
14978 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14979 }
14980 break;
14981
14982 default:
14983 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14984 }
14985 }
14986 else
14987 {
14988 switch (enmAddrMode)
14989 {
14990 case IEMMODE_16BIT:
14991 switch (cbValue)
14992 {
14993 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14994 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14995 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14996 default:
14997 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14998 }
14999 break;
15000
15001 case IEMMODE_32BIT:
15002 switch (cbValue)
15003 {
15004 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15005 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15006 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15007 default:
15008 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15009 }
15010 break;
15011
15012 case IEMMODE_64BIT:
15013 switch (cbValue)
15014 {
15015 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15016 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15017 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15018 default:
15019 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15020 }
15021 break;
15022
15023 default:
15024 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15025 }
15026 }
15027
15028 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15029 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15030}
15031
15032
15033/**
15034 * Interface for rawmode to execute an OUT (write) instruction.
15035 *
15036 * @returns Strict VBox status code.
15037 * @param pVCpu The cross context virtual CPU structure.
15038 * @param cbInstr The instruction length in bytes.
15039 * @param u16Port The port to write to.
15040 * @param fImm Whether the port is specified using an immediate operand or
15041 * using the implicit DX register.
15042 * @param cbReg The register size.
15043 *
15044 * @remarks In ring-0 not all of the state needs to be synced in.
15045 */
15046VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15047{
15048 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15049 Assert(cbReg <= 4 && cbReg != 3);
15050
15051 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15052 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15053 Assert(!pVCpu->iem.s.cActiveMappings);
15054 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15055}
15056
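/*
 * Usage sketch (illustrative assumption): emulating a single "out dx, al" on behalf
 * of an I/O intercept handler.  fImm is false because the port comes from DX, cbReg
 * is 1 for the 8-bit access, and the instruction is one byte (opcode 0xee).  The
 * helper name and the uPort parameter are hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateOutDxAl(PVMCPU pVCpu, uint16_t uPort)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, uPort, false /*fImm*/, 1 /*cbReg*/);
}
#endif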
15057
15058/**
15059 * Interface for rawmode to execute an IN (read) instruction.
15060 *
15061 * @returns Strict VBox status code.
15062 * @param pVCpu The cross context virtual CPU structure.
15063 * @param cbInstr The instruction length in bytes.
15064 * @param u16Port The port to read.
15065 * @param fImm Whether the port is specified using an immediate operand or
15066 * using the implicit DX.
15067 * @param cbReg The register size.
15068 */
15069VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15070{
15071 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15072 Assert(cbReg <= 4 && cbReg != 3);
15073
15074 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15075 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15076 Assert(!pVCpu->iem.s.cActiveMappings);
15077 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15078}
15079
15080
15081/**
15082 * Interface for HM and EM to write to a CRx register.
15083 *
15084 * @returns Strict VBox status code.
15085 * @param pVCpu The cross context virtual CPU structure.
15086 * @param cbInstr The instruction length in bytes.
15087 * @param iCrReg The control register number (destination).
15088 * @param iGReg The general purpose register number (source).
15089 *
15090 * @remarks In ring-0 not all of the state needs to be synced in.
15091 */
15092VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15093{
15094 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15095 Assert(iCrReg < 16);
15096 Assert(iGReg < 16);
15097
15098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15100 Assert(!pVCpu->iem.s.cActiveMappings);
15101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15102}
15103
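/*
 * Usage sketch (illustrative assumption): emulating "mov cr3, rax" after a CR access
 * intercept.  The encoding 0f 22 d8 is 3 bytes; iCrReg=3 selects CR3 and iGReg=0
 * selects RAX.  The helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateMovCr3FromRax(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
}
#endif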
15104
15105/**
15106 * Interface for HM and EM to read from a CRx register.
15107 *
15108 * @returns Strict VBox status code.
15109 * @param pVCpu The cross context virtual CPU structure.
15110 * @param cbInstr The instruction length in bytes.
15111 * @param iGReg The general purpose register number (destination).
15112 * @param iCrReg The control register number (source).
15113 *
15114 * @remarks In ring-0 not all of the state needs to be synced in.
15115 */
15116VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15117{
15118 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15119 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15120 | CPUMCTX_EXTRN_APIC_TPR);
15121 Assert(iCrReg < 16);
15122 Assert(iGReg < 16);
15123
15124 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15125 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15126 Assert(!pVCpu->iem.s.cActiveMappings);
15127 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15128}
15129
15130
15131/**
15132 * Interface for HM and EM to clear the CR0[TS] bit.
15133 *
15134 * @returns Strict VBox status code.
15135 * @param pVCpu The cross context virtual CPU structure.
15136 * @param cbInstr The instruction length in bytes.
15137 *
15138 * @remarks In ring-0 not all of the state needs to be synced in.
15139 */
15140VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15141{
15142 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15143
15144 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15145 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15146 Assert(!pVCpu->iem.s.cActiveMappings);
15147 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15148}
15149
15150
15151/**
15152 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15153 *
15154 * @returns Strict VBox status code.
15155 * @param pVCpu The cross context virtual CPU structure.
15156 * @param cbInstr The instruction length in bytes.
15157 * @param uValue The value to load into CR0.
15158 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15159 * memory operand. Otherwise pass NIL_RTGCPTR.
15160 *
15161 * @remarks In ring-0 not all of the state needs to be synced in.
15162 */
15163VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15164{
15165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15166
15167 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15168 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15169 Assert(!pVCpu->iem.s.cActiveMappings);
15170 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15171}
15172
15173
15174/**
15175 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15176 *
15177 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15178 *
15179 * @returns Strict VBox status code.
15180 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15181 * @param cbInstr The instruction length in bytes.
15182 * @remarks In ring-0 not all of the state needs to be synced in.
15183 * @thread EMT(pVCpu)
15184 */
15185VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15186{
15187 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15188
15189 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15190 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15191 Assert(!pVCpu->iem.s.cActiveMappings);
15192 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15193}
15194
15195
15196/**
15197 * Interface for HM and EM to emulate the WBINVD instruction.
15198 *
15199 * @returns Strict VBox status code.
15200 * @param pVCpu The cross context virtual CPU structure.
15201 * @param cbInstr The instruction length in bytes.
15202 *
15203 * @remarks In ring-0 not all of the state needs to be synced in.
15204 */
15205VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15206{
15207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15208
15209 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15210 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15211 Assert(!pVCpu->iem.s.cActiveMappings);
15212 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15213}
15214
15215
15216/**
15217 * Interface for HM and EM to emulate the INVD instruction.
15218 *
15219 * @returns Strict VBox status code.
15220 * @param pVCpu The cross context virtual CPU structure.
15221 * @param cbInstr The instruction length in bytes.
15222 *
15223 * @remarks In ring-0 not all of the state needs to be synced in.
15224 */
15225VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15226{
15227 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15228
15229 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15230 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15231 Assert(!pVCpu->iem.s.cActiveMappings);
15232 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15233}
15234
15235
15236/**
15237 * Interface for HM and EM to emulate the INVLPG instruction.
15238 *
15239 * @returns Strict VBox status code.
15240 * @retval VINF_PGM_SYNC_CR3
15241 *
15242 * @param pVCpu The cross context virtual CPU structure.
15243 * @param cbInstr The instruction length in bytes.
15244 * @param GCPtrPage The effective address of the page to invalidate.
15245 *
15246 * @remarks In ring-0 not all of the state needs to be synced in.
15247 */
15248VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15249{
15250 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15251
15252 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15253 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15254 Assert(!pVCpu->iem.s.cActiveMappings);
15255 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15256}
15257
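/*
 * Usage sketch (illustrative assumption): an INVLPG intercept handler forwarding the
 * decoded linear address to IEM.  Per the @retval above, VINF_PGM_SYNC_CR3 must be
 * passed back up so the caller can do the full resync.  The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr)
{
    return IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
}
#endif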
15258
15259/**
15260 * Interface for HM and EM to emulate the CPUID instruction.
15261 *
15262 * @returns Strict VBox status code.
15263 *
15264 * @param pVCpu The cross context virtual CPU structure.
15265 * @param cbInstr The instruction length in bytes.
15266 *
15267 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15268 */
15269VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15270{
15271 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15272 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15273
15274 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15275 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15276 Assert(!pVCpu->iem.s.cActiveMappings);
15277 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15278}
15279
15280
15281/**
15282 * Interface for HM and EM to emulate the RDPMC instruction.
15283 *
15284 * @returns Strict VBox status code.
15285 *
15286 * @param pVCpu The cross context virtual CPU structure.
15287 * @param cbInstr The instruction length in bytes.
15288 *
15289 * @remarks Not all of the state needs to be synced in.
15290 */
15291VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15292{
15293 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15294 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15295
15296 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15297 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15298 Assert(!pVCpu->iem.s.cActiveMappings);
15299 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15300}
15301
15302
15303/**
15304 * Interface for HM and EM to emulate the RDTSC instruction.
15305 *
15306 * @returns Strict VBox status code.
15307 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15308 *
15309 * @param pVCpu The cross context virtual CPU structure.
15310 * @param cbInstr The instruction length in bytes.
15311 *
15312 * @remarks Not all of the state needs to be synced in.
15313 */
15314VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15315{
15316 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15317 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15318
15319 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15320 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15321 Assert(!pVCpu->iem.s.cActiveMappings);
15322 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15323}
15324
15325
15326/**
15327 * Interface for HM and EM to emulate the RDTSCP instruction.
15328 *
15329 * @returns Strict VBox status code.
15330 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15331 *
15332 * @param pVCpu The cross context virtual CPU structure.
15333 * @param cbInstr The instruction length in bytes.
15334 *
15335 * @remarks Not all of the state needs to be synced in. Recommended
15336 * to include CPUMCTX_EXTRN_TSC_AUX, to avoid an extra fetch call.
15337 */
15338VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15339{
15340 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15341 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15342
15343 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15345 Assert(!pVCpu->iem.s.cActiveMappings);
15346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15347}
15348
15349
15350/**
15351 * Interface for HM and EM to emulate the RDMSR instruction.
15352 *
15353 * @returns Strict VBox status code.
15354 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15355 *
15356 * @param pVCpu The cross context virtual CPU structure.
15357 * @param cbInstr The instruction length in bytes.
15358 *
15359 * @remarks Not all of the state needs to be synced in. Requires RCX and
15360 * (currently) all MSRs.
15361 */
15362VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15363{
15364 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15365 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15366
15367 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15368 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15369 Assert(!pVCpu->iem.s.cActiveMappings);
15370 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15371}
15372
15373
15374/**
15375 * Interface for HM and EM to emulate the WRMSR instruction.
15376 *
15377 * @returns Strict VBox status code.
15378 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15379 *
15380 * @param pVCpu The cross context virtual CPU structure.
15381 * @param cbInstr The instruction length in bytes.
15382 *
15383 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15384 * and (currently) all MSRs.
15385 */
15386VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15387{
15388 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15389 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15390 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15391
15392 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15394 Assert(!pVCpu->iem.s.cActiveMappings);
15395 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15396}
15397
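/*
 * Usage sketch (illustrative assumption): a WRMSR intercept handler.  WRMSR is always
 * two bytes (0f 30).  VINF_IEM_RAISED_XCPT indicates IEM already raised the resulting
 * exception into the guest, so it is commonly folded back to VINF_SUCCESS, mirroring
 * how IEMInjectTrpmEvent treats that status above.  The helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateWrmsr(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS;
    return rcStrict;
}
#endif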
15398
15399/**
15400 * Interface for HM and EM to emulate the MONITOR instruction.
15401 *
15402 * @returns Strict VBox status code.
15403 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15404 *
15405 * @param pVCpu The cross context virtual CPU structure.
15406 * @param cbInstr The instruction length in bytes.
15407 *
15408 * @remarks Not all of the state needs to be synced in.
15409 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15410 * are used.
15411 */
15412VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15413{
15414 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15415 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15416
15417 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15418 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15419 Assert(!pVCpu->iem.s.cActiveMappings);
15420 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15421}
15422
15423
15424/**
15425 * Interface for HM and EM to emulate the MWAIT instruction.
15426 *
15427 * @returns Strict VBox status code.
15428 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15429 *
15430 * @param pVCpu The cross context virtual CPU structure.
15431 * @param cbInstr The instruction length in bytes.
15432 *
15433 * @remarks Not all of the state needs to be synced in.
15434 */
15435VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15436{
15437 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15438
15439 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15440 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15441 Assert(!pVCpu->iem.s.cActiveMappings);
15442 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15443}
15444
15445
15446/**
15447 * Interface for HM and EM to emulate the HLT instruction.
15448 *
15449 * @returns Strict VBox status code.
15450 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15451 *
15452 * @param pVCpu The cross context virtual CPU structure.
15453 * @param cbInstr The instruction length in bytes.
15454 *
15455 * @remarks Not all of the state needs to be synced in.
15456 */
15457VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15458{
15459 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15460
15461 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15462 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15463 Assert(!pVCpu->iem.s.cActiveMappings);
15464 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15465}
15466
15467
15468/**
15469 * Checks if IEM is in the process of delivering an event (interrupt or
15470 * exception).
15471 *
15472 * @returns true if we're in the process of raising an interrupt or exception,
15473 * false otherwise.
15474 * @param pVCpu The cross context virtual CPU structure.
15475 * @param puVector Where to store the vector associated with the
15476 * currently delivered event, optional.
15477 * @param pfFlags Where to store the event delivery flags (see
15478 * IEM_XCPT_FLAGS_XXX), optional.
15479 * @param puErr Where to store the error code associated with the
15480 * event, optional.
15481 * @param puCr2 Where to store the CR2 associated with the event,
15482 * optional.
15483 * @remarks The caller should check the flags to determine if the error code and
15484 * CR2 are valid for the event.
15485 */
15486VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15487{
15488 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15489 if (fRaisingXcpt)
15490 {
15491 if (puVector)
15492 *puVector = pVCpu->iem.s.uCurXcpt;
15493 if (pfFlags)
15494 *pfFlags = pVCpu->iem.s.fCurXcpt;
15495 if (puErr)
15496 *puErr = pVCpu->iem.s.uCurXcptErr;
15497 if (puCr2)
15498 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15499 }
15500 return fRaisingXcpt;
15501}
15502
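/*
 * Usage sketch (illustrative assumption): querying the in-flight event and honouring
 * the remark above that the flags decide whether the error code and CR2 outputs are
 * meaningful.  The helper name is hypothetical.
 */
#if 0 /* example only */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("current xcpt/int %#x flags=%#x\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  error code %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  cr2 %#RX64\n", uCr2));
    }
}
#endif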
15503#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15504
15505/**
15506 * Interface for HM and EM to emulate the CLGI instruction.
15507 *
15508 * @returns Strict VBox status code.
15509 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15510 * @param cbInstr The instruction length in bytes.
15511 * @thread EMT(pVCpu)
15512 */
15513VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15514{
15515 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15516
15517 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15518 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15519 Assert(!pVCpu->iem.s.cActiveMappings);
15520 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15521}
15522
15523
15524/**
15525 * Interface for HM and EM to emulate the STGI instruction.
15526 *
15527 * @returns Strict VBox status code.
15528 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15529 * @param cbInstr The instruction length in bytes.
15530 * @thread EMT(pVCpu)
15531 */
15532VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15533{
15534 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15535
15536 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15537 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15538 Assert(!pVCpu->iem.s.cActiveMappings);
15539 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15540}
15541
15542
15543/**
15544 * Interface for HM and EM to emulate the VMLOAD instruction.
15545 *
15546 * @returns Strict VBox status code.
15547 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15548 * @param cbInstr The instruction length in bytes.
15549 * @thread EMT(pVCpu)
15550 */
15551VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15552{
15553 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15554
15555 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15556 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15557 Assert(!pVCpu->iem.s.cActiveMappings);
15558 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15559}
15560
15561
15562/**
15563 * Interface for HM and EM to emulate the VMSAVE instruction.
15564 *
15565 * @returns Strict VBox status code.
15566 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15567 * @param cbInstr The instruction length in bytes.
15568 * @thread EMT(pVCpu)
15569 */
15570VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15571{
15572 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15573
15574 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15575 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15576 Assert(!pVCpu->iem.s.cActiveMappings);
15577 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15578}
15579
15580
15581/**
15582 * Interface for HM and EM to emulate the INVLPGA instruction.
15583 *
15584 * @returns Strict VBox status code.
15585 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15586 * @param cbInstr The instruction length in bytes.
15587 * @thread EMT(pVCpu)
15588 */
15589VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15590{
15591 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15592
15593 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15594 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15595 Assert(!pVCpu->iem.s.cActiveMappings);
15596 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15597}
15598
15599
15600/**
15601 * Interface for HM and EM to emulate the VMRUN instruction.
15602 *
15603 * @returns Strict VBox status code.
15604 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15605 * @param cbInstr The instruction length in bytes.
15606 * @thread EMT(pVCpu)
15607 */
15608VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15609{
15610 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15611 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15612
15613 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15614 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15615 Assert(!pVCpu->iem.s.cActiveMappings);
15616 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15617}
15618
15619
15620/**
15621 * Interface for HM and EM to emulate \#VMEXIT.
15622 *
15623 * @returns Strict VBox status code.
15624 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15625 * @param uExitCode The exit code.
15626 * @param uExitInfo1 The exit info. 1 field.
15627 * @param uExitInfo2 The exit info. 2 field.
15628 * @thread EMT(pVCpu)
15629 */
15630VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15631{
15632 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15633 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15634 if (pVCpu->iem.s.cActiveMappings)
15635 iemMemRollback(pVCpu);
15636 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15637}
15638
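/*
 * Usage sketch (illustrative assumption): forcing a nested-guest #VMEXIT with a
 * caller-supplied exit code; rollback and status fiddling happen inside
 * IEMExecSvmVmexit itself.  The helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleForceSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode)
{
    return IEMExecSvmVmexit(pVCpu, uExitCode, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif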
15639#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15640
15641#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15642
15643/**
15644 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15645 *
15646 * @returns Strict VBox status code.
15647 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15648 * @param uVector The external interrupt vector.
15649 * @param fIntPending Whether the external interrupt is pending or
15650 * acknowledged in the interrupt controller.
15651 * @thread EMT(pVCpu)
15652 */
15653VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15654{
15655 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15656 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15657 if (pVCpu->iem.s.cActiveMappings)
15658 iemMemRollback(pVCpu);
15659 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15660}
15661
15662
15663/**
15664 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15665 *
15666 * @returns Strict VBox status code.
15667 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15670 *
15671 * @thread EMT(pVCpu)
15672 */
15673VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15674{
15675 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15676 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15677 if (pVCpu->iem.s.cActiveMappings)
15678 iemMemRollback(pVCpu);
15679 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15680}
15681
15682
15683/**
15684 * Interface for HM and EM to emulate the VMREAD instruction.
15685 *
15686 * @returns Strict VBox status code.
15687 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15688 * @param pExitInfo Pointer to the VM-exit information struct.
15689 * @thread EMT(pVCpu)
15690 */
15691VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15692{
15693 Assert(pExitInfo);
15694 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15695 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15696
15697 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15698
15699 VBOXSTRICTRC rcStrict;
15700 uint8_t const cbInstr = pExitInfo->cbInstr;
15701 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15702 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15703 {
15704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15705 {
15706 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15707 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15708 }
15709 else
15710 {
15711 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15712 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15713 }
15714 }
15715 else
15716 {
15717 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15718 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15719 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15720 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15721 }
15722 if (pVCpu->iem.s.cActiveMappings)
15723 iemMemRollback(pVCpu);
15724 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15725}
15726
15727
15728/**
15729 * Interface for HM and EM to emulate the VMWRITE instruction.
15730 *
15731 * @returns Strict VBox status code.
15732 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15733 * @param pExitInfo Pointer to the VM-exit information struct.
15734 * @thread EMT(pVCpu)
15735 */
15736VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15737{
15738 Assert(pExitInfo);
15739 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15740 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15741
15742 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15743
15744 uint64_t u64Val;
15745 uint8_t iEffSeg;
15746 IEMMODE enmEffAddrMode;
15747 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15748 {
15749 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15750 iEffSeg = UINT8_MAX;
15751 enmEffAddrMode = UINT8_MAX;
15752 }
15753 else
15754 {
15755 u64Val = pExitInfo->GCPtrEffAddr;
15756 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15757 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15758 }
15759 uint8_t const cbInstr = pExitInfo->cbInstr;
15760 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15761 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15762 if (pVCpu->iem.s.cActiveMappings)
15763 iemMemRollback(pVCpu);
15764 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15765}
15766
15767
15768/**
15769 * Interface for HM and EM to emulate the VMPTRLD instruction.
15770 *
15771 * @returns Strict VBox status code.
15772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15773 * @param pExitInfo Pointer to the VM-exit information struct.
15774 * @thread EMT(pVCpu)
15775 */
15776VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15777{
15778 Assert(pExitInfo);
15779 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15780 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15781
15782 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15783
15784 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15785 uint8_t const cbInstr = pExitInfo->cbInstr;
15786 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15787 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15788 if (pVCpu->iem.s.cActiveMappings)
15789 iemMemRollback(pVCpu);
15790 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15791}
15792
15793
15794/**
15795 * Interface for HM and EM to emulate the VMPTRST instruction.
15796 *
15797 * @returns Strict VBox status code.
15798 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15799 * @param pExitInfo Pointer to the VM-exit information struct.
15800 * @thread EMT(pVCpu)
15801 */
15802VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15803{
15804 Assert(pExitInfo);
15805 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15806 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15807
15808 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15809
15810 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15811 uint8_t const cbInstr = pExitInfo->cbInstr;
15812 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15813 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15814 if (pVCpu->iem.s.cActiveMappings)
15815 iemMemRollback(pVCpu);
15816 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15817}
15818
15819
15820/**
15821 * Interface for HM and EM to emulate the VMCLEAR instruction.
15822 *
15823 * @returns Strict VBox status code.
15824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15825 * @param pExitInfo Pointer to the VM-exit information struct.
15826 * @thread EMT(pVCpu)
15827 */
15828VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15829{
15830 Assert(pExitInfo);
15831 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15832 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15833
15834 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15835
15836 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15837 uint8_t const cbInstr = pExitInfo->cbInstr;
15838 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15839 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15840 if (pVCpu->iem.s.cActiveMappings)
15841 iemMemRollback(pVCpu);
15842 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15843}
15844
15845
15846/**
15847 * Interface for HM and EM to emulate the VMXON instruction.
15848 *
15849 * @returns Strict VBox status code.
15850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15851 * @param pExitInfo Pointer to the VM-exit information struct.
15852 * @thread EMT(pVCpu)
15853 */
15854VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15855{
15856 Assert(pExitInfo);
15857 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15858 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15859
15860 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15861
15862 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15863 uint8_t const cbInstr = pExitInfo->cbInstr;
15864 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15865 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15866 if (pVCpu->iem.s.cActiveMappings)
15867 iemMemRollback(pVCpu);
15868 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15869}
15870
15871
15872/**
15873 * Interface for HM and EM to emulate the VMXOFF instruction.
15874 *
15875 * @returns Strict VBox status code.
15876 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15877 * @param cbInstr The instruction length in bytes.
15878 * @thread EMT(pVCpu)
15879 */
15880VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15881{
15882 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15883 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HM_VMX_MASK);
15884
15885 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15886 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15887 Assert(!pVCpu->iem.s.cActiveMappings);
15888 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15889}
15890
15891#endif
15892
15893#ifdef IN_RING3
15894
15895/**
15896 * Handles the unlikely and probably fatal merge cases.
15897 *
15898 * @returns Merged status code.
15899 * @param rcStrict Current EM status code.
15900 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15901 * with @a rcStrict.
15902 * @param iMemMap The memory mapping index. For error reporting only.
15903 * @param pVCpu The cross context virtual CPU structure of the calling
15904 * thread, for error reporting only.
15905 */
15906DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15907 unsigned iMemMap, PVMCPU pVCpu)
15908{
15909 if (RT_FAILURE_NP(rcStrict))
15910 return rcStrict;
15911
15912 if (RT_FAILURE_NP(rcStrictCommit))
15913 return rcStrictCommit;
15914
15915 if (rcStrict == rcStrictCommit)
15916 return rcStrictCommit;
15917
15918 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15919 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15920 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15921 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15922 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15923 return VERR_IOM_FF_STATUS_IPE;
15924}
15925
15926
15927/**
15928 * Helper for IOMR3ProcessForceFlag.
15929 *
15930 * @returns Merged status code.
15931 * @param rcStrict Current EM status code.
15932 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15933 * with @a rcStrict.
15934 * @param iMemMap The memory mapping index. For error reporting only.
15935 * @param pVCpu The cross context virtual CPU structure of the calling
15936 * thread, for error reporting only.
15937 */
15938DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15939{
15940 /* Simple. */
15941 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15942 return rcStrictCommit;
15943
15944 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15945 return rcStrict;
15946
15947 /* EM scheduling status codes. */
15948 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15949 && rcStrict <= VINF_EM_LAST))
15950 {
15951 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15952 && rcStrictCommit <= VINF_EM_LAST))
15953 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15954 }
15955
15956 /* Unlikely */
15957 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15958}
15959
15960
15961/**
15962 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15963 *
15964 * @returns Merge between @a rcStrict and what the commit operation returned.
15965 * @param pVM The cross context VM structure.
15966 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15967 * @param rcStrict The status code returned by ring-0 or raw-mode.
15968 */
15969VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15970{
15971 /*
15972 * Reset the pending commit.
15973 */
15974 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15975 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15976 ("%#x %#x %#x\n",
15977 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15978 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15979
15980 /*
15981 * Commit the pending bounce buffers (usually just one).
15982 */
15983 unsigned cBufs = 0;
15984 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15985 while (iMemMap-- > 0)
15986 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15987 {
15988 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15989 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15990 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15991
15992 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15993 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15994 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15995
15996 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15997 {
15998 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15999 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16000 pbBuf,
16001 cbFirst,
16002 PGMACCESSORIGIN_IEM);
16003 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16004 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16005 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16006 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16007 }
16008
16009 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16010 {
16011 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16012 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16013 pbBuf + cbFirst,
16014 cbSecond,
16015 PGMACCESSORIGIN_IEM);
16016 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16017 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16018 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16019 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16020 }
16021 cBufs++;
16022 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16023 }
16024
16025 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16026 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16027 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16028 pVCpu->iem.s.cActiveMappings = 0;
16029 return rcStrict;
16030}
16031
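/*
 * Usage sketch (illustrative assumption): how the ring-3 run loop drains pending
 * bounce-buffer write commits signalled via VMCPU_FF_IEM before acting on the status
 * code returned by ring-0/raw-mode.  The helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleDrainPendingIemWrites(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif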
16032#endif /* IN_RING3 */
16033